langchain 1.0.6 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (210)
  1. package/CHANGELOG.md +20 -0
  2. package/README.md +1 -1
  3. package/chat_models/universal.cjs +1 -0
  4. package/chat_models/universal.d.cts +1 -0
  5. package/chat_models/universal.d.ts +1 -0
  6. package/chat_models/universal.js +1 -0
  7. package/dist/agents/ReactAgent.cjs +1 -1
  8. package/dist/agents/ReactAgent.cjs.map +1 -1
  9. package/dist/agents/ReactAgent.js +2 -2
  10. package/dist/agents/ReactAgent.js.map +1 -1
  11. package/dist/agents/index.d.cts +0 -2
  12. package/dist/agents/index.d.ts +0 -2
  13. package/dist/agents/middleware/constants.cjs +16 -0
  14. package/dist/agents/middleware/constants.cjs.map +1 -0
  15. package/dist/agents/middleware/constants.js +15 -0
  16. package/dist/agents/middleware/constants.js.map +1 -0
  17. package/dist/agents/middleware/contextEditing.cjs.map +1 -1
  18. package/dist/agents/middleware/contextEditing.d.cts +23 -7
  19. package/dist/agents/middleware/contextEditing.d.ts +23 -7
  20. package/dist/agents/middleware/contextEditing.js.map +1 -1
  21. package/dist/agents/middleware/dynamicSystemPrompt.cjs +5 -2
  22. package/dist/agents/middleware/dynamicSystemPrompt.cjs.map +1 -1
  23. package/dist/agents/middleware/dynamicSystemPrompt.d.cts +2 -1
  24. package/dist/agents/middleware/dynamicSystemPrompt.d.ts +2 -1
  25. package/dist/agents/middleware/dynamicSystemPrompt.js +4 -2
  26. package/dist/agents/middleware/dynamicSystemPrompt.js.map +1 -1
  27. package/dist/agents/middleware/error.cjs +20 -0
  28. package/dist/agents/middleware/error.cjs.map +1 -0
  29. package/dist/agents/middleware/error.js +19 -0
  30. package/dist/agents/middleware/error.js.map +1 -0
  31. package/dist/agents/middleware/index.cjs +4 -2
  32. package/dist/agents/middleware/index.d.ts +18 -0
  33. package/dist/agents/middleware/index.js +4 -2
  34. package/dist/agents/middleware/modelRetry.cjs +162 -0
  35. package/dist/agents/middleware/modelRetry.cjs.map +1 -0
  36. package/dist/agents/middleware/modelRetry.d.cts +134 -0
  37. package/dist/agents/middleware/modelRetry.d.ts +134 -0
  38. package/dist/agents/middleware/modelRetry.js +161 -0
  39. package/dist/agents/middleware/modelRetry.js.map +1 -0
  40. package/dist/agents/middleware/{promptCaching.cjs → provider/anthropic/promptCaching.cjs} +3 -3
  41. package/dist/agents/middleware/provider/anthropic/promptCaching.cjs.map +1 -0
  42. package/dist/agents/middleware/{promptCaching.d.cts → provider/anthropic/promptCaching.d.cts} +2 -2
  43. package/dist/agents/middleware/{promptCaching.d.ts → provider/anthropic/promptCaching.d.ts} +2 -2
  44. package/dist/agents/middleware/{promptCaching.js → provider/anthropic/promptCaching.js} +2 -2
  45. package/dist/agents/middleware/provider/anthropic/promptCaching.js.map +1 -0
  46. package/dist/agents/middleware/provider/openai/moderation.cjs +299 -0
  47. package/dist/agents/middleware/provider/openai/moderation.cjs.map +1 -0
  48. package/dist/agents/middleware/provider/openai/moderation.d.cts +133 -0
  49. package/dist/agents/middleware/provider/openai/moderation.d.ts +133 -0
  50. package/dist/agents/middleware/provider/openai/moderation.js +298 -0
  51. package/dist/agents/middleware/provider/openai/moderation.js.map +1 -0
  52. package/dist/agents/middleware/summarization.d.cts +0 -4
  53. package/dist/agents/middleware/summarization.d.ts +0 -4
  54. package/dist/agents/middleware/todoListMiddleware.cjs +1 -1
  55. package/dist/agents/middleware/todoListMiddleware.cjs.map +1 -1
  56. package/dist/agents/middleware/todoListMiddleware.js +1 -1
  57. package/dist/agents/middleware/todoListMiddleware.js.map +1 -1
  58. package/dist/agents/middleware/toolRetry.cjs +32 -44
  59. package/dist/agents/middleware/toolRetry.cjs.map +1 -1
  60. package/dist/agents/middleware/toolRetry.d.cts +16 -36
  61. package/dist/agents/middleware/toolRetry.d.ts +16 -36
  62. package/dist/agents/middleware/toolRetry.js +32 -44
  63. package/dist/agents/middleware/toolRetry.js.map +1 -1
  64. package/dist/agents/middleware/types.d.cts +9 -10
  65. package/dist/agents/middleware/types.d.ts +9 -10
  66. package/dist/agents/middleware/utils.cjs +23 -0
  67. package/dist/agents/middleware/utils.cjs.map +1 -1
  68. package/dist/agents/middleware/utils.d.ts +2 -0
  69. package/dist/agents/middleware/utils.js +23 -1
  70. package/dist/agents/middleware/utils.js.map +1 -1
  71. package/dist/agents/nodes/AgentNode.cjs +50 -22
  72. package/dist/agents/nodes/AgentNode.cjs.map +1 -1
  73. package/dist/agents/nodes/AgentNode.js +52 -24
  74. package/dist/agents/nodes/AgentNode.js.map +1 -1
  75. package/dist/agents/nodes/types.d.cts +39 -3
  76. package/dist/agents/nodes/types.d.ts +39 -3
  77. package/dist/agents/responses.d.cts +0 -19
  78. package/dist/agents/responses.d.ts +0 -19
  79. package/dist/agents/runtime.d.ts +1 -0
  80. package/dist/agents/tests/utils.cjs +10 -1
  81. package/dist/agents/tests/utils.cjs.map +1 -1
  82. package/dist/agents/tests/utils.js +10 -1
  83. package/dist/agents/tests/utils.js.map +1 -1
  84. package/dist/agents/types.d.cts +68 -2
  85. package/dist/agents/types.d.ts +68 -2
  86. package/dist/agents/utils.cjs +15 -12
  87. package/dist/agents/utils.cjs.map +1 -1
  88. package/dist/agents/utils.js +16 -13
  89. package/dist/agents/utils.js.map +1 -1
  90. package/dist/chat_models/universal.cjs +50 -16
  91. package/dist/chat_models/universal.cjs.map +1 -1
  92. package/dist/chat_models/universal.d.cts +19 -1
  93. package/dist/chat_models/universal.d.ts +19 -1
  94. package/dist/chat_models/universal.js +50 -16
  95. package/dist/chat_models/universal.js.map +1 -1
  96. package/dist/index.cjs +8 -2
  97. package/dist/index.d.cts +5 -3
  98. package/dist/index.d.ts +6 -3
  99. package/dist/index.js +7 -3
  100. package/dist/load/import_constants.cjs +2 -1
  101. package/dist/load/import_constants.cjs.map +1 -1
  102. package/dist/load/import_constants.js +2 -1
  103. package/dist/load/import_constants.js.map +1 -1
  104. package/dist/load/import_map.cjs +2 -19
  105. package/dist/load/import_map.cjs.map +1 -1
  106. package/dist/load/import_map.js +2 -19
  107. package/dist/load/import_map.js.map +1 -1
  108. package/hub/node.cjs +1 -0
  109. package/hub/node.d.cts +1 -0
  110. package/hub/node.d.ts +1 -0
  111. package/hub/node.js +1 -0
  112. package/hub.cjs +1 -0
  113. package/hub.d.cts +1 -0
  114. package/hub.d.ts +1 -0
  115. package/hub.js +1 -0
  116. package/load/serializable.cjs +1 -0
  117. package/load/serializable.d.cts +1 -0
  118. package/load/serializable.d.ts +1 -0
  119. package/load/serializable.js +1 -0
  120. package/load.cjs +1 -0
  121. package/load.d.cts +1 -0
  122. package/load.d.ts +1 -0
  123. package/load.js +1 -0
  124. package/package.json +65 -52
  125. package/storage/encoder_backed.cjs +1 -0
  126. package/storage/encoder_backed.d.cts +1 -0
  127. package/storage/encoder_backed.d.ts +1 -0
  128. package/storage/encoder_backed.js +1 -0
  129. package/storage/file_system.cjs +1 -0
  130. package/storage/file_system.d.cts +1 -0
  131. package/storage/file_system.d.ts +1 -0
  132. package/storage/file_system.js +1 -0
  133. package/storage/in_memory.cjs +1 -0
  134. package/storage/in_memory.d.cts +1 -0
  135. package/storage/in_memory.d.ts +1 -0
  136. package/storage/in_memory.js +1 -0
  137. package/dist/agents/ReactAgent.d.cts.map +0 -1
  138. package/dist/agents/ReactAgent.d.ts.map +0 -1
  139. package/dist/agents/constants.cjs +0 -7
  140. package/dist/agents/constants.cjs.map +0 -1
  141. package/dist/agents/constants.d.cts.map +0 -1
  142. package/dist/agents/constants.d.ts.map +0 -1
  143. package/dist/agents/constants.js +0 -6
  144. package/dist/agents/constants.js.map +0 -1
  145. package/dist/agents/errors.d.cts.map +0 -1
  146. package/dist/agents/errors.d.ts.map +0 -1
  147. package/dist/agents/index.d.cts.map +0 -1
  148. package/dist/agents/index.d.ts.map +0 -1
  149. package/dist/agents/middleware/contextEditing.d.cts.map +0 -1
  150. package/dist/agents/middleware/contextEditing.d.ts.map +0 -1
  151. package/dist/agents/middleware/dynamicSystemPrompt.d.cts.map +0 -1
  152. package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +0 -1
  153. package/dist/agents/middleware/hitl.d.cts.map +0 -1
  154. package/dist/agents/middleware/hitl.d.ts.map +0 -1
  155. package/dist/agents/middleware/llmToolSelector.d.cts.map +0 -1
  156. package/dist/agents/middleware/llmToolSelector.d.ts.map +0 -1
  157. package/dist/agents/middleware/modelCallLimit.d.cts.map +0 -1
  158. package/dist/agents/middleware/modelCallLimit.d.ts.map +0 -1
  159. package/dist/agents/middleware/modelFallback.d.cts.map +0 -1
  160. package/dist/agents/middleware/modelFallback.d.ts.map +0 -1
  161. package/dist/agents/middleware/pii.d.cts.map +0 -1
  162. package/dist/agents/middleware/pii.d.ts.map +0 -1
  163. package/dist/agents/middleware/piiRedaction.d.cts.map +0 -1
  164. package/dist/agents/middleware/piiRedaction.d.ts.map +0 -1
  165. package/dist/agents/middleware/promptCaching.cjs.map +0 -1
  166. package/dist/agents/middleware/promptCaching.d.cts.map +0 -1
  167. package/dist/agents/middleware/promptCaching.d.ts.map +0 -1
  168. package/dist/agents/middleware/promptCaching.js.map +0 -1
  169. package/dist/agents/middleware/summarization.d.cts.map +0 -1
  170. package/dist/agents/middleware/summarization.d.ts.map +0 -1
  171. package/dist/agents/middleware/todoListMiddleware.d.cts.map +0 -1
  172. package/dist/agents/middleware/todoListMiddleware.d.ts.map +0 -1
  173. package/dist/agents/middleware/toolCallLimit.d.cts.map +0 -1
  174. package/dist/agents/middleware/toolCallLimit.d.ts.map +0 -1
  175. package/dist/agents/middleware/toolEmulator.d.cts.map +0 -1
  176. package/dist/agents/middleware/toolEmulator.d.ts.map +0 -1
  177. package/dist/agents/middleware/toolRetry.d.cts.map +0 -1
  178. package/dist/agents/middleware/toolRetry.d.ts.map +0 -1
  179. package/dist/agents/middleware/types.d.cts.map +0 -1
  180. package/dist/agents/middleware/types.d.ts.map +0 -1
  181. package/dist/agents/middleware/utils.d.cts.map +0 -1
  182. package/dist/agents/middleware/utils.d.ts.map +0 -1
  183. package/dist/agents/middleware.d.cts.map +0 -1
  184. package/dist/agents/middleware.d.ts.map +0 -1
  185. package/dist/agents/nodes/types.d.cts.map +0 -1
  186. package/dist/agents/nodes/types.d.ts.map +0 -1
  187. package/dist/agents/responses.d.cts.map +0 -1
  188. package/dist/agents/responses.d.ts.map +0 -1
  189. package/dist/agents/runtime.d.cts.map +0 -1
  190. package/dist/agents/runtime.d.ts.map +0 -1
  191. package/dist/agents/tests/utils.d.cts.map +0 -1
  192. package/dist/agents/tests/utils.d.ts.map +0 -1
  193. package/dist/agents/types.d.cts.map +0 -1
  194. package/dist/agents/types.d.ts.map +0 -1
  195. package/dist/chat_models/universal.d.cts.map +0 -1
  196. package/dist/chat_models/universal.d.ts.map +0 -1
  197. package/dist/hub/base.d.cts.map +0 -1
  198. package/dist/hub/base.d.ts.map +0 -1
  199. package/dist/hub/index.d.cts.map +0 -1
  200. package/dist/hub/index.d.ts.map +0 -1
  201. package/dist/hub/node.d.cts.map +0 -1
  202. package/dist/hub/node.d.ts.map +0 -1
  203. package/dist/load/import_type.d.cts.map +0 -1
  204. package/dist/load/import_type.d.ts.map +0 -1
  205. package/dist/load/index.d.cts.map +0 -1
  206. package/dist/load/index.d.ts.map +0 -1
  207. package/dist/storage/encoder_backed.d.cts.map +0 -1
  208. package/dist/storage/encoder_backed.d.ts.map +0 -1
  209. package/dist/storage/file_system.d.cts.map +0 -1
  210. package/dist/storage/file_system.d.ts.map +0 -1
package/dist/agents/middleware/provider/openai/moderation.cjs
@@ -0,0 +1,299 @@
1
+ const require_rolldown_runtime = require('../../../../_virtual/rolldown_runtime.cjs');
2
+ const require_chat_models_universal = require('../../../../chat_models/universal.cjs');
3
+ const require_middleware = require('../../../middleware.cjs');
4
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
5
+
6
+ //#region src/agents/middleware/provider/openai/moderation.ts
7
+ /**
8
+ * Check if the model is an OpenAI model that supports moderation.
9
+ * @param model - The model to check.
10
+ * @returns Whether the model is an OpenAI model that supports moderation.
11
+ */
12
+ function isOpenAIModel(model) {
13
+ if (!model || typeof model !== "object" || model === null || !("client" in model) || !("_getClientOptions" in model) || typeof model._getClientOptions !== "function") return false;
14
+ /**
15
+ * client may not yet be initialized, so we need to check if the model has a _getClientOptions method.
16
+ */
17
+ model._getClientOptions();
18
+ return typeof model.client === "object" && model.client !== null && "moderations" in model.client && typeof model.client.moderations === "object" && model.client.moderations !== null && "create" in model.client.moderations && typeof model.client.moderations.create === "function";
19
+ }
20
+ /**
21
+ * Default template for violation messages.
22
+ */
23
+ const DEFAULT_VIOLATION_TEMPLATE = "I'm sorry, but I can't comply with that request. It was flagged for {categories}.";
24
+ /**
25
+ * Error raised when OpenAI flags content and `exitBehavior` is set to `"error"`.
26
+ */
27
+ var OpenAIModerationError = class extends Error {
28
+ content;
29
+ stage;
30
+ result;
31
+ originalMessage;
32
+ constructor({ content, stage, result, message }) {
33
+ super(message);
34
+ this.name = "OpenAIModerationError";
35
+ this.content = content;
36
+ this.stage = stage;
37
+ this.result = result;
38
+ this.originalMessage = message;
39
+ }
40
+ };
41
+ /**
42
+ * Middleware that moderates agent traffic using OpenAI's moderation endpoint.
43
+ *
44
+ * This middleware checks messages for content policy violations at different stages:
45
+ * - Input: User messages before they reach the model
46
+ * - Output: AI model responses
47
+ * - Tool results: Results returned from tool executions
48
+ *
49
+ * @param options - Configuration options for the middleware
50
+ * @param options.model - OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
51
+ * @param options.moderationModel - Moderation model to use.
52
+ * @param options.checkInput - Whether to check user input messages.
53
+ * @param options.checkOutput - Whether to check model output messages.
54
+ * @param options.checkToolResults - Whether to check tool result messages.
55
+ * @param options.exitBehavior - How to handle violations.
56
+ * @param options.violationMessage - Custom template for violation messages.
57
+ * @returns Middleware function that can be used to moderate agent traffic.
58
+ *
59
+ * @example Using model instance
60
+ * ```ts
61
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
62
+ *
63
+ * const middleware = openAIModerationMiddleware({
64
+ * checkInput: true,
65
+ * checkOutput: true,
66
+ * exitBehavior: "end"
67
+ * });
68
+ *
69
+ * const agent = createAgent({
70
+ * model: "openai:gpt-4o",
71
+ * tools: [...],
72
+ * middleware: [middleware],
73
+ * });
74
+ * ```
75
+ *
76
+ * @example Using model name
77
+ * ```ts
78
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
79
+ *
80
+ * const middleware = openAIModerationMiddleware({
81
+ * model: "gpt-4o-mini",
82
+ * checkInput: true,
83
+ * checkOutput: true,
84
+ * exitBehavior: "end"
85
+ * });
86
+ *
87
+ * const agent = createAgent({
88
+ * model: "openai:gpt-4o",
89
+ * tools: [...],
90
+ * middleware: [middleware],
91
+ * });
92
+ * ```
93
+ *
94
+ * @example Custom violation message
95
+ * ```ts
96
+ * const middleware = openAIModerationMiddleware({
97
+ * violationMessage: "Content flagged: {categories}. Scores: {category_scores}"
98
+ * });
99
+ * ```
100
+ */
101
+ function openAIModerationMiddleware(options) {
102
+ const { model, moderationModel = "omni-moderation-latest", checkInput = true, checkOutput = true, checkToolResults = false, exitBehavior = "end", violationMessage } = options;
103
+ let openaiModel;
104
+ const initModerationModel = async () => {
105
+ if (openaiModel) return openaiModel;
106
+ const resolvedModel = typeof model === "string" ? await require_chat_models_universal.initChatModel(model) : model;
107
+ /**
108
+ * Check if the model is an OpenAI model.
109
+ */
110
+ if (!resolvedModel.getName().includes("ChatOpenAI")) throw new Error(`Model must be an OpenAI model to use moderation middleware. Got: ${resolvedModel.getName()}`);
111
+ /**
112
+ * check if OpenAI model package supports moderation.
113
+ */
114
+ if (!isOpenAIModel(resolvedModel)) throw new Error("Model must support moderation to use moderation middleware.");
115
+ openaiModel = resolvedModel;
116
+ return openaiModel;
117
+ };
118
+ /**
119
+ * Extract text content from a message.
120
+ */
121
+ const extractText = (message) => {
122
+ if (message.content == null) return null;
123
+ const text = message.text;
124
+ return text || null;
125
+ };
126
+ /**
127
+ * Find the last index of a message type in the messages array.
128
+ */
129
+ const findLastIndex = (messages, messageType) => {
130
+ for (let idx = messages.length - 1; idx >= 0; idx--) if (messageType.isInstance(messages[idx])) return idx;
131
+ return null;
132
+ };
133
+ /**
134
+ * Format violation message from moderation result.
135
+ */
136
+ const formatViolationMessage = (content, result) => {
137
+ const categories = [];
138
+ const categoriesObj = result.categories;
139
+ for (const [name, flagged] of Object.entries(categoriesObj)) if (flagged) categories.push(name.replace(/_/g, " "));
140
+ const categoryLabel = categories.length > 0 ? categories.join(", ") : "OpenAI's safety policies";
141
+ const template = violationMessage || DEFAULT_VIOLATION_TEMPLATE;
142
+ const scoresJson = JSON.stringify(result.category_scores, null, 2);
143
+ try {
144
+ return template.replace("{categories}", categoryLabel).replace("{category_scores}", scoresJson).replace("{original_content}", content);
145
+ } catch {
146
+ return template;
147
+ }
148
+ };
149
+ function moderateContent(input, params) {
150
+ const clientOptions = openaiModel?._getClientOptions?.();
151
+ const moderationModel$1 = params?.model ?? "omni-moderation-latest";
152
+ const moderationRequest = {
153
+ input,
154
+ model: moderationModel$1
155
+ };
156
+ return openaiModel.client.moderations.create(moderationRequest, clientOptions);
157
+ }
158
+ /**
159
+ * Apply violation handling based on exit behavior.
160
+ */
161
+ const applyViolation = (messages, index, stage, content, result) => {
162
+ const violationText = formatViolationMessage(content, result);
163
+ if (exitBehavior === "error") throw new OpenAIModerationError({
164
+ content,
165
+ stage,
166
+ result,
167
+ message: violationText
168
+ });
169
+ if (exitBehavior === "end") return {
170
+ jumpTo: "end",
171
+ messages: [new __langchain_core_messages.AIMessage({ content: violationText })]
172
+ };
173
+ if (index == null) return void 0;
174
+ /**
175
+ * Replace the original message with a new message that contains the violation text.
176
+ */
177
+ const newMessages = [...messages];
178
+ const original = newMessages[index];
179
+ const MessageConstructor = Object.getPrototypeOf(original).constructor;
180
+ newMessages[index] = new MessageConstructor({
181
+ ...original,
182
+ content: violationText
183
+ });
184
+ return { messages: newMessages };
185
+ };
186
+ /**
187
+ * Moderate user input messages.
188
+ */
189
+ const moderateUserMessage = async (messages) => {
190
+ const idx = findLastIndex(messages, __langchain_core_messages.HumanMessage);
191
+ if (idx == null) return null;
192
+ const message = messages[idx];
193
+ const text = extractText(message);
194
+ if (!text) return null;
195
+ await initModerationModel();
196
+ const response = await moderateContent(text, { model: moderationModel });
197
+ const flaggedResult = response.results.find((result) => result.flagged);
198
+ if (!flaggedResult) return null;
199
+ return applyViolation(messages, idx, "input", text, flaggedResult);
200
+ };
201
+ /**
202
+ * Moderate tool result messages.
203
+ */
204
+ const moderateToolMessages = async (messages) => {
205
+ const lastAiIdx = findLastIndex(messages, __langchain_core_messages.AIMessage);
206
+ if (lastAiIdx == null) return null;
207
+ const working = [...messages];
208
+ let modified = false;
209
+ for (let idx = lastAiIdx + 1; idx < working.length; idx++) {
210
+ const msg = working[idx];
211
+ if (!__langchain_core_messages.ToolMessage.isInstance(msg)) continue;
212
+ const text = extractText(msg);
213
+ if (!text) continue;
214
+ await initModerationModel();
215
+ const response = await moderateContent(text, { model: moderationModel });
216
+ const flaggedResult = response.results.find((result) => result.flagged);
217
+ if (!flaggedResult) continue;
218
+ const action = applyViolation(working, idx, "tool", text, flaggedResult);
219
+ if (action) {
220
+ if ("jumpTo" in action) return action;
221
+ if ("messages" in action) {
222
+ working.splice(0, working.length, ...action.messages);
223
+ modified = true;
224
+ }
225
+ }
226
+ }
227
+ if (modified) return { messages: working };
228
+ return null;
229
+ };
230
+ /**
231
+ * Moderate model output messages.
232
+ */
233
+ const moderateOutput = async (messages) => {
234
+ const lastAiIdx = findLastIndex(messages, __langchain_core_messages.AIMessage);
235
+ if (lastAiIdx == null) return null;
236
+ const aiMessage = messages[lastAiIdx];
237
+ const text = extractText(aiMessage);
238
+ if (!text) return null;
239
+ await initModerationModel();
240
+ const response = await moderateContent(text, { model: moderationModel });
241
+ const flaggedResult = response.results.find((result) => result.flagged);
242
+ if (!flaggedResult) return null;
243
+ return applyViolation(messages, lastAiIdx, "output", text, flaggedResult);
244
+ };
245
+ /**
246
+ * Moderate inputs (user messages and tool results) before model call.
247
+ */
248
+ const moderateInputs = async (messages) => {
249
+ const working = [...messages];
250
+ let modified = false;
251
+ if (checkToolResults) {
252
+ const action = await moderateToolMessages(working);
253
+ if (action) {
254
+ if ("jumpTo" in action) return action;
255
+ if ("messages" in action) {
256
+ working.splice(0, working.length, ...action.messages);
257
+ modified = true;
258
+ }
259
+ }
260
+ }
261
+ if (checkInput) {
262
+ const action = await moderateUserMessage(working);
263
+ if (action) {
264
+ if ("jumpTo" in action) return action;
265
+ if ("messages" in action) {
266
+ working.splice(0, working.length, ...action.messages);
267
+ modified = true;
268
+ }
269
+ }
270
+ }
271
+ if (modified) return { messages: working };
272
+ return null;
273
+ };
274
+ return require_middleware.createMiddleware({
275
+ name: "OpenAIModerationMiddleware",
276
+ beforeModel: {
277
+ hook: async (state) => {
278
+ if (!checkInput && !checkToolResults) return void 0;
279
+ const messages = state.messages || [];
280
+ if (messages.length === 0) return void 0;
281
+ return await moderateInputs(messages) ?? void 0;
282
+ },
283
+ canJumpTo: ["end"]
284
+ },
285
+ afterModel: {
286
+ hook: async (state) => {
287
+ if (!checkOutput) return void 0;
288
+ const messages = state.messages || [];
289
+ if (messages.length === 0) return void 0;
290
+ return await moderateOutput(messages) ?? void 0;
291
+ },
292
+ canJumpTo: ["end"]
293
+ }
294
+ });
295
+ }
296
+
297
+ //#endregion
298
+ exports.openAIModerationMiddleware = openAIModerationMiddleware;
299
+ //# sourceMappingURL=moderation.cjs.map
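
For orientation, here is a minimal usage sketch assembled from the options, defaults, and JSDoc examples in the hunk above. It is a hedged illustration, not part of the package: the "openai:gpt-4o" agent model string, the empty tools array, and the invoke payload shape are assumptions, while the option names, their defaults, and the `{categories}` placeholder come from the source shown here.

```ts
import { createAgent, openAIModerationMiddleware } from "langchain";

// Moderate user input and model output; replace flagged content in place
// rather than ending the run (exitBehavior defaults to "end").
const moderation = openAIModerationMiddleware({
  model: "gpt-4o-mini", // resolved via initChatModel; must resolve to a ChatOpenAI model
  moderationModel: "omni-moderation-latest", // the default shown above
  checkInput: true,
  checkOutput: true,
  checkToolResults: false,
  exitBehavior: "replace",
  // {categories}, {category_scores}, and {original_content} are the
  // placeholders substituted by formatViolationMessage above.
  violationMessage: "Content flagged for {categories}.",
});

const agent = createAgent({
  model: "openai:gpt-4o", // illustrative agent model
  tools: [], // no tools needed for this sketch
  middleware: [moderation],
});

// Illustrative invocation shape.
const result = await agent.invoke({
  messages: [{ role: "user", content: "Summarize today's support tickets." }],
});
console.log(result.messages.at(-1)?.content);
```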
package/dist/agents/middleware/provider/openai/moderation.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"moderation.cjs","names":["model: unknown","options: OpenAIModerationMiddlewareOptions","openaiModel: OpenAIModel | undefined","initChatModel","message: BaseMessage","messages: BaseMessage[]","messageType: typeof AIMessage | typeof HumanMessage | typeof ToolMessage","content: string","result: ModerationResult","categories: string[]","input: string | string[]","params?: { model?: ModerationModel; options?: unknown }","moderationModel","index: number | null","stage: ViolationStage","AIMessage","HumanMessage","ToolMessage","createMiddleware"],"sources":["../../../../../src/agents/middleware/provider/openai/moderation.ts"],"sourcesContent":["import type { BaseMessage } from \"@langchain/core/messages\";\nimport { AIMessage, HumanMessage, ToolMessage } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\n\nimport { initChatModel } from \"../../../../chat_models/universal.js\";\nimport { createMiddleware } from \"../../../middleware.js\";\nimport type { MiddlewareResult, AgentMiddleware } from \"../../types.js\";\nimport type { AgentBuiltInState } from \"../../../runtime.js\";\n\n/**\n * OpenAI model interface.\n */\ninterface OpenAIModel extends BaseLanguageModel {\n getName: () => string;\n _getClientOptions: () => unknown;\n client: {\n moderations: {\n create: (\n input: {\n input: string | string[];\n model: string;\n },\n options?: unknown\n ) => Promise<ModerationResponse>;\n };\n };\n}\n\n/**\n * Check if the model is an OpenAI model that supports moderation.\n * @param model - The model to check.\n * @returns Whether the model is an OpenAI model that supports moderation.\n */\nfunction isOpenAIModel(model: unknown): model is OpenAIModel {\n if (\n !model ||\n typeof model !== \"object\" ||\n model === null ||\n !(\"client\" in model) ||\n !(\"_getClientOptions\" in model) ||\n typeof model._getClientOptions !== \"function\"\n ) {\n return false;\n }\n\n /**\n * client may not yet be initialized, so we need to check if the model has a _getClientOptions method.\n */\n model._getClientOptions();\n return (\n typeof model.client === \"object\" &&\n model.client !== null &&\n \"moderations\" in model.client &&\n typeof model.client.moderations === \"object\" &&\n model.client.moderations !== null &&\n \"create\" in model.client.moderations &&\n typeof model.client.moderations.create === \"function\"\n );\n}\n\n/**\n * Stage where a violation occurred.\n */\nexport type ViolationStage = \"input\" | \"output\" | \"tool\";\n\n/**\n * Default template for violation messages.\n */\nconst DEFAULT_VIOLATION_TEMPLATE =\n \"I'm sorry, but I can't comply with that request. 
It was flagged for {categories}.\";\n\n/**\n * Result of moderation.\n * @see https://platform.openai.com/docs/api-reference/moderations/object\n */\ninterface ModerationResult {\n flagged: boolean;\n categories: Record<string, boolean>;\n category_scores: Record<string, number>;\n category_applied_input_types: Record<string, string[]>;\n}\n\n/**\n * Moderation response.\n * @see https://platform.openai.com/docs/api-reference/moderations/create\n */\ninterface ModerationResponse {\n id: string;\n model: string;\n results: ModerationResult[];\n}\n\ntype ModerationModel =\n | \"omni-moderation-latest\"\n | \"omni-moderation-2024-09-26\"\n | \"text-moderation-latest\"\n | \"text-moderation-stable\";\n\n/**\n * Error raised when OpenAI flags content and `exitBehavior` is set to `\"error\"`.\n */\nexport class OpenAIModerationError extends Error {\n content: string;\n stage: ViolationStage;\n result: ModerationResult;\n originalMessage: string;\n\n constructor({\n content,\n stage,\n result,\n message,\n }: {\n content: string;\n stage: ViolationStage;\n result: ModerationResult;\n message: string;\n }) {\n super(message);\n this.name = \"OpenAIModerationError\";\n this.content = content;\n this.stage = stage;\n this.result = result;\n this.originalMessage = message;\n }\n}\n\n/**\n * Options for configuring the OpenAI Moderation middleware.\n */\nexport interface OpenAIModerationMiddlewareOptions {\n /**\n * OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.\n * @example\n * ```ts\n * const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n * const middleware = openAIModerationMiddleware({ model });\n * const agent = createAgent({\n * model,\n * middleware: [middleware],\n * });\n * ```\n * @example\n * ```ts\n * const middleware = openAIModerationMiddleware({ model: \"gpt-4o-mini\" });\n * const agent = createAgent({\n * model: \"gpt-5\",\n * middleware: [middleware],\n * });\n * ```\n */\n model: string | BaseChatModel;\n\n /**\n * Moderation model to use.\n * @default \"omni-moderation-latest\"\n */\n moderationModel?: ModerationModel;\n\n /**\n * Whether to check user input messages.\n * @default true\n */\n checkInput?: boolean;\n\n /**\n * Whether to check model output messages.\n * @default true\n */\n checkOutput?: boolean;\n\n /**\n * Whether to check tool result messages.\n * @default false\n */\n checkToolResults?: boolean;\n\n /**\n * How to handle violations.\n * - `\"error\"`: Throw an error when content is flagged\n * - `\"end\"`: End the agent execution and return a violation message\n * - `\"replace\"`: Replace the flagged content with a violation message\n * @default \"end\"\n */\n exitBehavior?: \"error\" | \"end\" | \"replace\";\n\n /**\n * Custom template for violation messages.\n * Available placeholders: `{categories}`, `{category_scores}`, `{original_content}`\n */\n violationMessage?: string;\n}\n\n/**\n * Middleware that moderates agent traffic using OpenAI's moderation endpoint.\n *\n * This middleware checks messages for content policy violations at different stages:\n * - Input: User messages before they reach the model\n * - Output: AI model responses\n * - Tool results: Results returned from tool executions\n *\n * @param options - Configuration options for the middleware\n * @param options.model - OpenAI model to use for moderation. 
Can be either a model name or a BaseChatModel instance.\n * @param options.moderationModel - Moderation model to use.\n * @param options.checkInput - Whether to check user input messages.\n * @param options.checkOutput - Whether to check model output messages.\n * @param options.checkToolResults - Whether to check tool result messages.\n * @param options.exitBehavior - How to handle violations.\n * @param options.violationMessage - Custom template for violation messages.\n * @returns Middleware function that can be used to moderate agent traffic.\n *\n * @example Using model instance\n * ```ts\n * import { createAgent, openAIModerationMiddleware } from \"langchain\";\n *\n * const middleware = openAIModerationMiddleware({\n * checkInput: true,\n * checkOutput: true,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * tools: [...],\n * middleware: [middleware],\n * });\n * ```\n *\n * @example Using model name\n * ```ts\n * import { createAgent, openAIModerationMiddleware } from \"langchain\";\n *\n * const middleware = openAIModerationMiddleware({\n * model: \"gpt-4o-mini\",\n * checkInput: true,\n * checkOutput: true,\n * exitBehavior: \"end\"\n * });\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * tools: [...],\n * middleware: [middleware],\n * });\n * ```\n *\n * @example Custom violation message\n * ```ts\n * const middleware = openAIModerationMiddleware({\n * violationMessage: \"Content flagged: {categories}. Scores: {category_scores}\"\n * });\n * ```\n */\nexport function openAIModerationMiddleware(\n options: OpenAIModerationMiddlewareOptions\n): AgentMiddleware {\n const {\n model,\n moderationModel = \"omni-moderation-latest\",\n checkInput = true,\n checkOutput = true,\n checkToolResults = false,\n exitBehavior = \"end\",\n violationMessage,\n } = options;\n\n let openaiModel: OpenAIModel | undefined;\n const initModerationModel = async (): Promise<OpenAIModel> => {\n if (openaiModel) {\n return openaiModel;\n }\n\n const resolvedModel =\n typeof model === \"string\" ? await initChatModel(model) : model;\n\n /**\n * Check if the model is an OpenAI model.\n */\n if (!resolvedModel.getName().includes(\"ChatOpenAI\")) {\n throw new Error(\n `Model must be an OpenAI model to use moderation middleware. 
Got: ${resolvedModel.getName()}`\n );\n }\n\n /**\n * check if OpenAI model package supports moderation.\n */\n if (!isOpenAIModel(resolvedModel)) {\n throw new Error(\n \"Model must support moderation to use moderation middleware.\"\n );\n }\n\n openaiModel = resolvedModel as unknown as OpenAIModel;\n return openaiModel;\n };\n\n /**\n * Extract text content from a message.\n */\n const extractText = (message: BaseMessage): string | null => {\n if (message.content == null) {\n return null;\n }\n const text = message.text;\n return text || null;\n };\n\n /**\n * Find the last index of a message type in the messages array.\n */\n const findLastIndex = (\n messages: BaseMessage[],\n messageType: typeof AIMessage | typeof HumanMessage | typeof ToolMessage\n ): number | null => {\n for (let idx = messages.length - 1; idx >= 0; idx--) {\n if (messageType.isInstance(messages[idx])) {\n return idx;\n }\n }\n return null;\n };\n\n /**\n * Format violation message from moderation result.\n */\n const formatViolationMessage = (\n content: string,\n result: ModerationResult\n ): string => {\n // Convert categories to array of flagged category names\n const categories: string[] = [];\n const categoriesObj = result.categories as unknown as Record<\n string,\n boolean\n >;\n for (const [name, flagged] of Object.entries(categoriesObj)) {\n if (flagged) {\n categories.push(name.replace(/_/g, \" \"));\n }\n }\n\n const categoryLabel =\n categories.length > 0\n ? categories.join(\", \")\n : \"OpenAI's safety policies\";\n\n const template = violationMessage || DEFAULT_VIOLATION_TEMPLATE;\n const scoresJson = JSON.stringify(\n result.category_scores as unknown as Record<string, number>,\n null,\n 2\n );\n\n try {\n return template\n .replace(\"{categories}\", categoryLabel)\n .replace(\"{category_scores}\", scoresJson)\n .replace(\"{original_content}\", content);\n } catch {\n return template;\n }\n };\n\n function moderateContent(\n input: string | string[],\n params?: { model?: ModerationModel; options?: unknown }\n ): Promise<ModerationResponse> {\n const clientOptions = openaiModel?._getClientOptions?.();\n const moderationModel = params?.model ?? 
\"omni-moderation-latest\";\n const moderationRequest = {\n input,\n model: moderationModel,\n };\n return openaiModel!.client.moderations.create(\n moderationRequest,\n clientOptions\n );\n }\n\n /**\n * Apply violation handling based on exit behavior.\n */\n const applyViolation = (\n messages: BaseMessage[],\n index: number | null,\n stage: ViolationStage,\n content: string,\n result: ModerationResult\n ): MiddlewareResult<Partial<AgentBuiltInState>> | undefined => {\n const violationText = formatViolationMessage(content, result);\n\n if (exitBehavior === \"error\") {\n throw new OpenAIModerationError({\n content,\n stage,\n result,\n message: violationText,\n });\n }\n\n if (exitBehavior === \"end\") {\n return {\n jumpTo: \"end\",\n messages: [new AIMessage({ content: violationText })],\n };\n }\n\n if (index == null) {\n return undefined;\n }\n\n /**\n * Replace the original message with a new message that contains the violation text.\n */\n const newMessages = [...messages];\n const original = newMessages[index];\n const MessageConstructor = Object.getPrototypeOf(original).constructor;\n newMessages[index] = new MessageConstructor({\n ...original,\n content: violationText,\n });\n\n return { messages: newMessages };\n };\n\n /**\n * Moderate user input messages.\n */\n const moderateUserMessage = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const idx = findLastIndex(messages, HumanMessage);\n if (idx == null) {\n return null;\n }\n\n const message = messages[idx];\n const text = extractText(message);\n if (!text) {\n return null;\n }\n\n await initModerationModel();\n const response = await moderateContent(text, {\n model: moderationModel,\n });\n\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n return null;\n }\n\n return applyViolation(messages, idx, \"input\", text, flaggedResult);\n };\n\n /**\n * Moderate tool result messages.\n */\n const moderateToolMessages = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const lastAiIdx = findLastIndex(messages, AIMessage);\n if (lastAiIdx == null) {\n return null;\n }\n\n const working = [...messages];\n let modified = false;\n\n for (let idx = lastAiIdx + 1; idx < working.length; idx++) {\n const msg = working[idx];\n if (!ToolMessage.isInstance(msg)) {\n continue;\n }\n\n const text = extractText(msg);\n if (!text) {\n continue;\n }\n\n await initModerationModel();\n const response = await moderateContent(text, {\n model: moderationModel,\n });\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n continue;\n }\n\n const action = applyViolation(working, idx, \"tool\", text, flaggedResult);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (modified) {\n return { messages: working };\n }\n\n return null;\n };\n\n /**\n * Moderate model output messages.\n */\n const moderateOutput = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const lastAiIdx = findLastIndex(messages, AIMessage);\n if (lastAiIdx == null) {\n return null;\n }\n\n const aiMessage = messages[lastAiIdx];\n const text = extractText(aiMessage);\n if (!text) {\n return null;\n }\n\n await initModerationModel();\n const response = 
await moderateContent(text, {\n model: moderationModel,\n });\n const flaggedResult = response.results.find((result) => result.flagged);\n if (!flaggedResult) {\n return null;\n }\n\n return applyViolation(messages, lastAiIdx, \"output\", text, flaggedResult);\n };\n\n /**\n * Moderate inputs (user messages and tool results) before model call.\n */\n const moderateInputs = async (\n messages: BaseMessage[]\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | null> => {\n const working = [...messages];\n let modified = false;\n\n if (checkToolResults) {\n const action = await moderateToolMessages(working);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (checkInput) {\n const action = await moderateUserMessage(working);\n if (action) {\n if (\"jumpTo\" in action) {\n return action;\n }\n if (\"messages\" in action) {\n working.splice(\n 0,\n working.length,\n ...(action.messages as BaseMessage[])\n );\n modified = true;\n }\n }\n }\n\n if (modified) {\n return { messages: working };\n }\n\n return null;\n };\n\n return createMiddleware({\n name: \"OpenAIModerationMiddleware\",\n beforeModel: {\n hook: async (\n state\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | undefined> => {\n if (!checkInput && !checkToolResults) {\n return undefined;\n }\n\n const messages = state.messages || [];\n if (messages.length === 0) {\n return undefined;\n }\n\n return (await moderateInputs(messages)) ?? undefined;\n },\n canJumpTo: [\"end\"],\n },\n afterModel: {\n hook: async (\n state\n ): Promise<MiddlewareResult<Partial<AgentBuiltInState>> | undefined> => {\n if (!checkOutput) {\n return undefined;\n }\n\n const messages = state.messages || [];\n if (messages.length === 0) {\n return undefined;\n }\n\n return (await moderateOutput(messages)) ?? 
undefined;\n },\n canJumpTo: [\"end\"],\n },\n });\n}\n"],"mappings":";;;;;;;;;;;AAkCA,SAAS,cAAcA,OAAsC;AAC3D,KACE,CAAC,SACD,OAAO,UAAU,YACjB,UAAU,QACV,EAAE,YAAY,UACd,EAAE,uBAAuB,UACzB,OAAO,MAAM,sBAAsB,WAEnC,QAAO;;;;CAMT,MAAM,mBAAmB;AACzB,QACE,OAAO,MAAM,WAAW,YACxB,MAAM,WAAW,QACjB,iBAAiB,MAAM,UACvB,OAAO,MAAM,OAAO,gBAAgB,YACpC,MAAM,OAAO,gBAAgB,QAC7B,YAAY,MAAM,OAAO,eACzB,OAAO,MAAM,OAAO,YAAY,WAAW;AAE9C;;;;AAUD,MAAM,6BACJ;;;;AAgCF,IAAa,wBAAb,cAA2C,MAAM;CAC/C;CACA;CACA;CACA;CAEA,YAAY,EACV,SACA,OACA,QACA,SAMD,EAAE;EACD,MAAM,QAAQ;EACd,KAAK,OAAO;EACZ,KAAK,UAAU;EACf,KAAK,QAAQ;EACb,KAAK,SAAS;EACd,KAAK,kBAAkB;CACxB;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgID,SAAgB,2BACdC,SACiB;CACjB,MAAM,EACJ,OACA,kBAAkB,0BAClB,aAAa,MACb,cAAc,MACd,mBAAmB,OACnB,eAAe,OACf,kBACD,GAAG;CAEJ,IAAIC;CACJ,MAAM,sBAAsB,YAAkC;AAC5D,MAAI,YACF,QAAO;EAGT,MAAM,gBACJ,OAAO,UAAU,WAAW,MAAMC,4CAAc,MAAM,GAAG;;;;AAK3D,MAAI,CAAC,cAAc,SAAS,CAAC,SAAS,aAAa,CACjD,OAAM,IAAI,MACR,CAAC,iEAAiE,EAAE,cAAc,SAAS,EAAE;;;;AAOjG,MAAI,CAAC,cAAc,cAAc,CAC/B,OAAM,IAAI,MACR;EAIJ,cAAc;AACd,SAAO;CACR;;;;CAKD,MAAM,cAAc,CAACC,YAAwC;AAC3D,MAAI,QAAQ,WAAW,KACrB,QAAO;EAET,MAAM,OAAO,QAAQ;AACrB,SAAO,QAAQ;CAChB;;;;CAKD,MAAM,gBAAgB,CACpBC,UACAC,gBACkB;AAClB,OAAK,IAAI,MAAM,SAAS,SAAS,GAAG,OAAO,GAAG,MAC5C,KAAI,YAAY,WAAW,SAAS,KAAK,CACvC,QAAO;AAGX,SAAO;CACR;;;;CAKD,MAAM,yBAAyB,CAC7BC,SACAC,WACW;EAEX,MAAMC,aAAuB,CAAE;EAC/B,MAAM,gBAAgB,OAAO;AAI7B,OAAK,MAAM,CAAC,MAAM,QAAQ,IAAI,OAAO,QAAQ,cAAc,CACzD,KAAI,SACF,WAAW,KAAK,KAAK,QAAQ,MAAM,IAAI,CAAC;EAI5C,MAAM,gBACJ,WAAW,SAAS,IAChB,WAAW,KAAK,KAAK,GACrB;EAEN,MAAM,WAAW,oBAAoB;EACrC,MAAM,aAAa,KAAK,UACtB,OAAO,iBACP,MACA,EACD;AAED,MAAI;AACF,UAAO,SACJ,QAAQ,gBAAgB,cAAc,CACtC,QAAQ,qBAAqB,WAAW,CACxC,QAAQ,sBAAsB,QAAQ;EAC1C,QAAO;AACN,UAAO;EACR;CACF;CAED,SAAS,gBACPC,OACAC,QAC6B;EAC7B,MAAM,gBAAgB,aAAa,qBAAqB;EACxD,MAAMC,oBAAkB,QAAQ,SAAS;EACzC,MAAM,oBAAoB;GACxB;GACA,OAAOA;EACR;AACD,SAAO,YAAa,OAAO,YAAY,OACrC,mBACA,cACD;CACF;;;;CAKD,MAAM,iBAAiB,CACrBP,UACAQ,OACAC,OACAP,SACAC,WAC6D;EAC7D,MAAM,gBAAgB,uBAAuB,SAAS,OAAO;AAE7D,MAAI,iBAAiB,QACnB,OAAM,IAAI,sBAAsB;GAC9B;GACA;GACA;GACA,SAAS;EACV;AAGH,MAAI,iBAAiB,MACnB,QAAO;GACL,QAAQ;GACR,UAAU,CAAC,IAAIO,oCAAU,EAAE,SAAS,cAAe,EAAE;EACtD;AAGH,MAAI,SAAS,KACX,QAAO;;;;EAMT,MAAM,cAAc,CAAC,GAAG,QAAS;EACjC,MAAM,WAAW,YAAY;EAC7B,MAAM,qBAAqB,OAAO,eAAe,SAAS,CAAC;EAC3D,YAAY,SAAS,IAAI,mBAAmB;GAC1C,GAAG;GACH,SAAS;EACV;AAED,SAAO,EAAE,UAAU,YAAa;CACjC;;;;CAKD,MAAM,sBAAsB,OAC1BV,aACiE;EACjE,MAAM,MAAM,cAAc,UAAUW,uCAAa;AACjD,MAAI,OAAO,KACT,QAAO;EAGT,MAAM,UAAU,SAAS;EACzB,MAAM,OAAO,YAAY,QAAQ;AACjC,MAAI,CAAC,KACH,QAAO;EAGT,MAAM,qBAAqB;EAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;EAEF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,MAAI,CAAC,cACH,QAAO;AAGT,SAAO,eAAe,UAAU,KAAK,SAAS,MAAM,cAAc;CACnE;;;;CAKD,MAAM,uBAAuB,OAC3BX,aACiE;EACjE,MAAM,YAAY,cAAc,UAAUU,oCAAU;AACpD,MAAI,aAAa,KACf,QAAO;EAGT,MAAM,UAAU,CAAC,GAAG,QAAS;EAC7B,IAAI,WAAW;AAEf,OAAK,IAAI,MAAM,YAAY,GAAG,MAAM,QAAQ,QAAQ,OAAO;GACzD,MAAM,MAAM,QAAQ;AACpB,OAAI,CAACE,sCAAY,WAAW,IAAI,CAC9B;GAGF,MAAM,OAAO,YAAY,IAAI;AAC7B,OAAI,CAAC,KACH;GAGF,MAAM,qBAAqB;GAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;GACF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,OAAI,CAAC,cACH;GAGF,MAAM,SAAS,eAAe,SAAS,KAAK,QAAQ,MAAM,cAAc;AACxE,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,SACF,QAAO,EAAE,UAAU,QAAS;AAG9B,SAAO;CACR;;;;CAKD,MAAM,iBAAiB,OACrBZ,aACiE;EACjE,MAAM,YAAY,cAAc,UAAUU,oCAAU;AACpD,MAAI,aAAa,KACf,QAAO;EAGT,MAAM,YAAY,SAAS;EAC3B,MAAM,OAAO,YAAY,UAAU;AACnC,MAAI,CAA
C,KACH,QAAO;EAGT,MAAM,qBAAqB;EAC3B,MAAM,WAAW,MAAM,gBAAgB,MAAM,EAC3C,OAAO,gBACR,EAAC;EACF,MAAM,gBAAgB,SAAS,QAAQ,KAAK,CAAC,WAAW,OAAO,QAAQ;AACvE,MAAI,CAAC,cACH,QAAO;AAGT,SAAO,eAAe,UAAU,WAAW,UAAU,MAAM,cAAc;CAC1E;;;;CAKD,MAAM,iBAAiB,OACrBV,aACiE;EACjE,MAAM,UAAU,CAAC,GAAG,QAAS;EAC7B,IAAI,WAAW;AAEf,MAAI,kBAAkB;GACpB,MAAM,SAAS,MAAM,qBAAqB,QAAQ;AAClD,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,YAAY;GACd,MAAM,SAAS,MAAM,oBAAoB,QAAQ;AACjD,OAAI,QAAQ;AACV,QAAI,YAAY,OACd,QAAO;AAET,QAAI,cAAc,QAAQ;KACxB,QAAQ,OACN,GACA,QAAQ,QACR,GAAI,OAAO,SACZ;KACD,WAAW;IACZ;GACF;EACF;AAED,MAAI,SACF,QAAO,EAAE,UAAU,QAAS;AAG9B,SAAO;CACR;AAED,QAAOa,oCAAiB;EACtB,MAAM;EACN,aAAa;GACX,MAAM,OACJ,UACsE;AACtE,QAAI,CAAC,cAAc,CAAC,iBAClB,QAAO;IAGT,MAAM,WAAW,MAAM,YAAY,CAAE;AACrC,QAAI,SAAS,WAAW,EACtB,QAAO;AAGT,WAAQ,MAAM,eAAe,SAAS,IAAK;GAC5C;GACD,WAAW,CAAC,KAAM;EACnB;EACD,YAAY;GACV,MAAM,OACJ,UACsE;AACtE,QAAI,CAAC,YACH,QAAO;IAGT,MAAM,WAAW,MAAM,YAAY,CAAE;AACrC,QAAI,SAAS,WAAW,EACtB,QAAO;AAGT,WAAQ,MAAM,eAAe,SAAS,IAAK;GAC5C;GACD,WAAW,CAAC,KAAM;EACnB;CACF,EAAC;AACH"}
package/dist/agents/middleware/provider/openai/moderation.d.cts
@@ -0,0 +1,133 @@
1
+ import { AgentMiddleware } from "../../types.cjs";
2
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
3
+
4
+ //#region src/agents/middleware/provider/openai/moderation.d.ts
5
+
6
+ type ModerationModel = "omni-moderation-latest" | "omni-moderation-2024-09-26" | "text-moderation-latest" | "text-moderation-stable";
7
+ /**
8
+ * Error raised when OpenAI flags content and `exitBehavior` is set to `"error"`.
9
+ */
10
+
11
+ /**
12
+ * Options for configuring the OpenAI Moderation middleware.
13
+ */
14
+ interface OpenAIModerationMiddlewareOptions {
15
+ /**
16
+ * OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
17
+ * @example
18
+ * ```ts
19
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
20
+ * const middleware = openAIModerationMiddleware({ model });
21
+ * const agent = createAgent({
22
+ * model,
23
+ * middleware: [middleware],
24
+ * });
25
+ * ```
26
+ * @example
27
+ * ```ts
28
+ * const middleware = openAIModerationMiddleware({ model: "gpt-4o-mini" });
29
+ * const agent = createAgent({
30
+ * model: "gpt-5",
31
+ * middleware: [middleware],
32
+ * });
33
+ * ```
34
+ */
35
+ model: string | BaseChatModel;
36
+ /**
37
+ * Moderation model to use.
38
+ * @default "omni-moderation-latest"
39
+ */
40
+ moderationModel?: ModerationModel;
41
+ /**
42
+ * Whether to check user input messages.
43
+ * @default true
44
+ */
45
+ checkInput?: boolean;
46
+ /**
47
+ * Whether to check model output messages.
48
+ * @default true
49
+ */
50
+ checkOutput?: boolean;
51
+ /**
52
+ * Whether to check tool result messages.
53
+ * @default false
54
+ */
55
+ checkToolResults?: boolean;
56
+ /**
57
+ * How to handle violations.
58
+ * - `"error"`: Throw an error when content is flagged
59
+ * - `"end"`: End the agent execution and return a violation message
60
+ * - `"replace"`: Replace the flagged content with a violation message
61
+ * @default "end"
62
+ */
63
+ exitBehavior?: "error" | "end" | "replace";
64
+ /**
65
+ * Custom template for violation messages.
66
+ * Available placeholders: `{categories}`, `{category_scores}`, `{original_content}`
67
+ */
68
+ violationMessage?: string;
69
+ }
70
+ /**
71
+ * Middleware that moderates agent traffic using OpenAI's moderation endpoint.
72
+ *
73
+ * This middleware checks messages for content policy violations at different stages:
74
+ * - Input: User messages before they reach the model
75
+ * - Output: AI model responses
76
+ * - Tool results: Results returned from tool executions
77
+ *
78
+ * @param options - Configuration options for the middleware
79
+ * @param options.model - OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
80
+ * @param options.moderationModel - Moderation model to use.
81
+ * @param options.checkInput - Whether to check user input messages.
82
+ * @param options.checkOutput - Whether to check model output messages.
83
+ * @param options.checkToolResults - Whether to check tool result messages.
84
+ * @param options.exitBehavior - How to handle violations.
85
+ * @param options.violationMessage - Custom template for violation messages.
86
+ * @returns Middleware function that can be used to moderate agent traffic.
87
+ *
88
+ * @example Using model instance
89
+ * ```ts
90
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
91
+ *
92
+ * const middleware = openAIModerationMiddleware({
93
+ * checkInput: true,
94
+ * checkOutput: true,
95
+ * exitBehavior: "end"
96
+ * });
97
+ *
98
+ * const agent = createAgent({
99
+ * model: "openai:gpt-4o",
100
+ * tools: [...],
101
+ * middleware: [middleware],
102
+ * });
103
+ * ```
104
+ *
105
+ * @example Using model name
106
+ * ```ts
107
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
108
+ *
109
+ * const middleware = openAIModerationMiddleware({
110
+ * model: "gpt-4o-mini",
111
+ * checkInput: true,
112
+ * checkOutput: true,
113
+ * exitBehavior: "end"
114
+ * });
115
+ *
116
+ * const agent = createAgent({
117
+ * model: "openai:gpt-4o",
118
+ * tools: [...],
119
+ * middleware: [middleware],
120
+ * });
121
+ * ```
122
+ *
123
+ * @example Custom violation message
124
+ * ```ts
125
+ * const middleware = openAIModerationMiddleware({
126
+ * violationMessage: "Content flagged: {categories}. Scores: {category_scores}"
127
+ * });
128
+ * ```
129
+ */
130
+ declare function openAIModerationMiddleware(options: OpenAIModerationMiddlewareOptions): AgentMiddleware;
131
+ //#endregion
132
+ export { OpenAIModerationMiddlewareOptions, openAIModerationMiddleware };
133
+ //# sourceMappingURL=moderation.d.cts.map
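
Because this declaration file exports `OpenAIModerationMiddlewareOptions` alongside the factory, the configuration can be held in a typed constant and reused. A small sketch, assuming the interface is re-exported from the package root the same way `openAIModerationMiddleware` is in the JSDoc examples; the shared-config pattern itself is illustrative.

```ts
// Assumes both names are re-exported from the root "langchain" entry point.
import {
  openAIModerationMiddleware,
  type OpenAIModerationMiddlewareOptions,
} from "langchain";

// One type-checked configuration object, reusable across agents.
const moderationOptions: OpenAIModerationMiddlewareOptions = {
  model: "gpt-4o-mini",
  moderationModel: "omni-moderation-latest",
  checkInput: true,
  checkOutput: true,
  checkToolResults: true, // also scan ToolMessage results after the last AIMessage
  exitBehavior: "end", // end the run and return the violation message
};

export const moderation = openAIModerationMiddleware(moderationOptions);
```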
package/dist/agents/middleware/provider/openai/moderation.d.ts
@@ -0,0 +1,133 @@
1
+ import { AgentMiddleware } from "../../types.js";
2
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
3
+
4
+ //#region src/agents/middleware/provider/openai/moderation.d.ts
5
+
6
+ type ModerationModel = "omni-moderation-latest" | "omni-moderation-2024-09-26" | "text-moderation-latest" | "text-moderation-stable";
7
+ /**
8
+ * Error raised when OpenAI flags content and `exitBehavior` is set to `"error"`.
9
+ */
10
+
11
+ /**
12
+ * Options for configuring the OpenAI Moderation middleware.
13
+ */
14
+ interface OpenAIModerationMiddlewareOptions {
15
+ /**
16
+ * OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
17
+ * @example
18
+ * ```ts
19
+ * const model = new ChatOpenAI({ model: "gpt-4o-mini" });
20
+ * const middleware = openAIModerationMiddleware({ model });
21
+ * const agent = createAgent({
22
+ * model,
23
+ * middleware: [middleware],
24
+ * });
25
+ * ```
26
+ * @example
27
+ * ```ts
28
+ * const middleware = openAIModerationMiddleware({ model: "gpt-4o-mini" });
29
+ * const agent = createAgent({
30
+ * model: "gpt-5",
31
+ * middleware: [middleware],
32
+ * });
33
+ * ```
34
+ */
35
+ model: string | BaseChatModel;
36
+ /**
37
+ * Moderation model to use.
38
+ * @default "omni-moderation-latest"
39
+ */
40
+ moderationModel?: ModerationModel;
41
+ /**
42
+ * Whether to check user input messages.
43
+ * @default true
44
+ */
45
+ checkInput?: boolean;
46
+ /**
47
+ * Whether to check model output messages.
48
+ * @default true
49
+ */
50
+ checkOutput?: boolean;
51
+ /**
52
+ * Whether to check tool result messages.
53
+ * @default false
54
+ */
55
+ checkToolResults?: boolean;
56
+ /**
57
+ * How to handle violations.
58
+ * - `"error"`: Throw an error when content is flagged
59
+ * - `"end"`: End the agent execution and return a violation message
60
+ * - `"replace"`: Replace the flagged content with a violation message
61
+ * @default "end"
62
+ */
63
+ exitBehavior?: "error" | "end" | "replace";
64
+ /**
65
+ * Custom template for violation messages.
66
+ * Available placeholders: `{categories}`, `{category_scores}`, `{original_content}`
67
+ */
68
+ violationMessage?: string;
69
+ }
70
+ /**
71
+ * Middleware that moderates agent traffic using OpenAI's moderation endpoint.
72
+ *
73
+ * This middleware checks messages for content policy violations at different stages:
74
+ * - Input: User messages before they reach the model
75
+ * - Output: AI model responses
76
+ * - Tool results: Results returned from tool executions
77
+ *
78
+ * @param options - Configuration options for the middleware
79
+ * @param options.model - OpenAI model to use for moderation. Can be either a model name or a BaseChatModel instance.
80
+ * @param options.moderationModel - Moderation model to use.
81
+ * @param options.checkInput - Whether to check user input messages.
82
+ * @param options.checkOutput - Whether to check model output messages.
83
+ * @param options.checkToolResults - Whether to check tool result messages.
84
+ * @param options.exitBehavior - How to handle violations.
85
+ * @param options.violationMessage - Custom template for violation messages.
86
+ * @returns Middleware function that can be used to moderate agent traffic.
87
+ *
88
+ * @example Using model instance
89
+ * ```ts
90
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
91
+ *
92
+ * const middleware = openAIModerationMiddleware({
93
+ * checkInput: true,
94
+ * checkOutput: true,
95
+ * exitBehavior: "end"
96
+ * });
97
+ *
98
+ * const agent = createAgent({
99
+ * model: "openai:gpt-4o",
100
+ * tools: [...],
101
+ * middleware: [middleware],
102
+ * });
103
+ * ```
104
+ *
105
+ * @example Using model name
106
+ * ```ts
107
+ * import { createAgent, openAIModerationMiddleware } from "langchain";
108
+ *
109
+ * const middleware = openAIModerationMiddleware({
110
+ * model: "gpt-4o-mini",
111
+ * checkInput: true,
112
+ * checkOutput: true,
113
+ * exitBehavior: "end"
114
+ * });
115
+ *
116
+ * const agent = createAgent({
117
+ * model: "openai:gpt-4o",
118
+ * tools: [...],
119
+ * middleware: [middleware],
120
+ * });
121
+ * ```
122
+ *
123
+ * @example Custom violation message
124
+ * ```ts
125
+ * const middleware = openAIModerationMiddleware({
126
+ * violationMessage: "Content flagged: {categories}. Scores: {category_scores}"
127
+ * });
128
+ * ```
129
+ */
130
+ declare function openAIModerationMiddleware(options: OpenAIModerationMiddlewareOptions): AgentMiddleware;
131
+ //#endregion
132
+ export { OpenAIModerationMiddlewareOptions, openAIModerationMiddleware };
133
+ //# sourceMappingURL=moderation.d.ts.map
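
When `exitBehavior` is set to `"error"`, flagged content surfaces as an `OpenAIModerationError` carrying `content`, `stage`, `result`, and `originalMessage`, as defined in moderation.cjs above. The class itself is not listed in these export statements, so the sketch below identifies it by its `name` property rather than with `instanceof` against the class; the agent setup and the cast are illustrative assumptions.

```ts
import { createAgent, openAIModerationMiddleware } from "langchain";

const agent = createAgent({
  model: "openai:gpt-4o", // illustrative agent model
  tools: [],
  middleware: [
    openAIModerationMiddleware({ model: "gpt-4o-mini", exitBehavior: "error" }),
  ],
});

try {
  await agent.invoke({ messages: [{ role: "user", content: "..." }] });
} catch (err) {
  // The error class is not re-exported here, so match on its `name` field.
  if (err instanceof Error && err.name === "OpenAIModerationError") {
    // `stage` is "input" | "output" | "tool"; `result.categories` maps
    // category names to booleans (see moderation.cjs above).
    const { stage, result } = err as Error & {
      stage: "input" | "output" | "tool";
      result: { categories: Record<string, boolean> };
    };
    const flagged = Object.entries(result.categories)
      .filter(([, hit]) => hit)
      .map(([name]) => name.replace(/_/g, " "));
    console.warn(`Moderation blocked ${stage} content: ${flagged.join(", ")}`);
  } else {
    throw err;
  }
}
```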