plugin-sensitive-filter-xr 0.1.6 → 0.1.7

package/README.md CHANGED
@@ -1,6 +1,6 @@
  # Sensitive Filter Middleware
  
- `plugin-sensitive-filter-xr` filters sensitive content for both input and output in two mutually exclusive modes:
+ `@xpert-ai/plugin-sensitive-filter` filters sensitive content for both input and output in two mutually exclusive modes:
  
  - `rule`: deterministic rules (`keyword` / `regex`)
  - `llm`: natural-language policy evaluation with rewrite-only enforcement
@@ -94,8 +94,4 @@ Current behavior:
  
  ## Validation Commands
  
- ```bash
- /Users/xr/Documents/code/xpert-plugins/xpertai/node_modules/.bin/tsc -p /Users/xr/Documents/code/xpert-plugins/xpertai/middlewares/sensitive-filter/tsconfig.lib.json --noEmit
- npx jest --runInBand src/lib/sensitiveFilter.spec.ts
- node /Users/xr/Documents/code/xpert-plugins/plugin-dev-harness/dist/index.js --workspace ./xpertai --plugin ./middlewares/sensitive-filter
- ```
+
@@ -1 +1 @@
- {"version":3,"file":"sensitiveFilter.d.ts","sourceRoot":"","sources":["../../src/lib/sensitiveFilter.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAa,oBAAoB,EAA8B,MAAM,kBAAkB,CAAA;AAGnG,OAAO,EACL,eAAe,EAGf,uBAAuB,EACvB,wBAAwB,EAEzB,MAAM,sBAAsB,CAAA;AAC7B,OAAO,EAML,qBAAqB,EAKtB,MAAM,YAAY,CAAA;AAulBnB,qBAEa,yBAA0B,YAAW,wBAAwB,CAAC,qBAAqB,CAAC;IAE/F,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAY;IAEvC,QAAQ,CAAC,IAAI,EAAE,oBAAoB,CA6NlC;IAEK,gBAAgB,CACpB,OAAO,EAAE,qBAAqB,EAC9B,OAAO,EAAE,uBAAuB,GAC/B,OAAO,CAAC,eAAe,CAAC;IAa3B,OAAO,CAAC,wBAAwB;IA6OhC,OAAO,CAAC,uBAAuB;CAkbhC;AAED,YAAY,EAAE,qBAAqB,EAAE,CAAA"}
+ {"version":3,"file":"sensitiveFilter.d.ts","sourceRoot":"","sources":["../../src/lib/sensitiveFilter.ts"],"names":[],"mappings":"AAMA,OAAO,KAAK,EAAa,oBAAoB,EAA8B,MAAM,kBAAkB,CAAA;AAGnG,OAAO,EACL,eAAe,EAGf,uBAAuB,EACvB,wBAAwB,EAEzB,MAAM,sBAAsB,CAAA;AAC7B,OAAO,EAML,qBAAqB,EAKtB,MAAM,YAAY,CAAA;AAixBnB,qBAEa,yBAA0B,YAAW,wBAAwB,CAAC,qBAAqB,CAAC;IAE/F,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAY;IAEvC,QAAQ,CAAC,IAAI,EAAE,oBAAoB,CA6NlC;IAEK,gBAAgB,CACpB,OAAO,EAAE,qBAAqB,EAC9B,OAAO,EAAE,uBAAuB,GAC/B,OAAO,CAAC,eAAe,CAAC;IAa3B,OAAO,CAAC,wBAAwB;IAgThC,OAAO,CAAC,uBAAuB;CA2fhC;AAED,YAAY,EAAE,qBAAqB,EAAE,CAAA"}
@@ -1,6 +1,8 @@
  import { __decorate, __metadata } from "tslib";
  import { z as z4 } from 'zod/v4';
- import { AIMessage, HumanMessage } from '@langchain/core/messages';
+ import { AIMessage, AIMessageChunk, HumanMessage } from '@langchain/core/messages';
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
  import { Inject, Injectable } from '@nestjs/common';
  import { CommandBus } from '@nestjs/cqrs';
  import { AgentMiddlewareStrategy, CreateModelClientCommand, WrapWorkflowNodeExecutionCommand, } from '@xpert-ai/plugin-sdk';
@@ -13,6 +15,7 @@ const CONFIG_PARSE_ERROR = '敏感词过滤配置格式不正确,请检查填
  const BUSINESS_RULES_VALIDATION_ERROR = '请至少配置 1 条有效业务规则(pattern/type/action/scope/severity)。';
  const LLM_MODE_VALIDATION_ERROR = '请完善 LLM 过滤配置:需填写过滤模型、生效范围、审核规则说明。';
  const INTERNAL_LLM_INVOKE_TAG = 'sensitive-filter/internal-eval';
+ const INTERNAL_SOURCE_STREAM_TAG = 'sensitive-filter/internal-source-stream';
  const INTERNAL_LLM_INVOKE_OPTIONS = {
      tags: [INTERNAL_LLM_INVOKE_TAG],
      metadata: {
@@ -141,6 +144,144 @@ function replaceModelResponseText(response, text) {
      }
      return new AIMessage(text);
  }
+ function cloneAiMessage(source) {
+     return new AIMessage({
+         content: source.content,
+         additional_kwargs: source.additional_kwargs,
+         response_metadata: source.response_metadata,
+         tool_calls: source.tool_calls,
+         invalid_tool_calls: source.invalid_tool_calls,
+         usage_metadata: source.usage_metadata,
+         id: source.id,
+         name: source.name,
+     });
+ }
+ function cloneAiMessageWithText(source, text) {
+     const cloned = cloneAiMessage(source);
+     cloned.content = text;
+     return cloned;
+ }
+ function toAiMessageChunk(value) {
+     if (value instanceof AIMessageChunk) {
+         return value;
+     }
+     if (!isRecord(value) || !('content' in value)) {
+         return null;
+     }
+     return new AIMessageChunk({
+         content: value['content'],
+         additional_kwargs: isRecord(value['additional_kwargs']) ? value['additional_kwargs'] : {},
+         response_metadata: isRecord(value['response_metadata']) ? value['response_metadata'] : {},
+         tool_call_chunks: Array.isArray(value['tool_call_chunks']) ? value['tool_call_chunks'] : [],
+         tool_calls: Array.isArray(value['tool_calls']) ? value['tool_calls'] : [],
+         invalid_tool_calls: Array.isArray(value['invalid_tool_calls']) ? value['invalid_tool_calls'] : [],
+         usage_metadata: isRecord(value['usage_metadata']) ? value['usage_metadata'] : undefined,
+         id: typeof value['id'] === 'string' ? value['id'] : undefined,
+     });
+ }
+ function toAiMessage(value) {
+     if (value instanceof AIMessage) {
+         return value;
+     }
+     if (value instanceof AIMessageChunk) {
+         return new AIMessage({
+             content: value.content,
+             additional_kwargs: value.additional_kwargs,
+             response_metadata: value.response_metadata,
+             tool_calls: value.tool_calls,
+             invalid_tool_calls: value.invalid_tool_calls,
+             usage_metadata: value.usage_metadata,
+             id: value.id,
+             name: value.name,
+         });
+     }
+     if (isRecord(value) && 'content' in value) {
+         return new AIMessage({
+             content: value['content'],
+             additional_kwargs: isRecord(value['additional_kwargs']) ? value['additional_kwargs'] : {},
+             response_metadata: isRecord(value['response_metadata']) ? value['response_metadata'] : {},
+             tool_calls: Array.isArray(value['tool_calls']) ? value['tool_calls'] : [],
+             invalid_tool_calls: Array.isArray(value['invalid_tool_calls']) ? value['invalid_tool_calls'] : [],
+             usage_metadata: isRecord(value['usage_metadata']) ? value['usage_metadata'] : undefined,
+             id: typeof value['id'] === 'string' ? value['id'] : undefined,
+             name: typeof value['name'] === 'string' ? value['name'] : undefined,
+         });
+     }
+     return new AIMessage(extractPrimitiveText(value));
+ }
+ function buildInternalSourceOptions(options) {
+     const tags = Array.isArray(options?.tags) ? options.tags : [];
+     const metadata = isRecord(options?.metadata) ? options.metadata : {};
+     return {
+         ...(options ?? {}),
+         tags: [...tags, INTERNAL_SOURCE_STREAM_TAG],
+         metadata: {
+             ...metadata,
+             internal: true,
+         },
+     };
+ }
+ class BufferedOutputProxyChatModel extends BaseChatModel {
+     constructor(innerModel, resolveOutput) {
+         super({});
+         this.innerModel = innerModel;
+         this.resolveOutput = resolveOutput;
+     }
+     _llmType() {
+         return 'sensitive-filter-output-proxy';
+     }
+     async collectInnerMessage(messages, options) {
+         const internalOptions = buildInternalSourceOptions(options);
+         const streamFn = this.innerModel?.stream;
+         if (typeof streamFn === 'function') {
+             let mergedChunk = null;
+             for await (const rawChunk of streamFn.call(this.innerModel, messages, internalOptions)) {
+                 const chunk = toAiMessageChunk(rawChunk);
+                 if (!chunk) {
+                     continue;
+                 }
+                 mergedChunk = mergedChunk ? mergedChunk.concat(chunk) : chunk;
+             }
+             if (mergedChunk) {
+                 return toAiMessage(mergedChunk);
+             }
+         }
+         return toAiMessage(await this.innerModel.invoke(messages, internalOptions));
+     }
+     async finalizeMessage(messages, options) {
+         const sourceMessage = await this.collectInnerMessage(messages, options);
+         return this.resolveOutput(sourceMessage, extractPrimitiveText(sourceMessage.content));
+     }
+     async _generate(messages, options, _runManager) {
+         const resolved = await this.finalizeMessage(messages, options);
+         return {
+             generations: [
+                 {
+                     text: extractPrimitiveText(resolved.finalMessage.content),
+                     message: resolved.finalMessage,
+                 },
+             ],
+         };
+     }
+     async *_streamResponseChunks(messages, options, runManager) {
+         const resolved = await this.finalizeMessage(messages, options);
+         const finalText = extractPrimitiveText(resolved.finalMessage.content);
+         if (!finalText) {
+             return;
+         }
+         const generationChunk = new ChatGenerationChunk({
+             message: new AIMessageChunk({
+                 content: finalText,
+                 id: resolved.finalMessage.id,
+             }),
+             text: finalText,
+         });
+         yield generationChunk;
+         await runManager?.handleLLMNewToken(finalText, undefined, undefined, undefined, undefined, {
+             chunk: generationChunk,
+         });
+     }
+ }
  function rewriteModelRequestInput(request, rewrittenText) {
      if (!Array.isArray(request?.messages) || request.messages.length === 0) {
          return request;
@@ -720,12 +861,14 @@ let SensitiveFilterMiddleware = class SensitiveFilterMiddleware {
      };
      let inputBlockedMessage = null;
      let pendingInputRewrite = null;
+     let bufferedOutputResolution = null;
      let finalAction = 'pass';
      let auditEntries = [];
      let runtimeConfigurable = null;
      const resetRunState = () => {
          inputBlockedMessage = null;
          pendingInputRewrite = null;
+         bufferedOutputResolution = null;
          finalAction = 'pass';
          auditEntries = [];
      };
@@ -847,7 +990,63 @@ let SensitiveFilterMiddleware = class SensitiveFilterMiddleware {
      }
      const modelRequest = pendingInputRewrite ? rewriteModelRequestInput(request, pendingInputRewrite) : request;
      pendingInputRewrite = null;
-     const response = await handler(modelRequest);
+     bufferedOutputResolution = null;
+     const shouldBufferOutput = compiledRules.some((rule) => rule.scope === 'output' || rule.scope === 'both');
+     const effectiveRequest = shouldBufferOutput
+         ? {
+             ...modelRequest,
+             model: new BufferedOutputProxyChatModel(modelRequest.model, async (message, outputText) => {
+                 if (message.tool_calls?.length || message.invalid_tool_calls?.length) {
+                     bufferedOutputResolution = {
+                         finalMessage: cloneAiMessage(message),
+                         matched: false,
+                         source: 'rule',
+                         reason: 'tool-call-skip',
+                         errorPolicyTriggered: false,
+                     };
+                     return bufferedOutputResolution;
+                 }
+                 const outputMatches = findMatches(outputText, 'output', compiledRules, normalize, caseSensitive);
+                 const winner = pickWinningRule(outputMatches);
+                 if (!winner) {
+                     bufferedOutputResolution = {
+                         finalMessage: cloneAiMessage(message),
+                         matched: false,
+                         source: 'rule',
+                         errorPolicyTriggered: false,
+                     };
+                     return bufferedOutputResolution;
+                 }
+                 const finalText = winner.action === 'block'
+                     ? winner.replacementText?.trim() || DEFAULT_OUTPUT_BLOCK_MESSAGE
+                     : rewriteTextByRule(outputText, winner, caseSensitive);
+                 bufferedOutputResolution = {
+                     finalMessage: cloneAiMessageWithText(message, finalText),
+                     matched: true,
+                     source: 'rule',
+                     action: winner.action,
+                     reason: `rule:${winner.id}`,
+                     errorPolicyTriggered: false,
+                 };
+                 return bufferedOutputResolution;
+             }),
+         }
+         : modelRequest;
+     const response = await handler(effectiveRequest);
+     if (bufferedOutputResolution) {
+         pushAudit({
+             phase: 'output',
+             matched: bufferedOutputResolution.matched,
+             source: bufferedOutputResolution.source,
+             action: bufferedOutputResolution.action,
+             reason: bufferedOutputResolution.reason,
+             errorPolicyTriggered: bufferedOutputResolution.errorPolicyTriggered,
+         });
+         if (bufferedOutputResolution.matched && bufferedOutputResolution.action) {
+             finalAction = bufferedOutputResolution.action === 'block' ? 'block' : 'rewrite';
+         }
+         return response;
+     }
      const outputText = extractModelResponseText(response);
      const outputMatches = findMatches(outputText, 'output', compiledRules, normalize, caseSensitive);
      const winner = pickWinningRule(outputMatches);
@@ -915,6 +1114,7 @@ let SensitiveFilterMiddleware = class SensitiveFilterMiddleware {
          return structuredModelPromises.get(method);
      };
      let pendingInputRewrite = null;
+     let bufferedOutputResolution = null;
      let finalAction = 'pass';
      let auditEntries = [];
      let runtimeConfigurable = null;
@@ -923,6 +1123,7 @@ let SensitiveFilterMiddleware = class SensitiveFilterMiddleware {
      let methodAttempts = [];
      const resetRunState = () => {
          pendingInputRewrite = null;
+         bufferedOutputResolution = null;
          finalAction = 'pass';
          auditEntries = [];
          resolvedOutputMethod = undefined;
@@ -1183,7 +1384,70 @@ let SensitiveFilterMiddleware = class SensitiveFilterMiddleware {
      const llmConfig = getLlmConfig();
      const modelRequest = pendingInputRewrite ? rewriteModelRequestInput(request, pendingInputRewrite) : request;
      pendingInputRewrite = null;
-     const response = await handler(modelRequest);
+     bufferedOutputResolution = null;
+     const effectiveRequest = modeIncludesScope(llmConfig.scope, 'output')
+         ? {
+             ...modelRequest,
+             model: new BufferedOutputProxyChatModel(modelRequest.model, async (message, outputText) => {
+                 if (message.tool_calls?.length || message.invalid_tool_calls?.length) {
+                     bufferedOutputResolution = {
+                         finalMessage: cloneAiMessage(message),
+                         matched: false,
+                         source: 'llm',
+                         reason: 'tool-call-skip',
+                         errorPolicyTriggered: false,
+                     };
+                     return bufferedOutputResolution;
+                 }
+                 if (!outputText) {
+                     bufferedOutputResolution = {
+                         finalMessage: cloneAiMessage(message),
+                         matched: false,
+                         source: 'llm',
+                         reason: 'empty-output',
+                         errorPolicyTriggered: false,
+                     };
+                     return bufferedOutputResolution;
+                 }
+                 let decision;
+                 let fromErrorPolicy = false;
+                 try {
+                     decision = await invokeAndTrack('output', outputText, request?.runtime, llmConfig);
+                 }
+                 catch (error) {
+                     decision = resolveOnErrorDecision(llmConfig, error);
+                     fromErrorPolicy = true;
+                 }
+                 const finalText = decision.matched && decision.action
+                     ? toNonEmptyString(decision.replacementText) ?? llmConfig.rewriteFallbackText
+                     : outputText;
+                 bufferedOutputResolution = {
+                     finalMessage: cloneAiMessageWithText(message, finalText),
+                     matched: decision.matched,
+                     source: fromErrorPolicy ? 'error-policy' : 'llm',
+                     action: decision.action,
+                     reason: decision.reason,
+                     errorPolicyTriggered: fromErrorPolicy,
+                 };
+                 return bufferedOutputResolution;
+             }),
+         }
+         : modelRequest;
+     const response = await handler(effectiveRequest);
+     if (bufferedOutputResolution) {
+         pushAudit({
+             phase: 'output',
+             matched: bufferedOutputResolution.matched,
+             source: bufferedOutputResolution.source,
+             action: bufferedOutputResolution.action,
+             reason: bufferedOutputResolution.reason,
+             errorPolicyTriggered: bufferedOutputResolution.errorPolicyTriggered,
+         });
+         if (bufferedOutputResolution.matched && bufferedOutputResolution.action) {
+             finalAction = 'rewrite';
+         }
+         return response;
+     }
      if (!modeIncludesScope(llmConfig.scope, 'output')) {
          pushAudit({
              phase: 'output',
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "plugin-sensitive-filter-xr",
-   "version": "0.1.6",
+   "version": "0.1.7",
    "author": {
      "name": "XpertAI",
      "url": "https://xpertai.cn"