glin-profanity 2.3.8 → 3.1.0

This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -0,0 +1,357 @@
+ import { T as ToxicityLabel, f as MLDetectorConfig, e as MLAnalysisResult, a as FilterConfig, C as CheckProfanityResult, H as HybridAnalysisResult, F as Filter } from '../types-BgQe4FSE.cjs';
+ export { d as ToxicityPrediction } from '../types-BgQe4FSE.cjs';
+
+ /**
+  * ML-based toxicity detection using TensorFlow.js.
+  *
+  * This module provides optional ML-based profanity/toxicity detection
+  * using the TensorFlow.js toxicity model trained on the civil comments dataset.
+  *
+  * IMPORTANT: This requires optional peer dependencies:
+  * - @tensorflow/tfjs
+  * - @tensorflow-models/toxicity
+  *
+  * Install with: npm install @tensorflow/tfjs @tensorflow-models/toxicity
+  *
+  * @example
+  * ```typescript
+  * import { ToxicityDetector } from 'glin-profanity/ml';
+  *
+  * const detector = new ToxicityDetector({ threshold: 0.9 });
+  * await detector.loadModel();
+  *
+  * const result = await detector.analyze('some text to check');
+  * console.log(result.isToxic);
+  * ```
+  */
+
+ interface ToxicityModelPrediction {
+     label: string;
+     results: Array<{
+         match: boolean | null;
+         probabilities: Float32Array | number[];
+     }>;
+ }
+ interface ToxicityModel {
+     classify(sentences: string[]): Promise<ToxicityModelPrediction[]>;
+ }
+ /**
+  * ML-based toxicity detector using TensorFlow.js.
+  *
+  * This class provides neural network-based toxicity detection that can
+  * identify various types of harmful content including insults, threats,
+  * identity attacks, and obscenity.
+  *
+  * The model is loaded lazily and cached for subsequent calls.
+  */
+ declare class ToxicityDetector {
+     private model;
+     private loadingPromise;
+     private config;
+     private isAvailable;
+     /**
+      * All available toxicity labels.
+      */
+     static readonly ALL_LABELS: ToxicityLabel[];
+     /**
+      * Creates a new ToxicityDetector instance.
+      *
+      * @param config - Configuration options
+      *
+      * @example
+      * ```typescript
+      * // Basic usage with default threshold (0.85)
+      * const detector = new ToxicityDetector();
+      *
+      * // Custom threshold for higher precision
+      * const strictDetector = new ToxicityDetector({ threshold: 0.95 });
+      *
+      * // Check only specific categories
+      * const customDetector = new ToxicityDetector({
+      *   threshold: 0.8,
+      *   labels: ['insult', 'threat', 'obscene'],
+      * });
+      * ```
+      */
+     constructor(config?: MLDetectorConfig);
+     /**
+      * Dynamic import wrapper to avoid TypeScript static analysis issues.
+      * Uses Function constructor to bypass module resolution at compile time.
+      * @internal
+      */
+     private dynamicImport;
+     /**
+      * Checks if TensorFlow.js and the toxicity model are available.
+      * This performs a lazy check on first call and caches the result.
+      *
+      * @returns True if ML dependencies are available
+      */
+     checkAvailability(): Promise<boolean>;
+     /**
+      * Loads the toxicity model.
+      * This is called automatically on first analyze() call if not called explicitly.
+      *
+      * @returns The loaded model
+      * @throws Error if TensorFlow.js dependencies are not installed
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      *
+      * // Explicitly preload model (optional)
+      * await detector.loadModel();
+      *
+      * // Or let it load automatically on first use
+      * const result = await detector.analyze('text');
+      * ```
+      */
+     loadModel(): Promise<ToxicityModel>;
+     private doLoadModel;
+     /**
+      * Analyzes text for toxicity using the ML model.
+      *
+      * @param text - Text to analyze
+      * @returns Analysis result with predictions and scores
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      * const result = await detector.analyze('you are stupid');
+      *
+      * console.log(result.isToxic); // true
+      * console.log(result.overallScore); // 0.92
+      * console.log(result.matchedCategories); // ['insult', 'toxicity']
+      * ```
+      */
+     analyze(text: string): Promise<MLAnalysisResult>;
+     /**
+      * Analyzes multiple texts in a batch for better performance.
+      *
+      * @param texts - Array of texts to analyze
+      * @returns Array of analysis results
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      * const results = await detector.analyzeBatch([
+      *   'hello friend',
+      *   'you are terrible',
+      *   'great work!',
+      * ]);
+      *
+      * results.forEach((result, i) => {
+      *   console.log(`Text ${i}: ${result.isToxic ? 'toxic' : 'clean'}`);
+      * });
+      * ```
+      */
+     analyzeBatch(texts: string[]): Promise<MLAnalysisResult[]>;
+     /**
+      * Simple boolean check for toxicity.
+      *
+      * @param text - Text to check
+      * @returns True if text is detected as toxic
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      *
+      * if (await detector.isToxic('some user input')) {
+      *   console.log('Content flagged as toxic');
+      * }
+      * ```
+      */
+     isToxic(text: string): Promise<boolean>;
+     /**
+      * Gets the toxicity score for text (0-1).
+      *
+      * @param text - Text to score
+      * @returns Toxicity score from 0 (clean) to 1 (highly toxic)
+      */
+     getScore(text: string): Promise<number>;
+     /**
+      * Disposes of the model to free memory.
+      * The model will be reloaded on next analyze() call.
+      */
+     dispose(): void;
+     /**
+      * Gets the current configuration.
+      */
+     getConfig(): Required<MLDetectorConfig>;
+     /**
+      * Checks if the model is currently loaded.
+      */
+     isModelLoaded(): boolean;
+ }
+
+ /**
+  * Hybrid filter combining rule-based and ML-based detection.
+  *
+  * This class provides the best of both worlds:
+  * - Fast rule-based detection for common profanity
+  * - ML-based detection for contextual toxicity
+  *
+  * @example
+  * ```typescript
+  * import { HybridFilter } from 'glin-profanity/ml';
+  *
+  * const filter = new HybridFilter({
+  *   // Rule-based config
+  *   languages: ['english'],
+  *   detectLeetspeak: true,
+  *   // ML config
+  *   enableML: true,
+  *   mlThreshold: 0.85,
+  * });
+  *
+  * await filter.initialize();
+  *
+  * const result = await filter.checkProfanityAsync('some text');
+  * console.log(result.isToxic);
+  * ```
+  */
+
+ /**
+  * Hybrid filter configuration.
+  */
+ interface HybridFilterConfig extends FilterConfig {
+     /**
+      * Enable ML-based detection.
+      * Requires @tensorflow/tfjs and @tensorflow-models/toxicity.
+      * @default false
+      */
+     enableML?: boolean;
+     /**
+      * ML confidence threshold.
+      * @default 0.85
+      */
+     mlThreshold?: number;
+     /**
+      * Specific ML toxicity categories to check.
+      */
+     mlLabels?: ToxicityLabel[];
+     /**
+      * Preload ML model on initialization.
+      * @default false
+      */
+     preloadML?: boolean;
+     /**
+      * How to combine rule-based and ML results.
+      * - 'or': Flag if either method detects toxicity (more sensitive)
+      * - 'and': Flag only if both methods detect toxicity (more precise)
+      * - 'ml-override': Use ML result if available, fallback to rules
+      * - 'rules-first': Use rules for speed, ML for borderline cases
+      * @default 'or'
+      */
+     combinationMode?: 'or' | 'and' | 'ml-override' | 'rules-first';
+     /**
+      * Score threshold for "borderline" cases in rules-first mode.
+      * If rule-based detection is uncertain (near this threshold),
+      * ML will be used for confirmation.
+      * @default 0.5
+      */
+     borderlineThreshold?: number;
+ }
+ /**
+  * Hybrid profanity filter combining rule-based and ML detection.
+  */
+ declare class HybridFilter {
+     private ruleFilter;
+     private mlDetector;
+     private config;
+     private mlInitialized;
+     /**
+      * Creates a new HybridFilter instance.
+      *
+      * @param config - Configuration options
+      */
+     constructor(config?: HybridFilterConfig);
+     /**
+      * Initializes the hybrid filter, loading the ML model if enabled.
+      * Call this before using async methods for best performance.
+      *
+      * @example
+      * ```typescript
+      * const filter = new HybridFilter({ enableML: true });
+      * await filter.initialize();
+      * // Now ready for fast async checks
+      * ```
+      */
+     initialize(): Promise<void>;
+     /**
+      * Checks if ML is available and initialized.
+      */
+     isMLReady(): boolean;
+     /**
+      * Synchronous profanity check using only rule-based detection.
+      * Use this for fast, synchronous checks when ML isn't needed.
+      *
+      * @param text - Text to check
+      * @returns True if profanity detected
+      */
+     isProfane(text: string): boolean;
+     /**
+      * Synchronous detailed check using only rule-based detection.
+      *
+      * @param text - Text to check
+      * @returns Detailed profanity check result
+      */
+     checkProfanity(text: string): CheckProfanityResult;
+     /**
+      * Async profanity check using both rule-based and ML detection.
+      *
+      * @param text - Text to check
+      * @returns Combined analysis result
+      *
+      * @example
+      * ```typescript
+      * const filter = new HybridFilter({
+      *   enableML: true,
+      *   combinationMode: 'or',
+      * });
+      * await filter.initialize();
+      *
+      * const result = await filter.checkProfanityAsync('some text');
+      * if (result.isToxic) {
+      *   console.log('Reason:', result.reason);
+      *   console.log('Confidence:', result.confidence);
+      * }
+      * ```
+      */
+     checkProfanityAsync(text: string): Promise<HybridAnalysisResult>;
+     /**
+      * Simple async boolean check for toxicity.
+      *
+      * @param text - Text to check
+      * @returns True if toxic
+      */
+     isToxicAsync(text: string): Promise<boolean>;
+     /**
+      * Analyzes text with ML only (if available).
+      *
+      * @param text - Text to analyze
+      * @returns ML analysis result or null if ML not available
+      */
+     analyzeWithML(text: string): Promise<MLAnalysisResult | null>;
+     /**
+      * Batch analysis for multiple texts.
+      *
+      * @param texts - Array of texts to analyze
+      * @returns Array of hybrid analysis results
+      */
+     checkProfanityBatchAsync(texts: string[]): Promise<HybridAnalysisResult[]>;
+     private combineResults;
+     /**
+      * Gets the underlying rule-based filter.
+      */
+     getRuleFilter(): Filter;
+     /**
+      * Gets the underlying ML detector (if enabled).
+      */
+     getMLDetector(): ToxicityDetector | null;
+     /**
+      * Disposes of resources (ML model).
+      */
+     dispose(): void;
+ }
+
+ export { HybridAnalysisResult, HybridFilter, type HybridFilterConfig, MLAnalysisResult, MLDetectorConfig, ToxicityDetector, ToxicityLabel };
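Because `@tensorflow/tfjs` and `@tensorflow-models/toxicity` are optional peer dependencies, `loadModel()` throws when they are missing, so a consumer will usually gate ML scoring on `checkAvailability()`. The sketch below is not taken from the package; it only uses the API declared above, and the `scoreComment` wrapper with its `null` fallback is an illustrative assumption.

```typescript
import { ToxicityDetector } from 'glin-profanity/ml';

// Hypothetical helper: score a comment with the ML detector when the optional
// TensorFlow.js peer dependencies are installed, otherwise signal "no score".
async function scoreComment(text: string): Promise<number | null> {
  const detector = new ToxicityDetector({ threshold: 0.85 });

  // checkAvailability() resolves to false when the peer deps are missing,
  // letting the caller fall back to rule-based filtering instead of catching
  // the error that loadModel() would throw.
  if (!(await detector.checkAvailability())) {
    return null;
  }

  const result = await detector.analyze(text);
  return result.overallScore; // 0 (clean) to 1 (highly toxic)
}
```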
@@ -0,0 +1,357 @@
+ import { T as ToxicityLabel, f as MLDetectorConfig, e as MLAnalysisResult, a as FilterConfig, C as CheckProfanityResult, H as HybridAnalysisResult, F as Filter } from '../types-BgQe4FSE.js';
+ export { d as ToxicityPrediction } from '../types-BgQe4FSE.js';
+
+ /**
+  * ML-based toxicity detection using TensorFlow.js.
+  *
+  * This module provides optional ML-based profanity/toxicity detection
+  * using the TensorFlow.js toxicity model trained on the civil comments dataset.
+  *
+  * IMPORTANT: This requires optional peer dependencies:
+  * - @tensorflow/tfjs
+  * - @tensorflow-models/toxicity
+  *
+  * Install with: npm install @tensorflow/tfjs @tensorflow-models/toxicity
+  *
+  * @example
+  * ```typescript
+  * import { ToxicityDetector } from 'glin-profanity/ml';
+  *
+  * const detector = new ToxicityDetector({ threshold: 0.9 });
+  * await detector.loadModel();
+  *
+  * const result = await detector.analyze('some text to check');
+  * console.log(result.isToxic);
+  * ```
+  */
+
+ interface ToxicityModelPrediction {
+     label: string;
+     results: Array<{
+         match: boolean | null;
+         probabilities: Float32Array | number[];
+     }>;
+ }
+ interface ToxicityModel {
+     classify(sentences: string[]): Promise<ToxicityModelPrediction[]>;
+ }
+ /**
+  * ML-based toxicity detector using TensorFlow.js.
+  *
+  * This class provides neural network-based toxicity detection that can
+  * identify various types of harmful content including insults, threats,
+  * identity attacks, and obscenity.
+  *
+  * The model is loaded lazily and cached for subsequent calls.
+  */
+ declare class ToxicityDetector {
+     private model;
+     private loadingPromise;
+     private config;
+     private isAvailable;
+     /**
+      * All available toxicity labels.
+      */
+     static readonly ALL_LABELS: ToxicityLabel[];
+     /**
+      * Creates a new ToxicityDetector instance.
+      *
+      * @param config - Configuration options
+      *
+      * @example
+      * ```typescript
+      * // Basic usage with default threshold (0.85)
+      * const detector = new ToxicityDetector();
+      *
+      * // Custom threshold for higher precision
+      * const strictDetector = new ToxicityDetector({ threshold: 0.95 });
+      *
+      * // Check only specific categories
+      * const customDetector = new ToxicityDetector({
+      *   threshold: 0.8,
+      *   labels: ['insult', 'threat', 'obscene'],
+      * });
+      * ```
+      */
+     constructor(config?: MLDetectorConfig);
+     /**
+      * Dynamic import wrapper to avoid TypeScript static analysis issues.
+      * Uses Function constructor to bypass module resolution at compile time.
+      * @internal
+      */
+     private dynamicImport;
+     /**
+      * Checks if TensorFlow.js and the toxicity model are available.
+      * This performs a lazy check on first call and caches the result.
+      *
+      * @returns True if ML dependencies are available
+      */
+     checkAvailability(): Promise<boolean>;
+     /**
+      * Loads the toxicity model.
+      * This is called automatically on first analyze() call if not called explicitly.
+      *
+      * @returns The loaded model
+      * @throws Error if TensorFlow.js dependencies are not installed
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      *
+      * // Explicitly preload model (optional)
+      * await detector.loadModel();
+      *
+      * // Or let it load automatically on first use
+      * const result = await detector.analyze('text');
+      * ```
+      */
+     loadModel(): Promise<ToxicityModel>;
+     private doLoadModel;
+     /**
+      * Analyzes text for toxicity using the ML model.
+      *
+      * @param text - Text to analyze
+      * @returns Analysis result with predictions and scores
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      * const result = await detector.analyze('you are stupid');
+      *
+      * console.log(result.isToxic); // true
+      * console.log(result.overallScore); // 0.92
+      * console.log(result.matchedCategories); // ['insult', 'toxicity']
+      * ```
+      */
+     analyze(text: string): Promise<MLAnalysisResult>;
+     /**
+      * Analyzes multiple texts in a batch for better performance.
+      *
+      * @param texts - Array of texts to analyze
+      * @returns Array of analysis results
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      * const results = await detector.analyzeBatch([
+      *   'hello friend',
+      *   'you are terrible',
+      *   'great work!',
+      * ]);
+      *
+      * results.forEach((result, i) => {
+      *   console.log(`Text ${i}: ${result.isToxic ? 'toxic' : 'clean'}`);
+      * });
+      * ```
+      */
+     analyzeBatch(texts: string[]): Promise<MLAnalysisResult[]>;
+     /**
+      * Simple boolean check for toxicity.
+      *
+      * @param text - Text to check
+      * @returns True if text is detected as toxic
+      *
+      * @example
+      * ```typescript
+      * const detector = new ToxicityDetector();
+      *
+      * if (await detector.isToxic('some user input')) {
+      *   console.log('Content flagged as toxic');
+      * }
+      * ```
+      */
+     isToxic(text: string): Promise<boolean>;
+     /**
+      * Gets the toxicity score for text (0-1).
+      *
+      * @param text - Text to score
+      * @returns Toxicity score from 0 (clean) to 1 (highly toxic)
+      */
+     getScore(text: string): Promise<number>;
+     /**
+      * Disposes of the model to free memory.
+      * The model will be reloaded on next analyze() call.
+      */
+     dispose(): void;
+     /**
+      * Gets the current configuration.
+      */
+     getConfig(): Required<MLDetectorConfig>;
+     /**
+      * Checks if the model is currently loaded.
+      */
+     isModelLoaded(): boolean;
+ }
+
+ /**
+  * Hybrid filter combining rule-based and ML-based detection.
+  *
+  * This class provides the best of both worlds:
+  * - Fast rule-based detection for common profanity
+  * - ML-based detection for contextual toxicity
+  *
+  * @example
+  * ```typescript
+  * import { HybridFilter } from 'glin-profanity/ml';
+  *
+  * const filter = new HybridFilter({
+  *   // Rule-based config
+  *   languages: ['english'],
+  *   detectLeetspeak: true,
+  *   // ML config
+  *   enableML: true,
+  *   mlThreshold: 0.85,
+  * });
+  *
+  * await filter.initialize();
+  *
+  * const result = await filter.checkProfanityAsync('some text');
+  * console.log(result.isToxic);
+  * ```
+  */
+
+ /**
+  * Hybrid filter configuration.
+  */
+ interface HybridFilterConfig extends FilterConfig {
+     /**
+      * Enable ML-based detection.
+      * Requires @tensorflow/tfjs and @tensorflow-models/toxicity.
+      * @default false
+      */
+     enableML?: boolean;
+     /**
+      * ML confidence threshold.
+      * @default 0.85
+      */
+     mlThreshold?: number;
+     /**
+      * Specific ML toxicity categories to check.
+      */
+     mlLabels?: ToxicityLabel[];
+     /**
+      * Preload ML model on initialization.
+      * @default false
+      */
+     preloadML?: boolean;
+     /**
+      * How to combine rule-based and ML results.
+      * - 'or': Flag if either method detects toxicity (more sensitive)
+      * - 'and': Flag only if both methods detect toxicity (more precise)
+      * - 'ml-override': Use ML result if available, fallback to rules
+      * - 'rules-first': Use rules for speed, ML for borderline cases
+      * @default 'or'
+      */
+     combinationMode?: 'or' | 'and' | 'ml-override' | 'rules-first';
+     /**
+      * Score threshold for "borderline" cases in rules-first mode.
+      * If rule-based detection is uncertain (near this threshold),
+      * ML will be used for confirmation.
+      * @default 0.5
+      */
+     borderlineThreshold?: number;
+ }
+ /**
+  * Hybrid profanity filter combining rule-based and ML detection.
+  */
+ declare class HybridFilter {
+     private ruleFilter;
+     private mlDetector;
+     private config;
+     private mlInitialized;
+     /**
+      * Creates a new HybridFilter instance.
+      *
+      * @param config - Configuration options
+      */
+     constructor(config?: HybridFilterConfig);
+     /**
+      * Initializes the hybrid filter, loading the ML model if enabled.
+      * Call this before using async methods for best performance.
+      *
+      * @example
+      * ```typescript
+      * const filter = new HybridFilter({ enableML: true });
+      * await filter.initialize();
+      * // Now ready for fast async checks
+      * ```
+      */
+     initialize(): Promise<void>;
+     /**
+      * Checks if ML is available and initialized.
+      */
+     isMLReady(): boolean;
+     /**
+      * Synchronous profanity check using only rule-based detection.
+      * Use this for fast, synchronous checks when ML isn't needed.
+      *
+      * @param text - Text to check
+      * @returns True if profanity detected
+      */
+     isProfane(text: string): boolean;
+     /**
+      * Synchronous detailed check using only rule-based detection.
+      *
+      * @param text - Text to check
+      * @returns Detailed profanity check result
+      */
+     checkProfanity(text: string): CheckProfanityResult;
+     /**
+      * Async profanity check using both rule-based and ML detection.
+      *
+      * @param text - Text to check
+      * @returns Combined analysis result
+      *
+      * @example
+      * ```typescript
+      * const filter = new HybridFilter({
+      *   enableML: true,
+      *   combinationMode: 'or',
+      * });
+      * await filter.initialize();
+      *
+      * const result = await filter.checkProfanityAsync('some text');
+      * if (result.isToxic) {
+      *   console.log('Reason:', result.reason);
+      *   console.log('Confidence:', result.confidence);
+      * }
+      * ```
+      */
+     checkProfanityAsync(text: string): Promise<HybridAnalysisResult>;
+     /**
+      * Simple async boolean check for toxicity.
+      *
+      * @param text - Text to check
+      * @returns True if toxic
+      */
+     isToxicAsync(text: string): Promise<boolean>;
+     /**
+      * Analyzes text with ML only (if available).
+      *
+      * @param text - Text to analyze
+      * @returns ML analysis result or null if ML not available
+      */
+     analyzeWithML(text: string): Promise<MLAnalysisResult | null>;
+     /**
+      * Batch analysis for multiple texts.
+      *
+      * @param texts - Array of texts to analyze
+      * @returns Array of hybrid analysis results
+      */
+     checkProfanityBatchAsync(texts: string[]): Promise<HybridAnalysisResult[]>;
+     private combineResults;
+     /**
+      * Gets the underlying rule-based filter.
+      */
+     getRuleFilter(): Filter;
+     /**
+      * Gets the underlying ML detector (if enabled).
+      */
+     getMLDetector(): ToxicityDetector | null;
+     /**
+      * Disposes of resources (ML model).
+      */
+     dispose(): void;
+ }
+
+ export { HybridAnalysisResult, HybridFilter, type HybridFilterConfig, MLAnalysisResult, MLDetectorConfig, ToxicityDetector, ToxicityLabel };
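The new `combinationMode` option decides how the rule-based and ML verdicts are merged ('or', 'and', 'ml-override', 'rules-first'). As a sketch against the API declared in this diff only, and not taken from the package documentation: 'rules-first' keeps most checks on the fast rule path and consults the model when the rule score is near `borderlineThreshold`. The `moderate` wrapper and its allow/block outcome are illustrative assumptions.

```typescript
import { HybridFilter } from 'glin-profanity/ml';

// 'rules-first': cheap rule-based checks by default, ML only for borderline
// cases. All option names come from HybridFilterConfig in the declarations
// above; the moderation policy itself (allow/block) is a made-up example.
const filter = new HybridFilter({
  languages: ['english'],
  enableML: true,
  combinationMode: 'rules-first',
  mlThreshold: 0.85,
  borderlineThreshold: 0.5,
});

async function moderate(text: string): Promise<'allow' | 'block'> {
  // initialize() loads the ML model if enableML is set and the optional
  // TensorFlow.js peer dependencies are installed.
  if (!filter.isMLReady()) {
    await filter.initialize();
  }

  const result = await filter.checkProfanityAsync(text);
  if (result.isToxic) {
    console.log('Flagged:', result.reason, '| confidence:', result.confidence);
    return 'block';
  }
  return 'allow';
}
```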