glin-profanity 3.1.5 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +84 -566
  2. package/dist/{types-CdDqSZY7.d.cts → Filter-BGcyIAvO.d.ts} +4 -162
  3. package/dist/{types-CdDqSZY7.d.ts → Filter-D34Wsmrj.d.cts} +4 -162
  4. package/dist/frameworks/index.cjs +5257 -0
  5. package/dist/frameworks/index.d.cts +2 -0
  6. package/dist/frameworks/index.d.ts +2 -0
  7. package/dist/frameworks/index.js +5252 -0
  8. package/dist/frameworks/nextjs.cjs +5257 -0
  9. package/dist/frameworks/nextjs.d.cts +173 -0
  10. package/dist/frameworks/nextjs.d.ts +173 -0
  11. package/dist/frameworks/nextjs.js +5252 -0
  12. package/dist/index.cjs +151 -85
  13. package/dist/index.d.cts +5 -29
  14. package/dist/index.d.ts +5 -29
  15. package/dist/index.js +152 -85
  16. package/dist/integrations/index.cjs +6110 -0
  17. package/dist/integrations/index.d.cts +5 -0
  18. package/dist/integrations/index.d.ts +5 -0
  19. package/dist/integrations/index.js +6082 -0
  20. package/dist/integrations/langchain.cjs +5252 -0
  21. package/dist/integrations/langchain.d.cts +231 -0
  22. package/dist/integrations/langchain.d.ts +231 -0
  23. package/dist/integrations/langchain.js +5239 -0
  24. package/dist/integrations/openai.cjs +5367 -0
  25. package/dist/integrations/openai.d.cts +167 -0
  26. package/dist/integrations/openai.d.ts +167 -0
  27. package/dist/integrations/openai.js +5362 -0
  28. package/dist/integrations/semantic.cjs +5314 -0
  29. package/dist/integrations/semantic.d.cts +268 -0
  30. package/dist/integrations/semantic.d.ts +268 -0
  31. package/dist/integrations/semantic.js +5309 -0
  32. package/dist/integrations/vercel-ai.cjs +5282 -0
  33. package/dist/integrations/vercel-ai.d.cts +224 -0
  34. package/dist/integrations/vercel-ai.d.ts +224 -0
  35. package/dist/integrations/vercel-ai.js +5273 -0
  36. package/dist/ml/index.cjs +358 -56
  37. package/dist/ml/index.d.cts +5 -2
  38. package/dist/ml/index.d.ts +5 -2
  39. package/dist/ml/index.js +354 -57
  40. package/dist/ml/transformers.cjs +5237 -0
  41. package/dist/ml/transformers.d.cts +232 -0
  42. package/dist/ml/transformers.d.ts +232 -0
  43. package/dist/ml/transformers.js +5231 -0
  44. package/dist/multimodal/audio.cjs +5269 -0
  45. package/dist/multimodal/audio.d.cts +255 -0
  46. package/dist/multimodal/audio.d.ts +255 -0
  47. package/dist/multimodal/audio.js +5264 -0
  48. package/dist/multimodal/index.cjs +5432 -0
  49. package/dist/multimodal/index.d.cts +4 -0
  50. package/dist/multimodal/index.d.ts +4 -0
  51. package/dist/multimodal/index.js +5422 -0
  52. package/dist/multimodal/ocr.cjs +5193 -0
  53. package/dist/multimodal/ocr.d.cts +157 -0
  54. package/dist/multimodal/ocr.d.ts +157 -0
  55. package/dist/multimodal/ocr.js +5187 -0
  56. package/dist/react.cjs +5133 -0
  57. package/dist/react.d.cts +13 -0
  58. package/dist/react.d.ts +13 -0
  59. package/dist/react.js +5131 -0
  60. package/dist/types-B9c_ik4k.d.cts +88 -0
  61. package/dist/types-B9c_ik4k.d.ts +88 -0
  62. package/dist/types-BuKh9tvV.d.ts +20 -0
  63. package/dist/types-Ct_ueYqw.d.cts +76 -0
  64. package/dist/types-Ct_ueYqw.d.ts +76 -0
  65. package/dist/types-DI8nzwWc.d.cts +20 -0
  66. package/package.json +170 -3
@@ -0,0 +1,88 @@
1
+ /**
2
+ * Type definitions for glin-profanity JavaScript/TypeScript package.
3
+ * Unified API that mirrors the Python package structure.
4
+ */
5
+ /** Severity levels for profanity matches - unified with Python */
6
+ declare enum SeverityLevel {
7
+ EXACT = 1,
8
+ FUZZY = 2
9
+ }
10
+ /** Supported languages - unified list with Python */
11
+ type Language = 'arabic' | 'chinese' | 'czech' | 'danish' | 'dutch' | 'english' | 'esperanto' | 'finnish' | 'french' | 'german' | 'hindi' | 'hungarian' | 'italian' | 'japanese' | 'korean' | 'norwegian' | 'persian' | 'polish' | 'portuguese' | 'russian' | 'spanish' | 'swedish' | 'thai' | 'turkish';
12
+ /** Represents a profanity match in text - unified with Python */
13
+ interface Match {
14
+ word: string;
15
+ index: number;
16
+ severity: SeverityLevel;
17
+ contextScore?: number;
18
+ reason?: string;
19
+ isWhitelisted?: boolean;
20
+ }
21
+ /** Result of profanity check operation - unified field names */
22
+ interface CheckProfanityResult {
23
+ containsProfanity: boolean;
24
+ profaneWords: string[];
25
+ processedText?: string;
26
+ severityMap?: Record<string, SeverityLevel>;
27
+ matches?: Match[];
28
+ contextScore?: number;
29
+ reason?: string;
30
+ }
31
+ /** Configuration for context-aware filtering - unified with Python */
32
+ interface ContextAwareConfig {
33
+ enableContextAware?: boolean;
34
+ contextWindow?: number;
35
+ confidenceThreshold?: number;
36
+ domainWhitelists?: Record<string, string[]>;
37
+ }
38
+ /** Leetspeak detection intensity levels */
39
+ type LeetspeakLevel = 'basic' | 'moderate' | 'aggressive';
40
+ /** Main filter configuration options - unified with Python */
41
+ interface FilterConfig extends ContextAwareConfig {
42
+ languages?: Language[];
43
+ allLanguages?: boolean;
44
+ caseSensitive?: boolean;
45
+ wordBoundaries?: boolean;
46
+ customWords?: string[];
47
+ replaceWith?: string;
48
+ severityLevels?: boolean;
49
+ ignoreWords?: string[];
50
+ logProfanity?: boolean;
51
+ allowObfuscatedMatch?: boolean;
52
+ fuzzyToleranceLevel?: number;
53
+ /**
54
+ * Enable leetspeak detection (e.g., "f4ck" → "fuck").
55
+ * @default false
56
+ */
57
+ detectLeetspeak?: boolean;
58
+ /**
59
+ * Leetspeak detection intensity level.
60
+ * - `basic`: Numbers only (0→o, 1→i, 3→e, 4→a, 5→s)
61
+ * - `moderate`: Basic + symbols (@→a, $→s, !→i)
62
+ * - `aggressive`: All known substitutions
63
+ * @default 'moderate'
64
+ */
65
+ leetspeakLevel?: LeetspeakLevel;
66
+ /**
67
+ * Enable Unicode normalization to detect homoglyphs and obfuscation.
68
+ * @default true
69
+ */
70
+ normalizeUnicode?: boolean;
71
+ /**
72
+ * Cache profanity check results for repeated strings.
73
+ * @default false
74
+ */
75
+ cacheResults?: boolean;
76
+ /**
77
+ * Maximum cache size when caching is enabled.
78
+ * @default 1000
79
+ */
80
+ maxCacheSize?: number;
81
+ }
82
+ /** Result with minimum severity filtering */
83
+ interface FilteredProfanityResult {
84
+ result: CheckProfanityResult;
85
+ filteredWords: string[];
86
+ }
87
+
88
+ export { type CheckProfanityResult as C, type FilterConfig as F, type Language as L, type Match as M, SeverityLevel as S, type FilteredProfanityResult as a, type ContextAwareConfig as b };
@@ -0,0 +1,88 @@
1
+ /**
2
+ * Type definitions for glin-profanity JavaScript/TypeScript package.
3
+ * Unified API that mirrors the Python package structure.
4
+ */
5
+ /** Severity levels for profanity matches - unified with Python */
6
+ declare enum SeverityLevel {
7
+ EXACT = 1,
8
+ FUZZY = 2
9
+ }
10
+ /** Supported languages - unified list with Python */
11
+ type Language = 'arabic' | 'chinese' | 'czech' | 'danish' | 'dutch' | 'english' | 'esperanto' | 'finnish' | 'french' | 'german' | 'hindi' | 'hungarian' | 'italian' | 'japanese' | 'korean' | 'norwegian' | 'persian' | 'polish' | 'portuguese' | 'russian' | 'spanish' | 'swedish' | 'thai' | 'turkish';
12
+ /** Represents a profanity match in text - unified with Python */
13
+ interface Match {
14
+ word: string;
15
+ index: number;
16
+ severity: SeverityLevel;
17
+ contextScore?: number;
18
+ reason?: string;
19
+ isWhitelisted?: boolean;
20
+ }
21
+ /** Result of profanity check operation - unified field names */
22
+ interface CheckProfanityResult {
23
+ containsProfanity: boolean;
24
+ profaneWords: string[];
25
+ processedText?: string;
26
+ severityMap?: Record<string, SeverityLevel>;
27
+ matches?: Match[];
28
+ contextScore?: number;
29
+ reason?: string;
30
+ }
31
+ /** Configuration for context-aware filtering - unified with Python */
32
+ interface ContextAwareConfig {
33
+ enableContextAware?: boolean;
34
+ contextWindow?: number;
35
+ confidenceThreshold?: number;
36
+ domainWhitelists?: Record<string, string[]>;
37
+ }
38
+ /** Leetspeak detection intensity levels */
39
+ type LeetspeakLevel = 'basic' | 'moderate' | 'aggressive';
40
+ /** Main filter configuration options - unified with Python */
41
+ interface FilterConfig extends ContextAwareConfig {
42
+ languages?: Language[];
43
+ allLanguages?: boolean;
44
+ caseSensitive?: boolean;
45
+ wordBoundaries?: boolean;
46
+ customWords?: string[];
47
+ replaceWith?: string;
48
+ severityLevels?: boolean;
49
+ ignoreWords?: string[];
50
+ logProfanity?: boolean;
51
+ allowObfuscatedMatch?: boolean;
52
+ fuzzyToleranceLevel?: number;
53
+ /**
54
+ * Enable leetspeak detection (e.g., "f4ck" → "fuck").
55
+ * @default false
56
+ */
57
+ detectLeetspeak?: boolean;
58
+ /**
59
+ * Leetspeak detection intensity level.
60
+ * - `basic`: Numbers only (0→o, 1→i, 3→e, 4→a, 5→s)
61
+ * - `moderate`: Basic + symbols (@→a, $→s, !→i)
62
+ * - `aggressive`: All known substitutions
63
+ * @default 'moderate'
64
+ */
65
+ leetspeakLevel?: LeetspeakLevel;
66
+ /**
67
+ * Enable Unicode normalization to detect homoglyphs and obfuscation.
68
+ * @default true
69
+ */
70
+ normalizeUnicode?: boolean;
71
+ /**
72
+ * Cache profanity check results for repeated strings.
73
+ * @default false
74
+ */
75
+ cacheResults?: boolean;
76
+ /**
77
+ * Maximum cache size when caching is enabled.
78
+ * @default 1000
79
+ */
80
+ maxCacheSize?: number;
81
+ }
82
+ /** Result with minimum severity filtering */
83
+ interface FilteredProfanityResult {
84
+ result: CheckProfanityResult;
85
+ filteredWords: string[];
86
+ }
87
+
88
+ export { type CheckProfanityResult as C, type FilterConfig as F, type Language as L, type Match as M, SeverityLevel as S, type FilteredProfanityResult as a, type ContextAwareConfig as b };
@@ -0,0 +1,20 @@
1
+ import { F as FilterConfig, S as SeverityLevel, C as CheckProfanityResult } from './types-B9c_ik4k.js';
2
+
3
+ /**
4
+ * Configuration options for the profanity checker hook and functions.
5
+ * Extends FilterConfig with additional convenience options for V3 features.
6
+ */
7
+ interface ProfanityCheckerConfig extends Omit<FilterConfig, 'logProfanity'> {
8
+ /** Minimum severity level to include in results */
9
+ minSeverity?: SeverityLevel;
10
+ /** Auto-replace profanity with replaceWith string */
11
+ autoReplace?: boolean;
12
+ /** Custom callback when profanity is detected */
13
+ customActions?: (result: CheckProfanityResult) => void;
14
+ }
15
+ interface ProfanityCheckResult extends CheckProfanityResult {
16
+ filteredWords: string[];
17
+ autoReplaced: string;
18
+ }
19
+
20
+ export type { ProfanityCheckerConfig as P, ProfanityCheckResult as a };
@@ -0,0 +1,76 @@
1
+ /**
2
+ * Type definitions for ML-based profanity detection.
3
+ */
4
+ /**
5
+ * Toxicity categories detected by the TensorFlow.js model.
6
+ * These map to the civil comments dataset labels.
7
+ */
8
+ type ToxicityLabel = 'identity_attack' | 'insult' | 'obscene' | 'severe_toxicity' | 'sexual_explicit' | 'threat' | 'toxicity';
9
+ /**
10
+ * Result from a single toxicity prediction.
11
+ */
12
+ interface ToxicityPrediction {
13
+ /** The toxicity category */
14
+ label: ToxicityLabel;
15
+ /** Whether the text matches this category (null if below threshold) */
16
+ match: boolean | null;
17
+ /** Probability scores [non-toxic, toxic] */
18
+ probabilities: [number, number];
19
+ }
20
+ /**
21
+ * Result from ML-based toxicity analysis.
22
+ */
23
+ interface MLAnalysisResult {
24
+ /** Whether any toxicity was detected */
25
+ isToxic: boolean;
26
+ /** Overall toxicity score (0-1) */
27
+ overallScore: number;
28
+ /** Predictions for each category */
29
+ predictions: ToxicityPrediction[];
30
+ /** Categories that matched */
31
+ matchedCategories: ToxicityLabel[];
32
+ /** Processing time in milliseconds */
33
+ processingTimeMs: number;
34
+ }
35
+ /**
36
+ * Configuration for the ML toxicity detector.
37
+ */
38
+ interface MLDetectorConfig {
39
+ /**
40
+ * Minimum confidence threshold for predictions.
41
+ * Values below this threshold will return null for match.
42
+ * @default 0.85
43
+ */
44
+ threshold?: number;
45
+ /**
46
+ * Specific toxicity categories to check.
47
+ * If not specified, all categories are checked.
48
+ */
49
+ labels?: ToxicityLabel[];
50
+ /**
51
+ * Whether to load the model immediately on instantiation.
52
+ * If false, model will be loaded on first use.
53
+ * @default false
54
+ */
55
+ preloadModel?: boolean;
56
+ }
57
+ /**
58
+ * Combined result from both rule-based and ML detection.
59
+ */
60
+ interface HybridAnalysisResult {
61
+ /** Rule-based detection result */
62
+ ruleBasedResult: {
63
+ containsProfanity: boolean;
64
+ profaneWords: string[];
65
+ };
66
+ /** ML-based detection result (null if ML not enabled) */
67
+ mlResult: MLAnalysisResult | null;
68
+ /** Combined decision */
69
+ isToxic: boolean;
70
+ /** Confidence score for the decision */
71
+ confidence: number;
72
+ /** Reason for the decision */
73
+ reason: string;
74
+ }
75
+
76
+ export type { HybridAnalysisResult as H, MLDetectorConfig as M, ToxicityLabel as T, MLAnalysisResult as a, ToxicityPrediction as b };
@@ -0,0 +1,76 @@
1
+ /**
2
+ * Type definitions for ML-based profanity detection.
3
+ */
4
+ /**
5
+ * Toxicity categories detected by the TensorFlow.js model.
6
+ * These map to the civil comments dataset labels.
7
+ */
8
+ type ToxicityLabel = 'identity_attack' | 'insult' | 'obscene' | 'severe_toxicity' | 'sexual_explicit' | 'threat' | 'toxicity';
9
+ /**
10
+ * Result from a single toxicity prediction.
11
+ */
12
+ interface ToxicityPrediction {
13
+ /** The toxicity category */
14
+ label: ToxicityLabel;
15
+ /** Whether the text matches this category (null if below threshold) */
16
+ match: boolean | null;
17
+ /** Probability scores [non-toxic, toxic] */
18
+ probabilities: [number, number];
19
+ }
20
+ /**
21
+ * Result from ML-based toxicity analysis.
22
+ */
23
+ interface MLAnalysisResult {
24
+ /** Whether any toxicity was detected */
25
+ isToxic: boolean;
26
+ /** Overall toxicity score (0-1) */
27
+ overallScore: number;
28
+ /** Predictions for each category */
29
+ predictions: ToxicityPrediction[];
30
+ /** Categories that matched */
31
+ matchedCategories: ToxicityLabel[];
32
+ /** Processing time in milliseconds */
33
+ processingTimeMs: number;
34
+ }
35
+ /**
36
+ * Configuration for the ML toxicity detector.
37
+ */
38
+ interface MLDetectorConfig {
39
+ /**
40
+ * Minimum confidence threshold for predictions.
41
+ * Values below this threshold will return null for match.
42
+ * @default 0.85
43
+ */
44
+ threshold?: number;
45
+ /**
46
+ * Specific toxicity categories to check.
47
+ * If not specified, all categories are checked.
48
+ */
49
+ labels?: ToxicityLabel[];
50
+ /**
51
+ * Whether to load the model immediately on instantiation.
52
+ * If false, model will be loaded on first use.
53
+ * @default false
54
+ */
55
+ preloadModel?: boolean;
56
+ }
57
+ /**
58
+ * Combined result from both rule-based and ML detection.
59
+ */
60
+ interface HybridAnalysisResult {
61
+ /** Rule-based detection result */
62
+ ruleBasedResult: {
63
+ containsProfanity: boolean;
64
+ profaneWords: string[];
65
+ };
66
+ /** ML-based detection result (null if ML not enabled) */
67
+ mlResult: MLAnalysisResult | null;
68
+ /** Combined decision */
69
+ isToxic: boolean;
70
+ /** Confidence score for the decision */
71
+ confidence: number;
72
+ /** Reason for the decision */
73
+ reason: string;
74
+ }
75
+
76
+ export type { HybridAnalysisResult as H, MLDetectorConfig as M, ToxicityLabel as T, MLAnalysisResult as a, ToxicityPrediction as b };
@@ -0,0 +1,20 @@
1
+ import { F as FilterConfig, S as SeverityLevel, C as CheckProfanityResult } from './types-B9c_ik4k.cjs';
2
+
3
+ /**
4
+ * Configuration options for the profanity checker hook and functions.
5
+ * Extends FilterConfig with additional convenience options for V3 features.
6
+ */
7
+ interface ProfanityCheckerConfig extends Omit<FilterConfig, 'logProfanity'> {
8
+ /** Minimum severity level to include in results */
9
+ minSeverity?: SeverityLevel;
10
+ /** Auto-replace profanity with replaceWith string */
11
+ autoReplace?: boolean;
12
+ /** Custom callback when profanity is detected */
13
+ customActions?: (result: CheckProfanityResult) => void;
14
+ }
15
+ interface ProfanityCheckResult extends CheckProfanityResult {
16
+ filteredWords: string[];
17
+ autoReplaced: string;
18
+ }
19
+
20
+ export type { ProfanityCheckerConfig as P, ProfanityCheckResult as a };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "glin-profanity",
3
- "version": "3.1.5",
3
+ "version": "3.2.2",
4
4
  "description": "Glin-Profanity is a lightweight and efficient npm package designed to detect and filter profane language in text inputs across multiple languages. Whether you’re building a chat application, a comment section, or any platform where user-generated content is involved, Glin-Profanity helps you maintain a clean and respectful environment.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -17,6 +17,16 @@
17
17
  "default": "./dist/index.js"
18
18
  }
19
19
  },
20
+ "./react": {
21
+ "require": {
22
+ "types": "./dist/react.d.cts",
23
+ "default": "./dist/react.cjs"
24
+ },
25
+ "import": {
26
+ "types": "./dist/react.d.ts",
27
+ "default": "./dist/react.js"
28
+ }
29
+ },
20
30
  "./ml": {
21
31
  "require": {
22
32
  "types": "./dist/ml/index.d.cts",
@@ -26,12 +36,128 @@
26
36
  "types": "./dist/ml/index.d.ts",
27
37
  "default": "./dist/ml/index.js"
28
38
  }
39
+ },
40
+ "./ai": {
41
+ "require": {
42
+ "types": "./dist/integrations/index.d.cts",
43
+ "default": "./dist/integrations/index.cjs"
44
+ },
45
+ "import": {
46
+ "types": "./dist/integrations/index.d.ts",
47
+ "default": "./dist/integrations/index.js"
48
+ }
49
+ },
50
+ "./ai/openai": {
51
+ "require": {
52
+ "types": "./dist/integrations/openai.d.cts",
53
+ "default": "./dist/integrations/openai.cjs"
54
+ },
55
+ "import": {
56
+ "types": "./dist/integrations/openai.d.ts",
57
+ "default": "./dist/integrations/openai.js"
58
+ }
59
+ },
60
+ "./ai/langchain": {
61
+ "require": {
62
+ "types": "./dist/integrations/langchain.d.cts",
63
+ "default": "./dist/integrations/langchain.cjs"
64
+ },
65
+ "import": {
66
+ "types": "./dist/integrations/langchain.d.ts",
67
+ "default": "./dist/integrations/langchain.js"
68
+ }
69
+ },
70
+ "./ai/vercel": {
71
+ "require": {
72
+ "types": "./dist/integrations/vercel-ai.d.cts",
73
+ "default": "./dist/integrations/vercel-ai.cjs"
74
+ },
75
+ "import": {
76
+ "types": "./dist/integrations/vercel-ai.d.ts",
77
+ "default": "./dist/integrations/vercel-ai.js"
78
+ }
79
+ },
80
+ "./ai/semantic": {
81
+ "require": {
82
+ "types": "./dist/integrations/semantic.d.cts",
83
+ "default": "./dist/integrations/semantic.cjs"
84
+ },
85
+ "import": {
86
+ "types": "./dist/integrations/semantic.d.ts",
87
+ "default": "./dist/integrations/semantic.js"
88
+ }
89
+ },
90
+ "./ml/transformers": {
91
+ "require": {
92
+ "types": "./dist/ml/transformers.d.cts",
93
+ "default": "./dist/ml/transformers.cjs"
94
+ },
95
+ "import": {
96
+ "types": "./dist/ml/transformers.d.ts",
97
+ "default": "./dist/ml/transformers.js"
98
+ }
99
+ },
100
+ "./multimodal": {
101
+ "require": {
102
+ "types": "./dist/multimodal/index.d.cts",
103
+ "default": "./dist/multimodal/index.cjs"
104
+ },
105
+ "import": {
106
+ "types": "./dist/multimodal/index.d.ts",
107
+ "default": "./dist/multimodal/index.js"
108
+ }
109
+ },
110
+ "./ocr": {
111
+ "require": {
112
+ "types": "./dist/multimodal/ocr.d.cts",
113
+ "default": "./dist/multimodal/ocr.cjs"
114
+ },
115
+ "import": {
116
+ "types": "./dist/multimodal/ocr.d.ts",
117
+ "default": "./dist/multimodal/ocr.js"
118
+ }
119
+ },
120
+ "./audio": {
121
+ "require": {
122
+ "types": "./dist/multimodal/audio.d.cts",
123
+ "default": "./dist/multimodal/audio.cjs"
124
+ },
125
+ "import": {
126
+ "types": "./dist/multimodal/audio.d.ts",
127
+ "default": "./dist/multimodal/audio.js"
128
+ }
129
+ },
130
+ "./frameworks": {
131
+ "require": {
132
+ "types": "./dist/frameworks/index.d.cts",
133
+ "default": "./dist/frameworks/index.cjs"
134
+ },
135
+ "import": {
136
+ "types": "./dist/frameworks/index.d.ts",
137
+ "default": "./dist/frameworks/index.js"
138
+ }
139
+ },
140
+ "./nextjs": {
141
+ "require": {
142
+ "types": "./dist/frameworks/nextjs.d.cts",
143
+ "default": "./dist/frameworks/nextjs.cjs"
144
+ },
145
+ "import": {
146
+ "types": "./dist/frameworks/nextjs.d.ts",
147
+ "default": "./dist/frameworks/nextjs.js"
148
+ }
29
149
  }
30
150
  },
31
151
  "peerDependencies": {
32
152
  "react": ">=16.8.0",
33
153
  "@tensorflow-models/toxicity": ">=1.2.0",
34
- "@tensorflow/tfjs": ">=4.0.0"
154
+ "@tensorflow/tfjs": ">=4.0.0",
155
+ "zod": ">=3.0.0",
156
+ "openai": ">=4.0.0",
157
+ "@langchain/core": ">=0.1.0",
158
+ "ai": ">=3.0.0",
159
+ "@xenova/transformers": ">=2.0.0",
160
+ "tesseract.js": ">=5.0.0"
35
161
  },
36
162
  "peerDependenciesMeta": {
37
163
  "react": {
@@ -42,6 +168,24 @@
42
168
  },
43
169
  "@tensorflow-models/toxicity": {
44
170
  "optional": true
171
+ },
172
+ "zod": {
173
+ "optional": true
174
+ },
175
+ "openai": {
176
+ "optional": true
177
+ },
178
+ "@langchain/core": {
179
+ "optional": true
180
+ },
181
+ "ai": {
182
+ "optional": true
183
+ },
184
+ "@xenova/transformers": {
185
+ "optional": true
186
+ },
187
+ "tesseract.js": {
188
+ "optional": true
45
189
  }
46
190
  },
47
191
  "files": [
@@ -105,6 +249,14 @@
105
249
  "profanity-moderation-framework",
106
250
  "profanity-moderation-plugin",
107
251
  "profanity-moderation-module",
252
+ "openai",
253
+ "openai-function-calling",
254
+ "langchain",
255
+ "langchain-tool",
256
+ "vercel-ai",
257
+ "ai-sdk",
258
+ "semantic-analysis",
259
+ "embeddings",
108
260
  "tensorflow",
109
261
  "tensorflowjs",
110
262
  "ml",
@@ -115,7 +267,22 @@
115
267
  "neural-network",
116
268
  "leetspeak",
117
269
  "unicode",
118
- "homoglyph"
270
+ "homoglyph",
271
+ "transformers",
272
+ "transformers-js",
273
+ "huggingface",
274
+ "onnx",
275
+ "ocr",
276
+ "tesseract",
277
+ "image-moderation",
278
+ "audio-moderation",
279
+ "speech-to-text",
280
+ "whisper",
281
+ "nextjs",
282
+ "nextjs-middleware",
283
+ "server-actions",
284
+ "content-moderation",
285
+ "multimodal"
119
286
  ],
120
287
  "author": "glinr",
121
288
  "license": "ISC",