glin-profanity 3.1.5 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +84 -566
  2. package/dist/{types-CdDqSZY7.d.cts → Filter-BGcyIAvO.d.ts} +4 -162
  3. package/dist/{types-CdDqSZY7.d.ts → Filter-D34Wsmrj.d.cts} +4 -162
  4. package/dist/frameworks/index.cjs +5257 -0
  5. package/dist/frameworks/index.d.cts +2 -0
  6. package/dist/frameworks/index.d.ts +2 -0
  7. package/dist/frameworks/index.js +5252 -0
  8. package/dist/frameworks/nextjs.cjs +5257 -0
  9. package/dist/frameworks/nextjs.d.cts +173 -0
  10. package/dist/frameworks/nextjs.d.ts +173 -0
  11. package/dist/frameworks/nextjs.js +5252 -0
  12. package/dist/index.cjs +151 -85
  13. package/dist/index.d.cts +5 -29
  14. package/dist/index.d.ts +5 -29
  15. package/dist/index.js +152 -85
  16. package/dist/integrations/index.cjs +6110 -0
  17. package/dist/integrations/index.d.cts +5 -0
  18. package/dist/integrations/index.d.ts +5 -0
  19. package/dist/integrations/index.js +6082 -0
  20. package/dist/integrations/langchain.cjs +5252 -0
  21. package/dist/integrations/langchain.d.cts +231 -0
  22. package/dist/integrations/langchain.d.ts +231 -0
  23. package/dist/integrations/langchain.js +5239 -0
  24. package/dist/integrations/openai.cjs +5367 -0
  25. package/dist/integrations/openai.d.cts +167 -0
  26. package/dist/integrations/openai.d.ts +167 -0
  27. package/dist/integrations/openai.js +5362 -0
  28. package/dist/integrations/semantic.cjs +5314 -0
  29. package/dist/integrations/semantic.d.cts +268 -0
  30. package/dist/integrations/semantic.d.ts +268 -0
  31. package/dist/integrations/semantic.js +5309 -0
  32. package/dist/integrations/vercel-ai.cjs +5282 -0
  33. package/dist/integrations/vercel-ai.d.cts +224 -0
  34. package/dist/integrations/vercel-ai.d.ts +224 -0
  35. package/dist/integrations/vercel-ai.js +5273 -0
  36. package/dist/ml/index.cjs +358 -56
  37. package/dist/ml/index.d.cts +5 -2
  38. package/dist/ml/index.d.ts +5 -2
  39. package/dist/ml/index.js +354 -57
  40. package/dist/ml/transformers.cjs +5237 -0
  41. package/dist/ml/transformers.d.cts +232 -0
  42. package/dist/ml/transformers.d.ts +232 -0
  43. package/dist/ml/transformers.js +5231 -0
  44. package/dist/multimodal/audio.cjs +5269 -0
  45. package/dist/multimodal/audio.d.cts +255 -0
  46. package/dist/multimodal/audio.d.ts +255 -0
  47. package/dist/multimodal/audio.js +5264 -0
  48. package/dist/multimodal/index.cjs +5432 -0
  49. package/dist/multimodal/index.d.cts +4 -0
  50. package/dist/multimodal/index.d.ts +4 -0
  51. package/dist/multimodal/index.js +5422 -0
  52. package/dist/multimodal/ocr.cjs +5193 -0
  53. package/dist/multimodal/ocr.d.cts +157 -0
  54. package/dist/multimodal/ocr.d.ts +157 -0
  55. package/dist/multimodal/ocr.js +5187 -0
  56. package/dist/react.cjs +5133 -0
  57. package/dist/react.d.cts +13 -0
  58. package/dist/react.d.ts +13 -0
  59. package/dist/react.js +5131 -0
  60. package/dist/types-B9c_ik4k.d.cts +88 -0
  61. package/dist/types-B9c_ik4k.d.ts +88 -0
  62. package/dist/types-BuKh9tvV.d.ts +20 -0
  63. package/dist/types-Ct_ueYqw.d.cts +76 -0
  64. package/dist/types-Ct_ueYqw.d.ts +76 -0
  65. package/dist/types-DI8nzwWc.d.cts +20 -0
  66. package/package.json +170 -3
@@ -0,0 +1,268 @@
1
+ import { F as FilterConfig, C as CheckProfanityResult } from '../types-B9c_ik4k.cjs';
2
+ export { L as Language } from '../types-B9c_ik4k.cjs';
3
+
4
+ /**
5
+ * Semantic Analysis Hooks for glin-profanity
6
+ *
7
+ * Provides hooks and utilities for combining profanity detection with
8
+ * semantic analysis using embeddings. Useful for advanced content moderation
9
+ * that goes beyond keyword matching.
10
+ *
11
+ * @example
12
+ * ```typescript
13
+ * import { createSemanticAnalyzer } from 'glin-profanity/ai/semantic';
14
+ *
15
+ * const analyzer = createSemanticAnalyzer({
16
+ * embeddingProvider: async (text) => {
17
+ * // Your embedding provider (OpenAI, Cohere, etc.)
18
+ * const response = await openai.embeddings.create({
19
+ * model: 'text-embedding-3-small',
20
+ * input: text,
21
+ * });
22
+ * return response.data[0].embedding;
23
+ * },
24
+ * });
25
+ *
26
+ * const result = await analyzer.analyze('This is a test message');
27
+ * console.log(result.combinedScore); // 0.0 - 1.0
28
+ * ```
29
+ *
30
+ * @packageDocumentation
31
+ * @module glin-profanity/ai/semantic
32
+ */
33
+
34
+ /**
35
+ * Embedding provider function type
36
+ */
37
+ type EmbeddingProvider = (text: string) => Promise<number[]>;
38
+ /**
39
+ * Semantic analyzer configuration
40
+ */
41
+ interface SemanticAnalyzerConfig {
42
+ /** Function to generate embeddings for text */
43
+ embeddingProvider: EmbeddingProvider;
44
+ /** Base filter configuration */
45
+ filterConfig?: Partial<FilterConfig>;
46
+ /** Weight for keyword-based detection (0-1). Default: 0.6 */
47
+ keywordWeight?: number;
48
+ /** Weight for semantic similarity (0-1). Default: 0.4 */
49
+ semanticWeight?: number;
50
+ /** Threshold for flagging content (0-1). Default: 0.5 */
51
+ threshold?: number;
52
+ /** Reference toxic content embeddings for comparison */
53
+ toxicReferenceEmbeddings?: number[][];
54
+ }
55
+ /**
56
+ * Semantic analysis result
57
+ */
58
+ interface SemanticAnalysisResult {
59
+ /** Combined moderation score (0-1, higher = more problematic) */
60
+ combinedScore: number;
61
+ /** Keyword-based profanity score (0-1) */
62
+ keywordScore: number;
63
+ /** Semantic similarity score to toxic content (0-1) */
64
+ semanticScore: number;
65
+ /** Whether content should be flagged based on threshold */
66
+ shouldFlag: boolean;
67
+ /** Detailed profanity check result */
68
+ profanityResult: CheckProfanityResult;
69
+ /** Breakdown of scoring components */
70
+ breakdown: {
71
+ profaneWordCount: number;
72
+ averageSeverity: number;
73
+ maxSemanticSimilarity: number;
74
+ contextScore?: number;
75
+ };
76
+ }
77
+ /**
78
+ * Creates a semantic analyzer that combines keyword-based profanity detection
79
+ * with embedding-based semantic analysis.
80
+ *
81
+ * @example
82
+ * ```typescript
83
+ * import OpenAI from 'openai';
84
+ * import { createSemanticAnalyzer } from 'glin-profanity/ai/semantic';
85
+ *
86
+ * const openai = new OpenAI();
87
+ *
88
+ * const analyzer = createSemanticAnalyzer({
89
+ * embeddingProvider: async (text) => {
90
+ * const response = await openai.embeddings.create({
91
+ * model: 'text-embedding-3-small',
92
+ * input: text,
93
+ * });
94
+ * return response.data[0].embedding;
95
+ * },
96
+ * keywordWeight: 0.6,
97
+ * semanticWeight: 0.4,
98
+ * threshold: 0.5,
99
+ * });
100
+ *
101
+ * const result = await analyzer.analyze('Hello world');
102
+ * console.log(result.shouldFlag); // false
103
+ * ```
104
+ */
105
+ declare function createSemanticAnalyzer(config: SemanticAnalyzerConfig): {
106
+ /**
107
+ * Analyze text for both keyword profanity and semantic toxicity
108
+ */
109
+ analyze(text: string): Promise<SemanticAnalysisResult>;
110
+ /**
111
+ * Batch analyze multiple texts
112
+ */
113
+ analyzeBatch(texts: string[]): Promise<SemanticAnalysisResult[]>;
114
+ /**
115
+ * Add custom toxic reference patterns
116
+ */
117
+ addToxicPatterns(patterns: string[]): Promise<void>;
118
+ /**
119
+ * Clear cached toxic embeddings
120
+ */
121
+ clearCache(): void;
122
+ /**
123
+ * Get current configuration
124
+ */
125
+ getConfig(): {
126
+ keywordWeight: number;
127
+ semanticWeight: number;
128
+ threshold: number;
129
+ filterConfig: FilterConfig;
130
+ };
131
+ };
132
+ /**
133
+ * Hooks for integrating semantic analysis into application flows
134
+ */
135
+ declare const semanticHooks: {
136
+ /**
137
+ * Pre-process hook for chat messages
138
+ *
139
+ * @example
140
+ * ```typescript
141
+ * const { shouldBlock, reason, sanitized } = await semanticHooks.preProcessMessage(
142
+ * message,
143
+ * analyzer,
144
+ * { autoSanitize: true }
145
+ * );
146
+ * ```
147
+ */
148
+ preProcessMessage(message: string, analyzer: ReturnType<typeof createSemanticAnalyzer>, options?: {
149
+ autoSanitize?: boolean;
150
+ threshold?: number;
151
+ }): Promise<{
152
+ shouldBlock: boolean;
153
+ reason: string;
154
+ sanitized: string;
155
+ analysis: SemanticAnalysisResult;
156
+ }>;
157
+ /**
158
+ * Post-process hook for AI-generated content
159
+ *
160
+ * @example
161
+ * ```typescript
162
+ * const { isSafe, analysis } = await semanticHooks.postProcessAIResponse(
163
+ * aiResponse,
164
+ * analyzer
165
+ * );
166
+ * ```
167
+ */
168
+ postProcessAIResponse(response: string, analyzer: ReturnType<typeof createSemanticAnalyzer>): Promise<{
169
+ isSafe: boolean;
170
+ analysis: SemanticAnalysisResult;
171
+ warnings: string[];
172
+ }>;
173
+ /**
174
+ * Conversation monitoring hook
175
+ *
176
+ * @example
177
+ * ```typescript
178
+ * const monitor = semanticHooks.createConversationMonitor(analyzer);
179
+ * monitor.addMessage('user', 'Hello');
180
+ * monitor.addMessage('assistant', 'Hi there!');
181
+ * const report = await monitor.getReport();
182
+ * ```
183
+ */
184
+ createConversationMonitor(analyzer: ReturnType<typeof createSemanticAnalyzer>): {
185
+ addMessage(role: string, content: string): Promise<SemanticAnalysisResult>;
186
+ getMessages(): {
187
+ role: string;
188
+ content: string;
189
+ timestamp: Date;
190
+ }[];
191
+ getReport(): Promise<{
192
+ totalMessages: number;
193
+ flaggedMessages: number;
194
+ averageScore: number;
195
+ isHealthy: boolean;
196
+ flaggedIndices: number[];
197
+ }>;
198
+ clear(): void;
199
+ };
200
+ };
201
+ /**
202
+ * Configuration for creating a fetch-based embedding provider
203
+ */
204
+ interface FetchEmbeddingProviderConfig {
205
+ /** API key for authentication (optional for local models) */
206
+ apiKey?: string;
207
+ /** Model name or deployment name - REQUIRED, no defaults to stay model-agnostic */
208
+ model: string;
209
+ /** Base URL for the API (default: https://api.openai.com/v1) */
210
+ baseUrl?: string;
211
+ /** Endpoint path (default: /embeddings) */
212
+ endpoint?: string;
213
+ /** Custom headers to include in requests */
214
+ headers?: Record<string, string>;
215
+ /** Custom response parser - extracts embedding array from API response */
216
+ parseResponse?: (response: unknown) => number[];
217
+ }
218
+ /**
219
+ * Utility function to create an embedding provider using fetch
220
+ * Works with any OpenAI-compatible API (OpenAI, Azure, Ollama, vLLM, etc.)
221
+ *
222
+ * @example
223
+ * ```typescript
224
+ * // OpenAI
225
+ * const openaiProvider = createFetchEmbeddingProvider({
226
+ * apiKey: process.env.OPENAI_API_KEY,
227
+ * model: process.env.EMBEDDING_MODEL || 'text-embedding-3-small',
228
+ * });
229
+ *
230
+ * // Azure OpenAI
231
+ * const azureProvider = createFetchEmbeddingProvider({
232
+ * apiKey: process.env.AZURE_OPENAI_KEY,
233
+ * model: process.env.AZURE_EMBEDDING_DEPLOYMENT,
234
+ * baseUrl: `https://${process.env.AZURE_RESOURCE}.openai.azure.com/openai/deployments/${process.env.AZURE_EMBEDDING_DEPLOYMENT}`,
235
+ * headers: { 'api-version': '2024-02-01' },
236
+ * });
237
+ *
238
+ * // Local Ollama
239
+ * const ollamaProvider = createFetchEmbeddingProvider({
240
+ * model: 'nomic-embed-text',
241
+ * baseUrl: 'http://localhost:11434',
242
+ * endpoint: '/api/embeddings',
243
+ * parseResponse: (data) => (data as { embedding: number[] }).embedding,
244
+ * });
245
+ *
246
+ * // Cohere
247
+ * const cohereProvider = createFetchEmbeddingProvider({
248
+ * apiKey: process.env.COHERE_API_KEY,
249
+ * model: 'embed-english-v3.0',
250
+ * baseUrl: 'https://api.cohere.ai/v1',
251
+ * endpoint: '/embed',
252
+ * parseResponse: (data) => (data as { embeddings: number[][] }).embeddings[0],
253
+ * });
254
+ *
255
+ * const analyzer = createSemanticAnalyzer({ embeddingProvider: openaiProvider });
256
+ * ```
257
+ */
258
+ declare function createFetchEmbeddingProvider(config: FetchEmbeddingProviderConfig): EmbeddingProvider;
259
+ /**
260
+ * @deprecated Use createFetchEmbeddingProvider instead for better flexibility
261
+ */
262
+ declare function createOpenAIEmbeddingProvider(config: {
263
+ apiKey: string;
264
+ model: string;
265
+ baseUrl?: string;
266
+ }): EmbeddingProvider;
267
+
268
+ export { CheckProfanityResult, type EmbeddingProvider, type FetchEmbeddingProviderConfig, FilterConfig, type SemanticAnalysisResult, type SemanticAnalyzerConfig, createFetchEmbeddingProvider, createOpenAIEmbeddingProvider, createSemanticAnalyzer, semanticHooks };
@@ -0,0 +1,268 @@
1
+ import { F as FilterConfig, C as CheckProfanityResult } from '../types-B9c_ik4k.js';
2
+ export { L as Language } from '../types-B9c_ik4k.js';
3
+
4
+ /**
5
+ * Semantic Analysis Hooks for glin-profanity
6
+ *
7
+ * Provides hooks and utilities for combining profanity detection with
8
+ * semantic analysis using embeddings. Useful for advanced content moderation
9
+ * that goes beyond keyword matching.
10
+ *
11
+ * @example
12
+ * ```typescript
13
+ * import { createSemanticAnalyzer } from 'glin-profanity/ai/semantic';
14
+ *
15
+ * const analyzer = createSemanticAnalyzer({
16
+ * embeddingProvider: async (text) => {
17
+ * // Your embedding provider (OpenAI, Cohere, etc.)
18
+ * const response = await openai.embeddings.create({
19
+ * model: 'text-embedding-3-small',
20
+ * input: text,
21
+ * });
22
+ * return response.data[0].embedding;
23
+ * },
24
+ * });
25
+ *
26
+ * const result = await analyzer.analyze('This is a test message');
27
+ * console.log(result.combinedScore); // 0.0 - 1.0
28
+ * ```
29
+ *
30
+ * @packageDocumentation
31
+ * @module glin-profanity/ai/semantic
32
+ */
33
+
34
+ /**
35
+ * Embedding provider function type
36
+ */
37
+ type EmbeddingProvider = (text: string) => Promise<number[]>;
38
+ /**
39
+ * Semantic analyzer configuration
40
+ */
41
+ interface SemanticAnalyzerConfig {
42
+ /** Function to generate embeddings for text */
43
+ embeddingProvider: EmbeddingProvider;
44
+ /** Base filter configuration */
45
+ filterConfig?: Partial<FilterConfig>;
46
+ /** Weight for keyword-based detection (0-1). Default: 0.6 */
47
+ keywordWeight?: number;
48
+ /** Weight for semantic similarity (0-1). Default: 0.4 */
49
+ semanticWeight?: number;
50
+ /** Threshold for flagging content (0-1). Default: 0.5 */
51
+ threshold?: number;
52
+ /** Reference toxic content embeddings for comparison */
53
+ toxicReferenceEmbeddings?: number[][];
54
+ }
55
+ /**
56
+ * Semantic analysis result
57
+ */
58
+ interface SemanticAnalysisResult {
59
+ /** Combined moderation score (0-1, higher = more problematic) */
60
+ combinedScore: number;
61
+ /** Keyword-based profanity score (0-1) */
62
+ keywordScore: number;
63
+ /** Semantic similarity score to toxic content (0-1) */
64
+ semanticScore: number;
65
+ /** Whether content should be flagged based on threshold */
66
+ shouldFlag: boolean;
67
+ /** Detailed profanity check result */
68
+ profanityResult: CheckProfanityResult;
69
+ /** Breakdown of scoring components */
70
+ breakdown: {
71
+ profaneWordCount: number;
72
+ averageSeverity: number;
73
+ maxSemanticSimilarity: number;
74
+ contextScore?: number;
75
+ };
76
+ }
77
+ /**
78
+ * Creates a semantic analyzer that combines keyword-based profanity detection
79
+ * with embedding-based semantic analysis.
80
+ *
81
+ * @example
82
+ * ```typescript
83
+ * import OpenAI from 'openai';
84
+ * import { createSemanticAnalyzer } from 'glin-profanity/ai/semantic';
85
+ *
86
+ * const openai = new OpenAI();
87
+ *
88
+ * const analyzer = createSemanticAnalyzer({
89
+ * embeddingProvider: async (text) => {
90
+ * const response = await openai.embeddings.create({
91
+ * model: 'text-embedding-3-small',
92
+ * input: text,
93
+ * });
94
+ * return response.data[0].embedding;
95
+ * },
96
+ * keywordWeight: 0.6,
97
+ * semanticWeight: 0.4,
98
+ * threshold: 0.5,
99
+ * });
100
+ *
101
+ * const result = await analyzer.analyze('Hello world');
102
+ * console.log(result.shouldFlag); // false
103
+ * ```
104
+ */
105
+ declare function createSemanticAnalyzer(config: SemanticAnalyzerConfig): {
106
+ /**
107
+ * Analyze text for both keyword profanity and semantic toxicity
108
+ */
109
+ analyze(text: string): Promise<SemanticAnalysisResult>;
110
+ /**
111
+ * Batch analyze multiple texts
112
+ */
113
+ analyzeBatch(texts: string[]): Promise<SemanticAnalysisResult[]>;
114
+ /**
115
+ * Add custom toxic reference patterns
116
+ */
117
+ addToxicPatterns(patterns: string[]): Promise<void>;
118
+ /**
119
+ * Clear cached toxic embeddings
120
+ */
121
+ clearCache(): void;
122
+ /**
123
+ * Get current configuration
124
+ */
125
+ getConfig(): {
126
+ keywordWeight: number;
127
+ semanticWeight: number;
128
+ threshold: number;
129
+ filterConfig: FilterConfig;
130
+ };
131
+ };
132
+ /**
133
+ * Hooks for integrating semantic analysis into application flows
134
+ */
135
+ declare const semanticHooks: {
136
+ /**
137
+ * Pre-process hook for chat messages
138
+ *
139
+ * @example
140
+ * ```typescript
141
+ * const { shouldBlock, reason, sanitized } = await semanticHooks.preProcessMessage(
142
+ * message,
143
+ * analyzer,
144
+ * { autoSanitize: true }
145
+ * );
146
+ * ```
147
+ */
148
+ preProcessMessage(message: string, analyzer: ReturnType<typeof createSemanticAnalyzer>, options?: {
149
+ autoSanitize?: boolean;
150
+ threshold?: number;
151
+ }): Promise<{
152
+ shouldBlock: boolean;
153
+ reason: string;
154
+ sanitized: string;
155
+ analysis: SemanticAnalysisResult;
156
+ }>;
157
+ /**
158
+ * Post-process hook for AI-generated content
159
+ *
160
+ * @example
161
+ * ```typescript
162
+ * const { isSafe, analysis } = await semanticHooks.postProcessAIResponse(
163
+ * aiResponse,
164
+ * analyzer
165
+ * );
166
+ * ```
167
+ */
168
+ postProcessAIResponse(response: string, analyzer: ReturnType<typeof createSemanticAnalyzer>): Promise<{
169
+ isSafe: boolean;
170
+ analysis: SemanticAnalysisResult;
171
+ warnings: string[];
172
+ }>;
173
+ /**
174
+ * Conversation monitoring hook
175
+ *
176
+ * @example
177
+ * ```typescript
178
+ * const monitor = semanticHooks.createConversationMonitor(analyzer);
179
+ * monitor.addMessage('user', 'Hello');
180
+ * monitor.addMessage('assistant', 'Hi there!');
181
+ * const report = await monitor.getReport();
182
+ * ```
183
+ */
184
+ createConversationMonitor(analyzer: ReturnType<typeof createSemanticAnalyzer>): {
185
+ addMessage(role: string, content: string): Promise<SemanticAnalysisResult>;
186
+ getMessages(): {
187
+ role: string;
188
+ content: string;
189
+ timestamp: Date;
190
+ }[];
191
+ getReport(): Promise<{
192
+ totalMessages: number;
193
+ flaggedMessages: number;
194
+ averageScore: number;
195
+ isHealthy: boolean;
196
+ flaggedIndices: number[];
197
+ }>;
198
+ clear(): void;
199
+ };
200
+ };
201
+ /**
202
+ * Configuration for creating a fetch-based embedding provider
203
+ */
204
+ interface FetchEmbeddingProviderConfig {
205
+ /** API key for authentication (optional for local models) */
206
+ apiKey?: string;
207
+ /** Model name or deployment name - REQUIRED, no defaults to stay model-agnostic */
208
+ model: string;
209
+ /** Base URL for the API (default: https://api.openai.com/v1) */
210
+ baseUrl?: string;
211
+ /** Endpoint path (default: /embeddings) */
212
+ endpoint?: string;
213
+ /** Custom headers to include in requests */
214
+ headers?: Record<string, string>;
215
+ /** Custom response parser - extracts embedding array from API response */
216
+ parseResponse?: (response: unknown) => number[];
217
+ }
218
+ /**
219
+ * Utility function to create an embedding provider using fetch
220
+ * Works with any OpenAI-compatible API (OpenAI, Azure, Ollama, vLLM, etc.)
221
+ *
222
+ * @example
223
+ * ```typescript
224
+ * // OpenAI
225
+ * const openaiProvider = createFetchEmbeddingProvider({
226
+ * apiKey: process.env.OPENAI_API_KEY,
227
+ * model: process.env.EMBEDDING_MODEL || 'text-embedding-3-small',
228
+ * });
229
+ *
230
+ * // Azure OpenAI
231
+ * const azureProvider = createFetchEmbeddingProvider({
232
+ * apiKey: process.env.AZURE_OPENAI_KEY,
233
+ * model: process.env.AZURE_EMBEDDING_DEPLOYMENT,
234
+ * baseUrl: `https://${process.env.AZURE_RESOURCE}.openai.azure.com/openai/deployments/${process.env.AZURE_EMBEDDING_DEPLOYMENT}`,
235
+ * headers: { 'api-version': '2024-02-01' },
236
+ * });
237
+ *
238
+ * // Local Ollama
239
+ * const ollamaProvider = createFetchEmbeddingProvider({
240
+ * model: 'nomic-embed-text',
241
+ * baseUrl: 'http://localhost:11434',
242
+ * endpoint: '/api/embeddings',
243
+ * parseResponse: (data) => (data as { embedding: number[] }).embedding,
244
+ * });
245
+ *
246
+ * // Cohere
247
+ * const cohereProvider = createFetchEmbeddingProvider({
248
+ * apiKey: process.env.COHERE_API_KEY,
249
+ * model: 'embed-english-v3.0',
250
+ * baseUrl: 'https://api.cohere.ai/v1',
251
+ * endpoint: '/embed',
252
+ * parseResponse: (data) => (data as { embeddings: number[][] }).embeddings[0],
253
+ * });
254
+ *
255
+ * const analyzer = createSemanticAnalyzer({ embeddingProvider: openaiProvider });
256
+ * ```
257
+ */
258
+ declare function createFetchEmbeddingProvider(config: FetchEmbeddingProviderConfig): EmbeddingProvider;
259
+ /**
260
+ * @deprecated Use createFetchEmbeddingProvider instead for better flexibility
261
+ */
262
+ declare function createOpenAIEmbeddingProvider(config: {
263
+ apiKey: string;
264
+ model: string;
265
+ baseUrl?: string;
266
+ }): EmbeddingProvider;
267
+
268
+ export { CheckProfanityResult, type EmbeddingProvider, type FetchEmbeddingProviderConfig, FilterConfig, type SemanticAnalysisResult, type SemanticAnalyzerConfig, createFetchEmbeddingProvider, createOpenAIEmbeddingProvider, createSemanticAnalyzer, semanticHooks };