@vettly/shared 0.1.11 → 0.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/README.md +479 -15
  2. package/package.json +3 -3
package/README.md CHANGED
@@ -1,29 +1,493 @@
1
1
  # @vettly/shared
2
2
 
3
- Shared TypeScript types for the Vettly SDK ecosystem.
3
+ Type-safe contracts for compliance-critical applications. Runtime-validated schemas that ensure consistency across all Vettly packages.
4
4
 
5
- ## Vettly Packages
5
+ ## Why Type-Safe Contracts Matter
6
6
 
7
- | Package | Description |
8
- |---------|-------------|
9
- | [@vettly/sdk](https://npmjs.com/package/@vettly/sdk) | Core TypeScript SDK |
10
- | [@vettly/react](https://npmjs.com/package/@vettly/react) | React components |
11
- | [@vettly/express](https://npmjs.com/package/@vettly/express) | Express middleware |
12
- | [@vettly/nextjs](https://npmjs.com/package/@vettly/nextjs) | Next.js integration |
7
+ Content moderation decisions have legal consequences. When a user appeals a blocking decision, you need to prove:
13
8
 
14
- ## Usage
9
+ - The **exact policy version** that was applied
10
+ - The **category thresholds** that triggered the action
11
+ - The **content fingerprint** that proves content wasn't altered
12
+
13
+ This package provides the canonical type definitions and Zod schemas that power the entire Vettly ecosystem. All Vettly packages (e.g. `@vettly/sdk`, `@vettly/react`) depend on these shared contracts.
14
+
15
+ ## Installation
16
+
17
+ ```bash
18
+ npm install @vettly/shared
19
+ ```
20
+
21
+ ---
22
+
23
+ ## Decision Actions
24
+
25
+ Vettly uses **graduated actions** rather than binary allow/block:
26
+
27
+ ```typescript
28
+ import type { Action } from '@vettly/shared'
29
+ import { ActionSchema } from '@vettly/shared'
30
+
31
+ // 'allow' | 'warn' | 'flag' | 'block'
32
+ const action: Action = 'flag'
33
+
34
+ // Validate at runtime
35
+ ActionSchema.parse('flag') // ✓
36
+ ActionSchema.parse('invalid') // throws ZodError
37
+ ```
38
+
39
+ | Action | Meaning | Typical Use |
40
+ |--------|---------|-------------|
41
+ | `allow` | Content passes all policy checks | Publish immediately |
42
+ | `warn` | Minor concern, user should be notified | Show warning, allow post |
43
+ | `flag` | Needs human review before action | Queue for moderator |
44
+ | `block` | Violates policy, cannot be published | Reject submission |
45
+
46
+ ---
47
+
48
+ ## Content Types
49
+
50
+ ```typescript
51
+ import type { ContentType, UseCaseType } from '@vettly/shared'
52
+
53
+ // ContentType: 'text' | 'image' | 'video'
54
+ const type: ContentType = 'image'
55
+
56
+ // UseCaseType provides context for smarter moderation
57
+ // 'social_post' | 'comment' | 'profile' | 'message' | 'review' | 'listing' | 'bio' | 'other'
58
+ const context: UseCaseType = 'profile'
59
+ ```
60
+
61
+ ---
62
+
63
+ ## Categories
64
+
65
+ Standard moderation categories with consistent naming:
66
+
67
+ ```typescript
68
+ import type { Category } from '@vettly/shared'
69
+ import { CategorySchema } from '@vettly/shared'
70
+
71
+ // All categories
72
+ const categories: Category[] = [
73
+ 'hate_speech', // Attacks based on protected characteristics
74
+ 'harassment', // Targeted abuse or bullying
75
+ 'violence', // Graphic violence or threats
76
+ 'self_harm', // Self-harm or suicide content
77
+ 'sexual', // Adult or explicit content
78
+ 'spam', // Commercial spam or manipulation
79
+ 'profanity', // Strong language
80
+ 'scam', // Fraud or deceptive content
81
+ 'illegal', // Illegal activities
82
+ ]
83
+ ```
84
+
85
+ ---
86
+
87
+ ## Policy Schema
88
+
89
+ Policies define how content is evaluated:
15
90
 
16
91
  ```typescript
17
- import type {
18
- CheckRequest,
19
- CheckResponse,
20
- Policy,
21
- Category,
22
- Action
92
+ import type { Policy, Rule, Override, FallbackConfig } from '@vettly/shared'
93
+ import { PolicySchema } from '@vettly/shared'
94
+
95
+ const policy: Policy = {
96
+ name: 'Community Guidelines',
97
+ version: '2024-01-15-abc123', // Immutable version identifier
98
+ rules: [
99
+ {
100
+ category: 'hate_speech',
101
+ threshold: 0.7, // Score 0-1 that triggers action
102
+ provider: 'openai',
103
+ action: 'block',
104
+ priority: 1, // Higher priority rules evaluated first
105
+ },
106
+ {
107
+ category: 'profanity',
108
+ threshold: 0.5,
109
+ provider: 'openai',
110
+ action: 'warn',
111
+ priority: 0,
112
+ }
113
+ ],
114
+ fallback: {
115
+ provider: 'mock', // Fallback if primary provider fails
116
+ on_timeout: true,
117
+ timeout_ms: 5000,
118
+ }
119
+ }
120
+
121
+ // Validate policy at runtime
122
+ PolicySchema.parse(policy)
123
+ ```
124
+
125
+ ### Custom Prompt Rules (Pro+)
126
+
127
+ For semantic image analysis with custom prompts:
128
+
129
+ ```typescript
130
+ const customRule: Rule = {
131
+ category: 'sexual',
132
+ threshold: 0.5,
133
+ provider: 'gemini_vision',
134
+ action: 'flag',
135
+ customPrompt: 'Does this image contain nudity or sexually suggestive content?',
136
+ customCategory: 'nudity_detection',
137
+ }
138
+ ```
139
+
140
+ ---
141
+
142
+ ## Decision Records
143
+
144
+ Every moderation decision is recorded with full audit trail:
145
+
146
+ ```typescript
147
+ import type { Decision } from '@vettly/shared'
148
+
149
+ const decision: Decision = {
150
+ id: '550e8400-e29b-41d4-a716-446655440000',
151
+ content: 'The original content text',
152
+ contentHash: 'sha256:abc123...', // Tamper-evident fingerprint
153
+ contentType: 'text',
154
+ policy: {
155
+ id: 'community-guidelines',
156
+ version: '2024-01-15-abc123', // Exact policy version applied
157
+ },
158
+ result: {
159
+ safe: false,
160
+ flagged: true,
161
+ action: 'block',
162
+ categories: [
163
+ {
164
+ category: 'hate_speech',
165
+ score: 0.91,
166
+ threshold: 0.7, // The threshold that was configured
167
+ triggered: true,
168
+ },
169
+ {
170
+ category: 'harassment',
171
+ score: 0.08,
172
+ threshold: 0.8,
173
+ triggered: false,
174
+ }
175
+ ],
176
+ },
177
+ provider: {
178
+ name: 'openai',
179
+ latency: 147,
180
+ cost: 0.000025,
181
+ },
182
+ metadata: {
183
+ userId: 'user_123',
184
+ sessionId: 'session_456',
185
+ },
186
+ timestamp: '2024-01-15T12:00:00.000Z',
187
+ requestId: 'req_unique_123', // For idempotency
188
+ }
189
+ ```
190
+
191
+ ---
192
+
193
+ ## Request/Response Types
194
+
195
+ ### Single Content Check
196
+
197
+ ```typescript
198
+ import type { CheckRequest, CheckResponse } from '@vettly/shared'
199
+ import { CheckRequestSchema, CheckResponseSchema } from '@vettly/shared'
200
+
201
+ const request: CheckRequest = {
202
+ content: 'User-generated text',
203
+ policyId: 'community-safe',
204
+ contentType: 'text',
205
+ language: 'en', // ISO 639-1 code
206
+ metadata: { userId: 'user_123' },
207
+ requestId: 'req_unique_for_idempotency',
208
+ }
209
+
210
+ // Validate incoming requests
211
+ const validated = CheckRequestSchema.parse(untrustedInput)
212
+
213
+ const response: CheckResponse = {
214
+ decisionId: '550e8400-e29b-41d4-a716-446655440000',
215
+ safe: true,
216
+ flagged: false,
217
+ action: 'allow',
218
+ categories: [
219
+ { category: 'hate_speech', score: 0.02, triggered: false }
220
+ ],
221
+ provider: 'openai',
222
+ latency: 123,
223
+ cost: 0.000015,
224
+ requestId: 'req_unique_for_idempotency',
225
+ }
226
+ ```
227
+
228
+ ### Multi-Modal Check
229
+
230
+ For checking text, images, and video together:
231
+
232
+ ```typescript
233
+ import type { MultiModalCheckRequest, MultiModalCheckResponse, ContentItemResult } from '@vettly/shared'
234
+
235
+ const request: MultiModalCheckRequest = {
236
+ text: 'Post caption text',
237
+ images: [
238
+ 'https://cdn.example.com/image1.jpg',
239
+ 'data:image/png;base64,...',
240
+ ],
241
+ video: 'https://cdn.example.com/video.mp4',
242
+ context: {
243
+ useCase: 'social_post',
244
+ userId: 'user_123',
245
+ userReputation: 0.95, // 0-1 trust score
246
+ locale: 'en-US',
247
+ region: 'US',
248
+ language: 'en',
249
+ },
250
+ policyId: 'social-media',
251
+ metadata: { postId: 'post_456' },
252
+ }
253
+
254
+ const response: MultiModalCheckResponse = {
255
+ decisionId: '...',
256
+ safe: false, // Overall: false if ANY item unsafe
257
+ flagged: true, // Overall: true if ANY item flagged
258
+ action: 'block', // Overall: most severe action
259
+ results: [
260
+ {
261
+ contentType: 'text',
262
+ safe: true,
263
+ flagged: false,
264
+ action: 'allow',
265
+ categories: [...],
266
+ provider: 'openai',
267
+ latency: 50,
268
+ cost: 0.00001,
269
+ },
270
+ {
271
+ contentType: 'image',
272
+ contentRef: 'https://cdn.example.com/image1.jpg',
273
+ contentItemId: 'item-uuid', // For linking to evidence
274
+ safe: false,
275
+ flagged: true,
276
+ action: 'block',
277
+ categories: [...],
278
+ provider: 'hive',
279
+ latency: 200,
280
+ cost: 0.0001,
281
+ evidence: {
282
+ url: 'https://evidence.vettly.dev/...', // Signed URL
283
+ expiresAt: '2024-01-16T12:00:00Z',
284
+ },
285
+ }
286
+ ],
287
+ totalLatency: 250,
288
+ totalCost: 0.00011,
289
+ }
290
+ ```
291
+
292
+ ---
293
+
294
+ ## Providers
295
+
296
+ Supported moderation providers:
297
+
298
+ ```typescript
299
+ import type { ProviderName, ProviderResult } from '@vettly/shared'
300
+
301
+ // 'openai' | 'openai_gpt' | 'hive' | 'azure' | 'gemini_vision' | 'mock' | 'fallback'
302
+ const provider: ProviderName = 'openai'
303
+
304
+ const result: ProviderResult = {
305
+ provider: 'openai',
306
+ flagged: true,
307
+ categories: {
308
+ hate_speech: 0.91,
309
+ harassment: 0.08,
310
+ },
311
+ confidence: 0.95,
312
+ latency: 147,
313
+ cost: 0.000025,
314
+ raw: { /* original provider response */ },
315
+ }
316
+ ```
317
+
318
+ ---
319
+
320
+ ## Webhook Events
321
+
322
+ ```typescript
323
+ import type { WebhookEventType, WebhookEndpoint } from '@vettly/shared'
324
+
325
+ // Event types
326
+ const events: WebhookEventType[] = [
327
+ 'decision.created', // Any decision made
328
+ 'decision.flagged', // Content flagged for review
329
+ 'decision.blocked', // Content blocked
330
+ 'policy.created', // New policy created
331
+ 'policy.updated', // Policy updated
332
+ ]
333
+
334
+ const webhook: WebhookEndpoint = {
335
+ url: 'https://your-app.com/webhooks/vettly',
336
+ events: ['decision.blocked', 'decision.flagged'],
337
+ description: 'Production webhook for content moderation',
338
+ }
339
+ ```
340
+
341
+ ---
342
+
343
+ ## Error Types
344
+
345
+ ```typescript
346
+ import {
347
+ ModerationError,
348
+ PolicyValidationError,
349
+ ProviderError
350
+ } from '@vettly/shared'
351
+
352
+ try {
353
+ await checkContent(...)
354
+ } catch (error) {
355
+ if (error instanceof PolicyValidationError) {
356
+ // Invalid policy YAML or configuration
357
+ console.log('Policy error:', error.message)
358
+ console.log('Details:', error.details)
359
+ } else if (error instanceof ProviderError) {
360
+ // Moderation provider failed
361
+ console.log('Provider error:', error.message)
362
+ console.log('Provider:', error.details?.provider)
363
+ } else if (error instanceof ModerationError) {
364
+ // General moderation error
365
+ console.log(`${error.code}: ${error.message}`)
366
+ console.log('Status:', error.statusCode)
367
+ }
368
+ }
369
+ ```
370
+
371
+ ---
372
+
373
+ ## Utility Functions
374
+
375
+ ```typescript
376
+ import {
377
+ hashContent,
378
+ generateUUID,
379
+ generateRequestId,
380
+ calculatePolicyVersion,
381
+ formatCost,
382
+ formatLatency
383
+ } from '@vettly/shared'
384
+
385
+ // SHA256 content hashing for tamper-evident fingerprinting
386
+ const hash = hashContent('user content')
387
+ // '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'
388
+
389
+ // Generate UUIDs for decisions
390
+ const id = generateUUID()
391
+ // '550e8400-e29b-41d4-a716-446655440000'
392
+
393
+ // Generate idempotency keys
394
+ const requestId = generateRequestId()
395
+ // 'req_1705320000000_x7k9m2'
396
+
397
+ // Calculate policy version from YAML
398
+ const version = calculatePolicyVersion(yamlContent)
399
+ // 'abc123def456...' (16-char hash)
400
+
401
+ // Format for display
402
+ formatCost(0.000123) // '$0.000123'
403
+ formatLatency(1250) // '1.25s'
404
+ formatLatency(150) // '150ms'
405
+ ```
406
+
407
+ ---
408
+
409
+ ## Runtime Validation
410
+
411
+ All types have corresponding Zod schemas for runtime validation:
412
+
413
+ ```typescript
414
+ import {
415
+ ActionSchema,
416
+ CategorySchema,
417
+ ContentTypeSchema,
418
+ PolicySchema,
419
+ CheckRequestSchema,
420
+ CheckResponseSchema,
421
+ DecisionSchema,
422
+ MultiModalCheckRequestSchema,
423
+ MultiModalCheckResponseSchema,
23
424
  } from '@vettly/shared'
425
+
426
+ // Validate untrusted input
427
+ const result = CheckRequestSchema.safeParse(untrustedInput)
428
+
429
+ if (result.success) {
430
+ // result.data is fully typed as CheckRequest
431
+ processRequest(result.data)
432
+ } else {
433
+ // result.error contains validation errors
434
+ console.log('Validation failed:', result.error.issues)
435
+ }
436
+
437
+ // Strict parsing (throws on failure)
438
+ try {
439
+ const request = CheckRequestSchema.parse(untrustedInput)
440
+ } catch (error) {
441
+ if (error instanceof z.ZodError) { // requires: import { z } from 'zod'
442
+ console.log('Validation errors:', error.issues)
443
+ }
444
+ }
24
445
  ```
25
446
 
447
+ ---
448
+
449
+ ## JSON Types
450
+
451
+ For database storage and serialization:
452
+
453
+ ```typescript
454
+ import type { JsonValue, JsonObject, JsonArray, JsonPrimitive } from '@vettly/shared'
455
+
456
+ // JSON-serializable metadata
457
+ const metadata: JsonObject = {
458
+ userId: 'user_123',
459
+ tags: ['user-reported', 'priority'],
460
+ score: 0.95,
461
+ reviewed: false,
462
+ }
463
+ ```
464
+
465
+ ---
466
+
467
+ ## Moderation Context
468
+
469
+ Additional context for smarter moderation:
470
+
471
+ ```typescript
472
+ import type { ModerationContext } from '@vettly/shared'
473
+
474
+ const context: ModerationContext = {
475
+ userId: 'user_123',
476
+ sessionId: 'session_456',
477
+ locale: 'en-US',
478
+ region: 'US',
479
+ metadata: {
480
+ deviceType: 'mobile',
481
+ appVersion: '2.1.0',
482
+ },
483
+ }
484
+ ```
485
+
486
+ ---
487
+
26
488
  ## Links
27
489
 
28
490
  - [vettly.dev](https://vettly.dev) - Sign up
29
491
  - [docs.vettly.dev](https://docs.vettly.dev) - Documentation
492
+ - [@vettly/sdk](https://www.npmjs.com/package/@vettly/sdk) - Server-side SDK
493
+ - [@vettly/react](https://www.npmjs.com/package/@vettly/react) - React components
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vettly/shared",
3
- "version": "0.1.11",
3
+ "version": "0.1.13",
4
4
  "description": "Shared TypeScript types for Vettly decision infrastructure",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
@@ -33,7 +33,7 @@
33
33
  "license": "MIT",
34
34
  "repository": {
35
35
  "type": "git",
36
- "url": "https://github.com/brian-nextaura/vettly-docs.git",
36
+ "url": "https://github.com/nextauralabs/vettly-docs.git",
37
37
  "directory": "packages/shared"
38
38
  },
39
39
  "homepage": "https://vettly.dev",
@@ -41,7 +41,7 @@
41
41
  "access": "public"
42
42
  },
43
43
  "bugs": {
44
- "url": "https://github.com/brian-nextaura/vettly-docs/issues"
44
+ "url": "https://github.com/nextauralabs/vettly-docs/issues"
45
45
  },
46
46
  "dependencies": {
47
47
  "zod": "^3.22.4"