@tuteliq/mcp 3.2.0 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js DELETED
@@ -1,1396 +0,0 @@
1
- #!/usr/bin/env node
2
- "use strict";
3
- Object.defineProperty(exports, "__esModule", { value: true });
4
- const index_js_1 = require("@modelcontextprotocol/sdk/server/index.js");
5
- const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
6
- const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
7
- const sdk_1 = require("@tuteliq/sdk");
8
- const fs_1 = require("fs");
9
- // Initialize Tuteliq client
10
- const apiKey = process.env.TUTELIQ_API_KEY;
11
- if (!apiKey) {
12
- console.error('Error: TUTELIQ_API_KEY environment variable is required');
13
- process.exit(1);
14
- }
15
- const client = new sdk_1.Tuteliq(apiKey);
16
- // Severity emoji mapping
17
- const severityEmoji = {
18
- low: '🟡',
19
- medium: '🟠',
20
- high: '🔴',
21
- critical: '⛔',
22
- };
23
- const riskEmoji = {
24
- safe: '✅',
25
- none: '✅',
26
- low: '🟡',
27
- medium: '🟠',
28
- high: '🔴',
29
- critical: '⛔',
30
- };
31
- const trendEmoji = {
32
- improving: '📈',
33
- stable: '➡️',
34
- worsening: '📉',
35
- };
36
- // Tool definitions
37
- const tools = [
38
- // =========================================================================
39
- // Safety Detection Tools
40
- // =========================================================================
41
- {
42
- name: 'detect_bullying',
43
- description: 'Analyze text content to detect bullying, harassment, or harmful language. Returns severity, type of bullying, confidence score, and recommended actions.',
44
- inputSchema: {
45
- type: 'object',
46
- properties: {
47
- content: {
48
- type: 'string',
49
- description: 'The text content to analyze for bullying',
50
- },
51
- context: {
52
- type: 'object',
53
- description: 'Optional context for better analysis',
54
- properties: {
55
- language: { type: 'string' },
56
- ageGroup: { type: 'string' },
57
- relationship: { type: 'string' },
58
- platform: { type: 'string' },
59
- },
60
- },
61
- },
62
- required: ['content'],
63
- },
64
- },
65
- {
66
- name: 'detect_grooming',
67
- description: 'Analyze a conversation for grooming patterns and predatory behavior. Identifies manipulation tactics, boundary violations, and isolation attempts.',
68
- inputSchema: {
69
- type: 'object',
70
- properties: {
71
- messages: {
72
- type: 'array',
73
- description: 'Array of messages in the conversation',
74
- items: {
75
- type: 'object',
76
- properties: {
77
- role: {
78
- type: 'string',
79
- enum: ['adult', 'child', 'unknown'],
80
- description: 'Role of the message sender',
81
- },
82
- content: {
83
- type: 'string',
84
- description: 'Message content',
85
- },
86
- },
87
- required: ['role', 'content'],
88
- },
89
- },
90
- childAge: {
91
- type: 'number',
92
- description: 'Age of the child in the conversation',
93
- },
94
- },
95
- required: ['messages'],
96
- },
97
- },
98
- {
99
- name: 'detect_unsafe',
100
- description: 'Detect unsafe content including self-harm, violence, drugs, explicit material, or other harmful content categories.',
101
- inputSchema: {
102
- type: 'object',
103
- properties: {
104
- content: {
105
- type: 'string',
106
- description: 'The text content to analyze for unsafe content',
107
- },
108
- context: {
109
- type: 'object',
110
- description: 'Optional context for better analysis',
111
- properties: {
112
- language: { type: 'string' },
113
- ageGroup: { type: 'string' },
114
- platform: { type: 'string' },
115
- },
116
- },
117
- },
118
- required: ['content'],
119
- },
120
- },
121
- {
122
- name: 'analyze',
123
- description: 'Quick comprehensive safety analysis that checks for both bullying and unsafe content. Best for general content screening.',
124
- inputSchema: {
125
- type: 'object',
126
- properties: {
127
- content: {
128
- type: 'string',
129
- description: 'The text content to analyze',
130
- },
131
- include: {
132
- type: 'array',
133
- items: { type: 'string', enum: ['bullying', 'unsafe'] },
134
- description: 'Which checks to run (default: both)',
135
- },
136
- },
137
- required: ['content'],
138
- },
139
- },
140
- {
141
- name: 'analyze_emotions',
142
- description: 'Analyze emotional content and mental state indicators. Identifies dominant emotions, trends, and provides follow-up recommendations.',
143
- inputSchema: {
144
- type: 'object',
145
- properties: {
146
- content: {
147
- type: 'string',
148
- description: 'The text content to analyze for emotions',
149
- },
150
- },
151
- required: ['content'],
152
- },
153
- },
154
- {
155
- name: 'get_action_plan',
156
- description: 'Generate age-appropriate guidance and action steps for handling a safety situation. Tailored for children, parents, or educators.',
157
- inputSchema: {
158
- type: 'object',
159
- properties: {
160
- situation: {
161
- type: 'string',
162
- description: 'Description of the situation needing guidance',
163
- },
164
- childAge: {
165
- type: 'number',
166
- description: 'Age of the child involved',
167
- },
168
- audience: {
169
- type: 'string',
170
- enum: ['child', 'parent', 'educator', 'platform'],
171
- description: 'Who the guidance is for (default: parent)',
172
- },
173
- severity: {
174
- type: 'string',
175
- enum: ['low', 'medium', 'high', 'critical'],
176
- description: 'Severity of the situation',
177
- },
178
- },
179
- required: ['situation'],
180
- },
181
- },
182
- {
183
- name: 'generate_report',
184
- description: 'Generate a comprehensive incident report from a conversation. Includes summary, risk level, and recommended next steps.',
185
- inputSchema: {
186
- type: 'object',
187
- properties: {
188
- messages: {
189
- type: 'array',
190
- description: 'Array of messages in the incident',
191
- items: {
192
- type: 'object',
193
- properties: {
194
- sender: { type: 'string', description: 'Name/ID of sender' },
195
- content: { type: 'string', description: 'Message content' },
196
- },
197
- required: ['sender', 'content'],
198
- },
199
- },
200
- childAge: {
201
- type: 'number',
202
- description: 'Age of the child involved',
203
- },
204
- incidentType: {
205
- type: 'string',
206
- description: 'Type of incident (e.g., bullying, grooming)',
207
- },
208
- },
209
- required: ['messages'],
210
- },
211
- },
212
- // =========================================================================
213
- // Voice & Image Analysis Tools
214
- // =========================================================================
215
- {
216
- name: 'analyze_voice',
217
- description: 'Analyze an audio file for safety concerns. Transcribes the audio via Whisper, then runs safety analysis on the transcript. Returns timestamped segments for incident reports. Supports mp3, wav, m4a, ogg, flac, webm, mp4.',
218
- inputSchema: {
219
- type: 'object',
220
- properties: {
221
- file_path: {
222
- type: 'string',
223
- description: 'Absolute path to the audio file on disk',
224
- },
225
- analysis_type: {
226
- type: 'string',
227
- enum: ['bullying', 'unsafe', 'grooming', 'emotions', 'all'],
228
- description: 'Type of analysis to run on the transcript (default: all)',
229
- },
230
- child_age: {
231
- type: 'number',
232
- description: 'Child age (used for grooming analysis)',
233
- },
234
- language: {
235
- type: 'string',
236
- description: 'Language hint for transcription (e.g., "en", "es")',
237
- },
238
- },
239
- required: ['file_path'],
240
- },
241
- },
242
- {
243
- name: 'analyze_image',
244
- description: 'Analyze an image for visual safety concerns and OCR text extraction. Uses vision AI for content classification, then runs safety analysis on any extracted text. Supports png, jpg, jpeg, gif, webp.',
245
- inputSchema: {
246
- type: 'object',
247
- properties: {
248
- file_path: {
249
- type: 'string',
250
- description: 'Absolute path to the image file on disk',
251
- },
252
- analysis_type: {
253
- type: 'string',
254
- enum: ['bullying', 'unsafe', 'emotions', 'all'],
255
- description: 'Type of analysis to run on extracted text (default: all)',
256
- },
257
- },
258
- required: ['file_path'],
259
- },
260
- },
261
- // =========================================================================
262
- // Webhook Management Tools
263
- // =========================================================================
264
- {
265
- name: 'list_webhooks',
266
- description: 'List all webhooks configured for your account.',
267
- inputSchema: {
268
- type: 'object',
269
- properties: {},
270
- required: [],
271
- },
272
- },
273
- {
274
- name: 'create_webhook',
275
- description: 'Create a new webhook endpoint. The returned secret is only shown once — store it securely for signature verification.',
276
- inputSchema: {
277
- type: 'object',
278
- properties: {
279
- name: { type: 'string', description: 'Display name for the webhook' },
280
- url: { type: 'string', description: 'HTTPS URL to receive webhook payloads' },
281
- events: {
282
- type: 'array',
283
- items: { type: 'string' },
284
- description: 'Event types to subscribe to (e.g., incident.critical, grooming.detected, unsafe.detected, bullying.detected)',
285
- },
286
- },
287
- required: ['name', 'url', 'events'],
288
- },
289
- },
290
- {
291
- name: 'update_webhook',
292
- description: 'Update an existing webhook configuration.',
293
- inputSchema: {
294
- type: 'object',
295
- properties: {
296
- id: { type: 'string', description: 'Webhook ID' },
297
- name: { type: 'string', description: 'New display name' },
298
- url: { type: 'string', description: 'New HTTPS URL' },
299
- events: { type: 'array', items: { type: 'string' }, description: 'New event subscriptions' },
300
- is_active: { type: 'boolean', description: 'Enable or disable the webhook' },
301
- },
302
- required: ['id'],
303
- },
304
- },
305
- {
306
- name: 'delete_webhook',
307
- description: 'Permanently delete a webhook.',
308
- inputSchema: {
309
- type: 'object',
310
- properties: {
311
- id: { type: 'string', description: 'Webhook ID to delete' },
312
- },
313
- required: ['id'],
314
- },
315
- },
316
- {
317
- name: 'test_webhook',
318
- description: 'Send a test payload to a webhook to verify it is working correctly.',
319
- inputSchema: {
320
- type: 'object',
321
- properties: {
322
- id: { type: 'string', description: 'Webhook ID to test' },
323
- },
324
- required: ['id'],
325
- },
326
- },
327
- {
328
- name: 'regenerate_webhook_secret',
329
- description: 'Regenerate a webhook signing secret. The old secret is immediately invalidated.',
330
- inputSchema: {
331
- type: 'object',
332
- properties: {
333
- id: { type: 'string', description: 'Webhook ID' },
334
- },
335
- required: ['id'],
336
- },
337
- },
338
- // =========================================================================
339
- // Pricing Tools
340
- // =========================================================================
341
- {
342
- name: 'get_pricing',
343
- description: 'Get available pricing plans for Tuteliq.',
344
- inputSchema: {
345
- type: 'object',
346
- properties: {},
347
- required: [],
348
- },
349
- },
350
- {
351
- name: 'get_pricing_details',
352
- description: 'Get detailed pricing plans with monthly/yearly prices, API call limits, and feature lists.',
353
- inputSchema: {
354
- type: 'object',
355
- properties: {},
356
- required: [],
357
- },
358
- },
359
- // =========================================================================
360
- // Usage & Billing Tools
361
- // =========================================================================
362
- {
363
- name: 'get_usage_history',
364
- description: 'Get daily usage history for the past N days, showing request counts per day.',
365
- inputSchema: {
366
- type: 'object',
367
- properties: {
368
- days: {
369
- type: 'number',
370
- description: 'Number of days to retrieve (1-30, default: 7)',
371
- },
372
- },
373
- required: [],
374
- },
375
- },
376
- {
377
- name: 'get_usage_by_tool',
378
- description: 'Get usage broken down by tool/endpoint for a specific date.',
379
- inputSchema: {
380
- type: 'object',
381
- properties: {
382
- date: {
383
- type: 'string',
384
- description: 'Date in YYYY-MM-DD format (default: today)',
385
- },
386
- },
387
- required: [],
388
- },
389
- },
390
- {
391
- name: 'get_usage_monthly',
392
- description: 'Get monthly usage summary including billing period, limits, rate limits, and upgrade recommendations.',
393
- inputSchema: {
394
- type: 'object',
395
- properties: {},
396
- required: [],
397
- },
398
- },
399
- // =========================================================================
400
- // GDPR Account Tools
401
- // =========================================================================
402
- {
403
- name: 'delete_account_data',
404
- description: 'Delete all account data (GDPR Article 17 — Right to Erasure). Permanently removes all stored user data.',
405
- inputSchema: {
406
- type: 'object',
407
- properties: {},
408
- required: [],
409
- },
410
- },
411
- {
412
- name: 'export_account_data',
413
- description: 'Export all account data as JSON (GDPR Article 20 — Right to Data Portability).',
414
- inputSchema: {
415
- type: 'object',
416
- properties: {},
417
- required: [],
418
- },
419
- },
420
- {
421
- name: 'record_consent',
422
- description: 'Record user consent for a specific data processing purpose (GDPR Article 7).',
423
- inputSchema: {
424
- type: 'object',
425
- properties: {
426
- consent_type: {
427
- type: 'string',
428
- enum: ['data_processing', 'analytics', 'marketing', 'third_party_sharing', 'child_safety_monitoring'],
429
- description: 'Type of consent to record',
430
- },
431
- version: {
432
- type: 'string',
433
- description: 'Policy version the user is consenting to',
434
- },
435
- },
436
- required: ['consent_type', 'version'],
437
- },
438
- },
439
- {
440
- name: 'get_consent_status',
441
- description: 'Get current consent status for all or a specific consent type (GDPR Article 7).',
442
- inputSchema: {
443
- type: 'object',
444
- properties: {
445
- type: {
446
- type: 'string',
447
- enum: ['data_processing', 'analytics', 'marketing', 'third_party_sharing', 'child_safety_monitoring'],
448
- description: 'Optional: filter by consent type',
449
- },
450
- },
451
- required: [],
452
- },
453
- },
454
- {
455
- name: 'withdraw_consent',
456
- description: 'Withdraw a previously granted consent (GDPR Article 7.3).',
457
- inputSchema: {
458
- type: 'object',
459
- properties: {
460
- type: {
461
- type: 'string',
462
- enum: ['data_processing', 'analytics', 'marketing', 'third_party_sharing', 'child_safety_monitoring'],
463
- description: 'Type of consent to withdraw',
464
- },
465
- },
466
- required: ['type'],
467
- },
468
- },
469
- {
470
- name: 'rectify_data',
471
- description: 'Rectify (correct) user data in a specific collection (GDPR Article 16 — Right to Rectification).',
472
- inputSchema: {
473
- type: 'object',
474
- properties: {
475
- collection: {
476
- type: 'string',
477
- description: 'Firestore collection name',
478
- },
479
- document_id: {
480
- type: 'string',
481
- description: 'Document ID to rectify',
482
- },
483
- fields: {
484
- type: 'object',
485
- description: 'Fields to update (only allowlisted fields accepted)',
486
- },
487
- },
488
- required: ['collection', 'document_id', 'fields'],
489
- },
490
- },
491
- {
492
- name: 'get_audit_logs',
493
- description: 'Get audit trail of all data operations (GDPR Article 15 — Right of Access).',
494
- inputSchema: {
495
- type: 'object',
496
- properties: {
497
- action: {
498
- type: 'string',
499
- enum: ['data_access', 'data_export', 'data_deletion', 'data_rectification', 'consent_granted', 'consent_withdrawn', 'breach_notification'],
500
- description: 'Optional: filter by action type',
501
- },
502
- limit: {
503
- type: 'number',
504
- description: 'Maximum number of results',
505
- },
506
- },
507
- required: [],
508
- },
509
- },
510
- // =========================================================================
511
- // Breach Management Tools
512
- // =========================================================================
513
- {
514
- name: 'log_breach',
515
- description: 'Log a new data breach (GDPR Article 33/34). Records breach details and starts the 72-hour notification clock.',
516
- inputSchema: {
517
- type: 'object',
518
- properties: {
519
- title: { type: 'string', description: 'Brief title of the breach' },
520
- description: { type: 'string', description: 'Detailed description of what happened' },
521
- severity: { type: 'string', enum: ['low', 'medium', 'high', 'critical'], description: 'Breach severity' },
522
- affected_user_ids: { type: 'array', items: { type: 'string' }, description: 'List of affected user IDs' },
523
- data_categories: { type: 'array', items: { type: 'string' }, description: 'Categories of data affected (e.g. email, name, address)' },
524
- reported_by: { type: 'string', description: 'Who reported the breach' },
525
- },
526
- required: ['title', 'description', 'severity', 'affected_user_ids', 'data_categories', 'reported_by'],
527
- },
528
- },
529
- {
530
- name: 'list_breaches',
531
- description: 'List all data breaches, optionally filtered by status.',
532
- inputSchema: {
533
- type: 'object',
534
- properties: {
535
- status: { type: 'string', enum: ['detected', 'investigating', 'contained', 'reported', 'resolved'], description: 'Filter by breach status' },
536
- limit: { type: 'number', description: 'Maximum number of results' },
537
- },
538
- required: [],
539
- },
540
- },
541
- {
542
- name: 'get_breach',
543
- description: 'Get details of a specific data breach by ID.',
544
- inputSchema: {
545
- type: 'object',
546
- properties: {
547
- id: { type: 'string', description: 'Breach ID' },
548
- },
549
- required: ['id'],
550
- },
551
- },
552
- {
553
- name: 'update_breach_status',
554
- description: 'Update a breach status and notification progress.',
555
- inputSchema: {
556
- type: 'object',
557
- properties: {
558
- id: { type: 'string', description: 'Breach ID' },
559
- status: { type: 'string', enum: ['detected', 'investigating', 'contained', 'reported', 'resolved'], description: 'New breach status' },
560
- notification_status: { type: 'string', enum: ['pending', 'users_notified', 'dpa_notified', 'completed'], description: 'Notification progress status' },
561
- notes: { type: 'string', description: 'Additional notes about the update' },
562
- },
563
- required: ['id', 'status'],
564
- },
565
- },
566
- // =========================================================================
567
- // Fraud Detection Tools
568
- // =========================================================================
569
- {
570
- name: 'detect_social_engineering',
571
- description: 'Detect social engineering tactics such as pretexting, urgency fabrication, trust exploitation, and authority impersonation in text content.',
572
- inputSchema: {
573
- type: 'object',
574
- properties: {
575
- content: { type: 'string', description: 'Text content to analyze' },
576
- context: { type: 'object', description: 'Optional analysis context' },
577
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
578
- external_id: { type: 'string', description: 'External tracking ID' },
579
- customer_id: { type: 'string', description: 'Customer identifier' },
580
- },
581
- required: ['content'],
582
- },
583
- },
584
- {
585
- name: 'detect_app_fraud',
586
- description: 'Detect app-based fraud patterns such as fake investment platforms, phishing apps, subscription traps, and malicious download links.',
587
- inputSchema: {
588
- type: 'object',
589
- properties: {
590
- content: { type: 'string', description: 'Text content to analyze' },
591
- context: { type: 'object', description: 'Optional analysis context' },
592
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
593
- external_id: { type: 'string', description: 'External tracking ID' },
594
- customer_id: { type: 'string', description: 'Customer identifier' },
595
- },
596
- required: ['content'],
597
- },
598
- },
599
- {
600
- name: 'detect_romance_scam',
601
- description: 'Detect romance scam patterns such as love-bombing, financial requests, identity deception, and emotional manipulation in conversations.',
602
- inputSchema: {
603
- type: 'object',
604
- properties: {
605
- content: { type: 'string', description: 'Text content to analyze' },
606
- context: { type: 'object', description: 'Optional analysis context' },
607
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
608
- external_id: { type: 'string', description: 'External tracking ID' },
609
- customer_id: { type: 'string', description: 'Customer identifier' },
610
- },
611
- required: ['content'],
612
- },
613
- },
614
- {
615
- name: 'detect_mule_recruitment',
616
- description: 'Detect money mule recruitment tactics such as easy-money offers, bank account sharing requests, and laundering facilitation.',
617
- inputSchema: {
618
- type: 'object',
619
- properties: {
620
- content: { type: 'string', description: 'Text content to analyze' },
621
- context: { type: 'object', description: 'Optional analysis context' },
622
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
623
- external_id: { type: 'string', description: 'External tracking ID' },
624
- customer_id: { type: 'string', description: 'Customer identifier' },
625
- },
626
- required: ['content'],
627
- },
628
- },
629
- // =========================================================================
630
- // Safety Extended Tools
631
- // =========================================================================
632
- {
633
- name: 'detect_gambling_harm',
634
- description: 'Detect gambling-related harm indicators such as chasing losses, borrowing to gamble, concealment behavior, and emotional distress from gambling.',
635
- inputSchema: {
636
- type: 'object',
637
- properties: {
638
- content: { type: 'string', description: 'Text content to analyze' },
639
- context: { type: 'object', description: 'Optional analysis context' },
640
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
641
- external_id: { type: 'string', description: 'External tracking ID' },
642
- customer_id: { type: 'string', description: 'Customer identifier' },
643
- },
644
- required: ['content'],
645
- },
646
- },
647
- {
648
- name: 'detect_coercive_control',
649
- description: 'Detect coercive control patterns such as isolation tactics, financial control, monitoring behavior, threats, and emotional manipulation.',
650
- inputSchema: {
651
- type: 'object',
652
- properties: {
653
- content: { type: 'string', description: 'Text content to analyze' },
654
- context: { type: 'object', description: 'Optional analysis context' },
655
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
656
- external_id: { type: 'string', description: 'External tracking ID' },
657
- customer_id: { type: 'string', description: 'Customer identifier' },
658
- },
659
- required: ['content'],
660
- },
661
- },
662
- {
663
- name: 'detect_vulnerability_exploitation',
664
- description: 'Detect exploitation of vulnerable individuals including targeting the elderly, disabled, financially distressed, or emotionally vulnerable.',
665
- inputSchema: {
666
- type: 'object',
667
- properties: {
668
- content: { type: 'string', description: 'Text content to analyze' },
669
- context: { type: 'object', description: 'Optional analysis context' },
670
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
671
- external_id: { type: 'string', description: 'External tracking ID' },
672
- customer_id: { type: 'string', description: 'Customer identifier' },
673
- },
674
- required: ['content'],
675
- },
676
- },
677
- {
678
- name: 'detect_radicalisation',
679
- description: 'Detect radicalisation indicators such as extremist rhetoric, us-vs-them framing, calls to action, conspiracy narratives, and ideological grooming.',
680
- inputSchema: {
681
- type: 'object',
682
- properties: {
683
- content: { type: 'string', description: 'Text content to analyze' },
684
- context: { type: 'object', description: 'Optional analysis context' },
685
- include_evidence: { type: 'boolean', description: 'Include supporting evidence excerpts' },
686
- external_id: { type: 'string', description: 'External tracking ID' },
687
- customer_id: { type: 'string', description: 'Customer identifier' },
688
- },
689
- required: ['content'],
690
- },
691
- },
692
- // =========================================================================
693
- // Multi-Endpoint Analysis
694
- // =========================================================================
695
- {
696
- name: 'analyse_multi',
697
- description: 'Run multiple detection endpoints on a single piece of text. Returns individual results per endpoint plus an aggregated summary with overall risk level.',
698
- inputSchema: {
699
- type: 'object',
700
- properties: {
701
- content: { type: 'string', description: 'Text content to analyze' },
702
- endpoints: {
703
- type: 'array',
704
- items: { type: 'string' },
705
- description: 'Detection endpoints to run (e.g., social_engineering, app_fraud, romance_scam, mule_recruitment, gambling_harm, coercive_control, vulnerability_exploitation, radicalisation)',
706
- },
707
- context: { type: 'object', description: 'Optional analysis context' },
708
- include_evidence: { type: 'boolean', description: 'Include supporting evidence in individual results' },
709
- external_id: { type: 'string', description: 'External tracking ID' },
710
- customer_id: { type: 'string', description: 'Customer identifier' },
711
- },
712
- required: ['content', 'endpoints'],
713
- },
714
- },
715
- // =========================================================================
716
- // Video Analysis
717
- // =========================================================================
718
- {
719
- name: 'analyze_video',
720
- description: 'Analyze a video file for safety concerns. Extracts key frames and runs safety classification on each. Returns timestamped findings with severity scores. Supports mp4, mov, avi, webm, mkv.',
721
- inputSchema: {
722
- type: 'object',
723
- properties: {
724
- file_path: { type: 'string', description: 'Absolute path to the video file on disk' },
725
- age_group: { type: 'string', description: 'Age group for calibrated analysis (e.g., "child", "teen", "adult")' },
726
- },
727
- required: ['file_path'],
728
- },
729
- },
730
- ];
731
- // Create MCP server
732
- const server = new index_js_1.Server({
733
- name: 'tuteliq-mcp',
734
- version: '2.2.0',
735
- }, {
736
- capabilities: {
737
- tools: {},
738
- },
739
- });
740
- // List tools handler
741
- server.setRequestHandler(types_js_1.ListToolsRequestSchema, async () => {
742
- return { tools };
743
- });
744
- // Helper to extract filename from path
745
- function filenameFromPath(filePath) {
746
- return filePath.split('/').pop() || filePath;
747
- }
748
- // Format a DetectionResult as markdown
749
- function formatDetectionResult(result) {
750
- const detected = result.detected;
751
- const levelEmoji = riskEmoji[result.level] || '⚪';
752
- const label = result.endpoint
753
- .split('_')
754
- .map(w => w.charAt(0).toUpperCase() + w.slice(1))
755
- .join(' ');
756
- const header = detected
757
- ? `## ${levelEmoji} ${label} Detected`
758
- : `## ✅ No ${label} Detected`;
759
- const categories = result.categories.length > 0
760
- ? `**Categories:** ${result.categories.map(c => c.tag).join(', ')}`
761
- : '';
762
- const evidence = result.evidence && result.evidence.length > 0
763
- ? `### Evidence\n${result.evidence.map(e => `- _"${e.text}"_ — **${e.tactic}** (weight: ${e.weight.toFixed(2)})`).join('\n')}`
764
- : '';
765
- const calibration = result.age_calibration?.applied
766
- ? `**Age Calibration:** ${result.age_calibration.age_group} (${result.age_calibration.multiplier}x)`
767
- : '';
768
- return `${header}
769
-
770
- **Risk Score:** ${(result.risk_score * 100).toFixed(0)}%
771
- **Level:** ${result.level}
772
- **Confidence:** ${(result.confidence * 100).toFixed(0)}%
773
- ${categories}
774
-
775
- ### Rationale
776
- ${result.rationale}
777
-
778
- ### Recommended Action
779
- \`${result.recommended_action}\`
780
-
781
- ${evidence}
782
- ${calibration}`.trim();
783
- }
784
- // Format an AnalyseMultiResult as markdown
785
- function formatMultiResult(result) {
786
- const s = result.summary;
787
- const overallEmoji = riskEmoji[s.overall_risk_level] || '⚪';
788
- const summarySection = `## Multi-Endpoint Analysis
789
-
790
- **Overall Risk:** ${overallEmoji} ${s.overall_risk_level}
791
- **Endpoints Analyzed:** ${s.total_endpoints}
792
- **Threats Detected:** ${s.detected_count}
793
- **Highest Risk:** ${s.highest_risk.endpoint} (${(s.highest_risk.risk_score * 100).toFixed(0)}%)
794
- ${result.cross_endpoint_modifier ? `**Cross-Endpoint Modifier:** ${result.cross_endpoint_modifier.toFixed(2)}x` : ''}`;
795
- const perEndpoint = result.results
796
- .map(r => {
797
- const emoji = r.detected ? (riskEmoji[r.level] || '⚪') : '✅';
798
- return `### ${emoji} ${r.endpoint}
799
- **Detected:** ${r.detected ? 'Yes' : 'No'} | **Risk:** ${(r.risk_score * 100).toFixed(0)}% | **Level:** ${r.level}
800
- ${r.categories.length > 0 ? `**Categories:** ${r.categories.map(c => c.tag).join(', ')}` : ''}
801
- ${r.rationale}`;
802
- })
803
- .join('\n\n');
804
- return `${summarySection}
805
-
806
- ---
807
-
808
- ${perEndpoint}`;
809
- }
810
- // Format a VideoAnalysisResult as markdown
811
- function formatVideoResult(result) {
812
- const emoji = severityEmoji[result.overall_severity] || '✅';
813
- const findingsSection = result.safety_findings.length > 0
814
- ? result.safety_findings
815
- .map(f => {
816
- const fEmoji = severityEmoji[f.severity <= 0.3 ? 'low' : f.severity <= 0.6 ? 'medium' : f.severity <= 0.85 ? 'high' : 'critical'] || '⚪';
817
- return `- \`${f.timestamp.toFixed(1)}s\` (frame ${f.frame_index}) ${fEmoji} ${f.description}\n Categories: ${f.categories.join(', ')} | Severity: ${(f.severity * 100).toFixed(0)}%`;
818
- })
819
- .join('\n')
820
- : '_No safety findings._';
821
- return `## 🎬 Video Analysis
822
-
823
- **Overall Severity:** ${emoji} ${result.overall_severity}
824
- **Overall Risk Score:** ${(result.overall_risk_score * 100).toFixed(0)}%
825
- **Frames Analyzed:** ${result.frames_analyzed}
826
-
827
- ### Safety Findings
828
- ${findingsSection}`;
829
- }
830
// Call tool handler
//
// Central dispatcher for MCP CallTool requests. Each `case` maps one tool
// name to the corresponding Tuteliq SDK method and renders the SDK response
// as a markdown string returned in a single text content item. Unknown tool
// names and any thrown error are reported back with `isError: true` instead
// of crashing the server process.
server.setRequestHandler(types_js_1.CallToolRequestSchema, async (request) => {
    // `arguments` may be absent on a request; default to an empty object.
    const { name, arguments: args = {} } = request.params;
    try {
        switch (name) {
            // =====================================================================
            // Safety Detection
            // =====================================================================
            case 'detect_bullying': {
                const result = await client.detectBullying({
                    content: args.content,
                    context: args.context,
                });
                // Neutral marker when the severity value has no mapped emoji.
                const emoji = severityEmoji[result.severity] || '⚪';
                const response = `## ${result.is_bullying ? '⚠️ Bullying Detected' : '✅ No Bullying Detected'}

**Severity:** ${emoji} ${result.severity.charAt(0).toUpperCase() + result.severity.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.is_bullying ? `**Types:** ${result.bullying_type.join(', ')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'detect_grooming': {
                // Re-shape incoming messages to the { role, content } form the SDK expects.
                const messages = args.messages.map((m) => ({
                    role: m.role,
                    content: m.content,
                }));
                const result = await client.detectGrooming({
                    messages,
                    childAge: args.childAge,
                });
                const emoji = riskEmoji[result.grooming_risk] || '⚪';
                const response = `## ${result.grooming_risk === 'none' ? '✅ No Grooming Detected' : '⚠️ Grooming Risk Detected'}

**Risk Level:** ${emoji} ${result.grooming_risk.charAt(0).toUpperCase() + result.grooming_risk.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.flags.length > 0 ? `**Warning Flags:**\n${result.flags.map(f => `- 🚩 ${f}`).join('\n')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'detect_unsafe': {
                const result = await client.detectUnsafe({
                    content: args.content,
                    context: args.context,
                });
                const emoji = severityEmoji[result.severity] || '⚪';
                const response = `## ${result.unsafe ? '⚠️ Unsafe Content Detected' : '✅ Content is Safe'}

**Severity:** ${emoji} ${result.severity.charAt(0).toUpperCase() + result.severity.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.unsafe ? `**Categories:**\n${result.categories.map(c => `- ⚠️ ${c}`).join('\n')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'analyze': {
                // Combined analysis; `include` selects which sub-checks to run.
                const result = await client.analyze({
                    content: args.content,
                    include: args.include,
                });
                const emoji = riskEmoji[result.risk_level] || '⚪';
                // Per-check sections below are only rendered when that sub-result exists.
                const response = `## Safety Analysis Results

**Overall Risk:** ${emoji} ${result.risk_level.charAt(0).toUpperCase() + result.risk_level.slice(1)}
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

### Summary
${result.summary}

### Recommended Action
\`${result.recommended_action}\`

---
${result.bullying ? `
**Bullying Check:** ${result.bullying.is_bullying ? '⚠️ Detected' : '✅ Clear'}
` : ''}${result.unsafe ? `
**Unsafe Content:** ${result.unsafe.unsafe ? '⚠️ Detected' : '✅ Clear'}
` : ''}`;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'analyze_emotions': {
                const result = await client.analyzeEmotions({
                    content: args.content,
                });
                const emoji = trendEmoji[result.trend] || '➡️';
                // List emotions strongest-first as percentage bullets.
                const emotionScoresList = Object.entries(result.emotion_scores)
                    .sort((a, b) => b[1] - a[1])
                    .map(([emotion, score]) => `- ${emotion}: ${(score * 100).toFixed(0)}%`)
                    .join('\n');
                const response = `## Emotion Analysis

**Dominant Emotions:** ${result.dominant_emotions.join(', ')}
**Trend:** ${emoji} ${result.trend.charAt(0).toUpperCase() + result.trend.slice(1)}

### Emotion Scores
${emotionScoresList}

### Summary
${result.summary}

### Recommended Follow-up
${result.recommended_followup}`;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'get_action_plan': {
                const result = await client.getActionPlan({
                    situation: args.situation,
                    childAge: args.childAge,
                    audience: args.audience,
                    severity: args.severity,
                });
                // reading_level is optional in the response; omit its line when absent.
                const response = `## Action Plan

**Audience:** ${result.audience}
**Tone:** ${result.tone}
${result.reading_level ? `**Reading Level:** ${result.reading_level}` : ''}

### Steps
${result.steps.map((step, i) => `${i + 1}. ${step}`).join('\n')}`;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'generate_report': {
                // Re-shape incoming messages to the { sender, content } form the SDK expects.
                const messages = args.messages.map((m) => ({
                    sender: m.sender,
                    content: m.content,
                }));
                const result = await client.generateReport({
                    messages,
                    childAge: args.childAge,
                    // Only build an incident object when a type was supplied.
                    incident: args.incidentType ? { type: args.incidentType } : undefined,
                });
                const emoji = riskEmoji[result.risk_level] || '⚪';
                const response = `## 📋 Incident Report

**Risk Level:** ${emoji} ${result.risk_level.charAt(0).toUpperCase() + result.risk_level.slice(1)}

### Summary
${result.summary}

### Categories
${result.categories.map(c => `- ${c}`).join('\n')}

### Recommended Next Steps
${result.recommended_next_steps.map((step, i) => `${i + 1}. ${step}`).join('\n')}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // =====================================================================
            // Voice & Image Analysis
            // =====================================================================
            case 'analyze_voice': {
                const filePath = args.file_path;
                // `(0, fn)(...)` is compiler output: calls the helper without a `this`.
                // NOTE(review): reads a local file path supplied by the MCP client.
                const buffer = (0, fs_1.readFileSync)(filePath);
                const filename = filenameFromPath(filePath);
                const result = await client.analyzeVoice({
                    file: buffer,
                    filename,
                    analysisType: args.analysis_type || 'all',
                    language: args.language,
                    childAge: args.child_age,
                });
                const emoji = severityEmoji[result.overall_severity] || '✅';
                // Cap the segment listing at 20 entries to keep the response readable;
                // a trailer line below reports how many were omitted.
                const segmentLines = result.transcription.segments
                    .slice(0, 20)
                    .map(s => `\`${s.start.toFixed(1)}s–${s.end.toFixed(1)}s\` ${s.text}`)
                    .join('\n');
                // One summary line per analysis dimension that the SDK returned.
                const analysisLines = [];
                if (result.analysis.bullying) {
                    analysisLines.push(`**Bullying:** ${result.analysis.bullying.is_bullying ? '⚠️ Detected' : '✅ Clear'} (${(result.analysis.bullying.risk_score * 100).toFixed(0)}%)`);
                }
                if (result.analysis.unsafe) {
                    analysisLines.push(`**Unsafe:** ${result.analysis.unsafe.unsafe ? '⚠️ Detected' : '✅ Clear'} (${(result.analysis.unsafe.risk_score * 100).toFixed(0)}%)`);
                }
                if (result.analysis.grooming) {
                    analysisLines.push(`**Grooming:** ${result.analysis.grooming.grooming_risk !== 'none' ? '⚠️ ' + result.analysis.grooming.grooming_risk : '✅ Clear'} (${(result.analysis.grooming.risk_score * 100).toFixed(0)}%)`);
                }
                if (result.analysis.emotions) {
                    analysisLines.push(`**Emotions:** ${result.analysis.emotions.dominant_emotions.join(', ')} (${trendEmoji[result.analysis.emotions.trend] || ''} ${result.analysis.emotions.trend})`);
                }
                const response = `## 🎙️ Voice Analysis

**Overall Severity:** ${emoji} ${result.overall_severity}
**Overall Risk Score:** ${(result.overall_risk_score * 100).toFixed(0)}%
**Language:** ${result.transcription.language}
**Duration:** ${result.transcription.duration.toFixed(1)}s

### Transcript
${result.transcription.text}

### Timestamped Segments
${segmentLines}${result.transcription.segments.length > 20 ? `\n_...and ${result.transcription.segments.length - 20} more segments_` : ''}

### Analysis Results
${analysisLines.join('\n')}`;
                return { content: [{ type: 'text', text: response }] };
            }
            case 'analyze_image': {
                const filePath = args.file_path;
                // NOTE(review): reads a local file path supplied by the MCP client.
                const buffer = (0, fs_1.readFileSync)(filePath);
                const filename = filenameFromPath(filePath);
                const result = await client.analyzeImage({
                    file: buffer,
                    filename,
                    analysisType: args.analysis_type || 'all',
                });
                const emoji = severityEmoji[result.overall_severity] || '✅';
                // Text analysis (OCR-derived) is optional; build only present lines.
                const textAnalysisLines = [];
                if (result.text_analysis?.bullying) {
                    textAnalysisLines.push(`**Bullying:** ${result.text_analysis.bullying.is_bullying ? '⚠️ Detected' : '✅ Clear'} (${(result.text_analysis.bullying.risk_score * 100).toFixed(0)}%)`);
                }
                if (result.text_analysis?.unsafe) {
                    textAnalysisLines.push(`**Unsafe:** ${result.text_analysis.unsafe.unsafe ? '⚠️ Detected' : '✅ Clear'} (${(result.text_analysis.unsafe.risk_score * 100).toFixed(0)}%)`);
                }
                if (result.text_analysis?.emotions) {
                    textAnalysisLines.push(`**Emotions:** ${result.text_analysis.emotions.dominant_emotions.join(', ')}`);
                }
                const response = `## 🖼️ Image Analysis

**Overall Severity:** ${emoji} ${result.overall_severity}
**Overall Risk Score:** ${(result.overall_risk_score * 100).toFixed(0)}%

### Vision Results
**Description:** ${result.vision.visual_description}
**Visual Severity:** ${severityEmoji[result.vision.visual_severity] || '✅'} ${result.vision.visual_severity}
**Visual Confidence:** ${(result.vision.visual_confidence * 100).toFixed(0)}%
**Contains Text:** ${result.vision.contains_text ? 'Yes' : 'No'}
**Contains Faces:** ${result.vision.contains_faces ? 'Yes' : 'No'}
${result.vision.visual_categories.length > 0 ? `**Visual Categories:** ${result.vision.visual_categories.join(', ')}` : ''}

${result.vision.extracted_text ? `### Extracted Text (OCR)\n${result.vision.extracted_text}` : ''}

${textAnalysisLines.length > 0 ? `### Text Analysis Results\n${textAnalysisLines.join('\n')}` : ''}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // =====================================================================
            // Webhook Management
            // =====================================================================
            case 'list_webhooks': {
                const result = await client.listWebhooks();
                if (result.webhooks.length === 0) {
                    return { content: [{ type: 'text', text: 'No webhooks configured.' }] };
                }
                const lines = result.webhooks.map(w => `- ${w.is_active ? '🟢' : '⚪'} **${w.name}** — \`${w.url}\`\n Events: ${w.events.join(', ')} _(${w.id})_`).join('\n');
                return { content: [{ type: 'text', text: `## Webhooks\n\n${lines}` }] };
            }
            case 'create_webhook': {
                const result = await client.createWebhook({
                    name: args.name,
                    url: args.url,
                    events: args.events,
                });
                // The signing secret is returned only on creation — surface it prominently.
                return { content: [{ type: 'text', text: `## ✅ Webhook Created\n\n**ID:** ${result.id}\n**Name:** ${result.name}\n**URL:** ${result.url}\n**Events:** ${result.events.join(', ')}\n\n⚠️ **Secret (save this — shown only once):**\n\`${result.secret}\`` }] };
            }
            case 'update_webhook': {
                const result = await client.updateWebhook(args.id, {
                    name: args.name,
                    url: args.url,
                    events: args.events,
                    isActive: args.is_active,
                });
                return { content: [{ type: 'text', text: `## ✅ Webhook Updated\n\n**ID:** ${result.id}\n**Name:** ${result.name}\n**Active:** ${result.is_active ? '🟢 Yes' : '⚪ No'}` }] };
            }
            case 'delete_webhook': {
                await client.deleteWebhook(args.id);
                return { content: [{ type: 'text', text: `## ✅ Webhook Deleted\n\nWebhook \`${args.id}\` has been permanently deleted.` }] };
            }
            case 'test_webhook': {
                const result = await client.testWebhook(args.id);
                return { content: [{ type: 'text', text: `## ${result.success ? '✅' : '❌'} Webhook Test\n\n**Success:** ${result.success}\n**Status Code:** ${result.status_code}\n**Latency:** ${result.latency_ms}ms${result.error ? `\n**Error:** ${result.error}` : ''}` }] };
            }
            case 'regenerate_webhook_secret': {
                const result = await client.regenerateWebhookSecret(args.id);
                // Like creation, the new secret is shown exactly once.
                return { content: [{ type: 'text', text: `## ✅ Secret Regenerated\n\nThe old secret has been invalidated.\n\n⚠️ **New Secret (save this — shown only once):**\n\`${result.secret}\`` }] };
            }
            // =====================================================================
            // Pricing
            // =====================================================================
            case 'get_pricing': {
                const result = await client.getPricing();
                const lines = result.plans.map(p => `### ${p.name}\n**Price:** ${p.price}\n${p.features.map(f => `- ${f}`).join('\n')}`).join('\n\n');
                return { content: [{ type: 'text', text: `## Tuteliq Pricing\n\n${lines}` }] };
            }
            case 'get_pricing_details': {
                const result = await client.getPricingDetails();
                const lines = result.plans.map(p => `### ${p.name}\n**Monthly:** ${p.price_monthly}/mo | **Yearly:** ${p.price_yearly}/mo\n**API Calls:** ${p.api_calls_per_month}/mo | **Rate Limit:** ${p.rate_limit}/min\n${p.features.map(f => `- ${f}`).join('\n')}`).join('\n\n');
                return { content: [{ type: 'text', text: `## Tuteliq Pricing Details\n\n${lines}` }] };
            }
            // =====================================================================
            // Usage & Billing
            // =====================================================================
            case 'get_usage_history': {
                const result = await client.getUsageHistory(args.days);
                if (result.days.length === 0) {
                    return { content: [{ type: 'text', text: 'No usage data available.' }] };
                }
                // Render one markdown table row per day.
                const lines = result.days.map(d => `| ${d.date} | ${d.total_requests} | ${d.success_requests} | ${d.error_requests} |`).join('\n');
                return { content: [{ type: 'text', text: `## Usage History\n\n| Date | Total | Success | Errors |\n|------|-------|---------|--------|\n${lines}` }] };
            }
            case 'get_usage_by_tool': {
                const result = await client.getUsageByTool(args.date);
                const toolLines = Object.entries(result.tools).map(([tool, count]) => `- **${tool}:** ${count}`).join('\n');
                const endpointLines = Object.entries(result.endpoints).map(([ep, count]) => `- **${ep}:** ${count}`).join('\n');
                return { content: [{ type: 'text', text: `## Usage by Tool — ${result.date}\n\n### By Tool\n${toolLines || '_No data_'}\n\n### By Endpoint\n${endpointLines || '_No data_'}` }] };
            }
            case 'get_usage_monthly': {
                const result = await client.getUsageMonthly();
                // Upgrade recommendation is optional; section rendered only when present.
                const response = `## Monthly Usage

**Tier:** ${result.tier_display_name}
**Billing Period:** ${result.billing.current_period_start} → ${result.billing.current_period_end} (${result.billing.days_remaining} days left)

### Usage
**Used:** ${result.usage.used} / ${result.usage.limit} (${result.usage.percent_used.toFixed(1)}%)
**Remaining:** ${result.usage.remaining}
**Rate Limit:** ${result.rate_limit.requests_per_minute}/min

${result.recommendations ? `### Recommendation\n${result.recommendations.reason}\n**Suggested Tier:** ${result.recommendations.suggested_tier}\n[Upgrade](${result.recommendations.upgrade_url})` : ''}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // =====================================================================
            // GDPR Account
            // =====================================================================
            case 'delete_account_data': {
                const result = await client.deleteAccountData();
                return { content: [{ type: 'text', text: `## ✅ Account Data Deleted\n\n**Message:** ${result.message}\n**Records Deleted:** ${result.deleted_count}` }] };
            }
            case 'export_account_data': {
                const result = await client.exportAccountData();
                const collections = Object.keys(result.data).join(', ');
                // The raw JSON dump is truncated to 5000 chars to bound response size.
                return { content: [{ type: 'text', text: `## 📦 Account Data Export\n\n**User ID:** ${result.userId}\n**Exported At:** ${result.exportedAt}\n**Collections:** ${collections}\n\n\`\`\`json\n${JSON.stringify(result.data, null, 2).slice(0, 5000)}\n\`\`\`` }] };
            }
            case 'record_consent': {
                const result = await client.recordConsent({
                    consent_type: args.consent_type,
                    version: args.version,
                });
                return { content: [{ type: 'text', text: `## ✅ Consent Recorded\n\n**Type:** ${result.consent.consent_type}\n**Status:** ${result.consent.status}\n**Version:** ${result.consent.version}` }] };
            }
            case 'get_consent_status': {
                const result = await client.getConsentStatus(args.type);
                if (result.consents.length === 0) {
                    return { content: [{ type: 'text', text: 'No consent records found.' }] };
                }
                const lines = result.consents.map(c => `- **${c.consent_type}**: ${c.status === 'granted' ? '✅' : '❌'} ${c.status} (v${c.version})`).join('\n');
                return { content: [{ type: 'text', text: `## Consent Status\n\n${lines}` }] };
            }
            case 'withdraw_consent': {
                const result = await client.withdrawConsent(args.type);
                return { content: [{ type: 'text', text: `## ⚠️ Consent Withdrawn\n\n**Type:** ${result.consent.consent_type}\n**Status:** ${result.consent.status}` }] };
            }
            case 'rectify_data': {
                const result = await client.rectifyData({
                    collection: args.collection,
                    document_id: args.document_id,
                    fields: args.fields,
                });
                return { content: [{ type: 'text', text: `## ✅ Data Rectified\n\n**Message:** ${result.message}\n**Updated Fields:** ${result.updated_fields.join(', ')}` }] };
            }
            case 'get_audit_logs': {
                const result = await client.getAuditLogs({
                    action: args.action,
                    limit: args.limit,
                });
                if (result.audit_logs.length === 0) {
                    return { content: [{ type: 'text', text: 'No audit logs found.' }] };
                }
                const logLines = result.audit_logs.map(l => `- \`${l.created_at}\` **${l.action}** _(${l.id})_`).join('\n');
                return { content: [{ type: 'text', text: `## 📋 Audit Logs\n\n${logLines}` }] };
            }
            // =====================================================================
            // Breach Management
            // =====================================================================
            case 'log_breach': {
                const result = await client.logBreach({
                    title: args.title,
                    description: args.description,
                    severity: args.severity,
                    affected_user_ids: args.affected_user_ids,
                    data_categories: args.data_categories,
                    reported_by: args.reported_by,
                });
                const b = result.breach;
                return { content: [{ type: 'text', text: `## ⚠️ Breach Logged\n\n**ID:** ${b.id}\n**Title:** ${b.title}\n**Severity:** ${severityEmoji[b.severity] || '⚪'} ${b.severity}\n**Status:** ${b.status}\n**Notification Deadline:** ${b.notification_deadline}\n**Affected Users:** ${b.affected_user_ids.length}\n**Data Categories:** ${b.data_categories.join(', ')}` }] };
            }
            case 'list_breaches': {
                const result = await client.listBreaches({
                    status: args.status,
                    limit: args.limit,
                });
                if (result.breaches.length === 0) {
                    return { content: [{ type: 'text', text: 'No breaches found.' }] };
                }
                const breachLines = result.breaches.map(b => `- ${severityEmoji[b.severity] || '⚪'} **${b.title}** — ${b.status} _(${b.id})_`).join('\n');
                return { content: [{ type: 'text', text: `## Data Breaches\n\n${breachLines}` }] };
            }
            case 'get_breach': {
                const result = await client.getBreach(args.id);
                const b = result.breach;
                return { content: [{ type: 'text', text: `## Breach Details\n\n**ID:** ${b.id}\n**Title:** ${b.title}\n**Severity:** ${severityEmoji[b.severity] || '⚪'} ${b.severity}\n**Status:** ${b.status}\n**Notification:** ${b.notification_status}\n**Reported By:** ${b.reported_by}\n**Deadline:** ${b.notification_deadline}\n**Created:** ${b.created_at}\n**Updated:** ${b.updated_at}\n\n### Description\n${b.description}\n\n**Affected Users:** ${b.affected_user_ids.join(', ')}\n**Data Categories:** ${b.data_categories.join(', ')}` }] };
            }
            case 'update_breach_status': {
                const result = await client.updateBreachStatus(args.id, {
                    status: args.status,
                    notification_status: args.notification_status,
                    notes: args.notes,
                });
                const b = result.breach;
                return { content: [{ type: 'text', text: `## ✅ Breach Updated\n\n**ID:** ${b.id}\n**Status:** ${b.status}\n**Notification:** ${b.notification_status}` }] };
            }
            // =====================================================================
            // Fraud Detection
            // =====================================================================
            // The fraud and extended-safety cases all share one request shape and
            // delegate markdown rendering to formatDetectionResult.
            case 'detect_social_engineering': {
                const result = await client.detectSocialEngineering({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_app_fraud': {
                const result = await client.detectAppFraud({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_romance_scam': {
                const result = await client.detectRomanceScam({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_mule_recruitment': {
                const result = await client.detectMuleRecruitment({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            // =====================================================================
            // Safety Extended
            // =====================================================================
            case 'detect_gambling_harm': {
                const result = await client.detectGamblingHarm({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_coercive_control': {
                const result = await client.detectCoerciveControl({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_vulnerability_exploitation': {
                const result = await client.detectVulnerabilityExploitation({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            case 'detect_radicalisation': {
                const result = await client.detectRadicalisation({
                    content: args.content,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatDetectionResult(result) }] };
            }
            // =====================================================================
            // Multi-Endpoint Analysis
            // =====================================================================
            case 'analyse_multi': {
                const result = await client.analyseMulti({
                    content: args.content,
                    // Tool argument is named `endpoints`; the SDK calls it `detections`.
                    detections: args.endpoints,
                    context: args.context,
                    includeEvidence: args.include_evidence,
                    external_id: args.external_id,
                    customer_id: args.customer_id,
                });
                return { content: [{ type: 'text', text: formatMultiResult(result) }] };
            }
            // =====================================================================
            // Video Analysis
            // =====================================================================
            case 'analyze_video': {
                const filePath = args.file_path;
                // NOTE(review): reads a local file path supplied by the MCP client.
                const buffer = (0, fs_1.readFileSync)(filePath);
                const filename = filenameFromPath(filePath);
                const result = await client.analyzeVideo({
                    file: buffer,
                    filename,
                    ageGroup: args.age_group,
                });
                return { content: [{ type: 'text', text: formatVideoResult(result) }] };
            }
            default:
                // Unrecognized tool name: report as an error result, not an exception.
                return {
                    content: [{ type: 'text', text: `Unknown tool: ${name}` }],
                    isError: true,
                };
        }
    }
    catch (error) {
        // Any SDK/file error is converted into an MCP error result so the
        // client sees a message instead of a dropped request.
        const message = error instanceof Error ? error.message : 'Unknown error';
        return {
            content: [{ type: 'text', text: `Error: ${message}` }],
            isError: true,
        };
    }
});
1387
// Start server
// Entry point: attach the MCP server to a stdio transport and keep serving.
async function main() {
    await server.connect(new stdio_js_1.StdioServerTransport());
    // Log to stderr — stdout is reserved for the MCP protocol stream.
    console.error('Tuteliq MCP server running on stdio');
}
1393
// Fail loudly on startup errors and exit non-zero so a supervisor can restart.
main().catch((err) => {
    console.error('Fatal error:', err);
    process.exit(1);
});