@tuteliq/mcp 3.5.0 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -120,6 +120,62 @@ These tools are available via the [REST API](https://docs.tuteliq.ai) and the [@
120
120
 
121
121
  ---
122
122
 
123
+ ## Common Parameters
124
+
125
+ ### Context Fields
126
+
127
+ All detection tools accept an optional `context` object. These fields influence severity scoring and classification:
128
+
129
+ | Field | Type | Description |
130
+ |-------|------|-------------|
131
+ | `language` | `string` | ISO 639-1 code (e.g., `"en"`, `"sv"`). Auto-detected if omitted. |
132
+ | `ageGroup` | `string` | Age group (e.g., `"under 10"`, `"10-12"`, `"13-15"`, `"16-17"`, `"under 18"`). Triggers age-calibrated scoring. |
133
+ | `platform` | `string` | Platform name (e.g., `"Discord"`, `"Roblox"`). Adjusts detection for platform norms. |
134
+ | `relationship` | `string` | Relationship context (e.g., `"classmates"`, `"stranger"`). |
135
+ | `sender_trust` | `string` | Sender verification status: `"verified"`, `"trusted"`, or `"unknown"`. |
136
+ | `sender_name` | `string` | Name of the sender (used with `sender_trust`). |
137
+
138
+ #### `sender_trust` Behavior
139
+
140
+ When `sender_trust` is set to `"verified"` or `"trusted"`:
141
+ - **AUTH_IMPERSONATION** is fully suppressed — a verified sender cannot be impersonating an authority
142
+ - **URGENCY_FABRICATION** is suppressed for routine time-sensitive information (schedules, deadlines, appointments)
143
+ - Content is only flagged if it contains genuinely malicious elements (credential theft, phishing links, financial demands)
144
+ - This prevents false positives on legitimate institutional messages (school notifications, hospital reminders, government advisories)
145
+
146
+ ### `support_threshold`
147
+
148
+ Controls when crisis support resources (helplines, text lines, web resources) are included in the response:
149
+
150
+ | Value | Behavior |
151
+ |-------|----------|
152
+ | `low` | Include support for Low severity and above |
153
+ | `medium` | Include support for Medium severity and above |
154
+ | `high` | **(Default)** Include support for High severity and above |
155
+ | `critical` | Include support only for Critical severity |
156
+
157
+ > **Note:** Critical severity **always** includes support resources regardless of the threshold setting.
158
+
159
+ ### `analyse_multi` Endpoint Values
160
+
161
+ The `analyse_multi` tool accepts up to 10 endpoints per call, chosen from the 11 valid endpoint IDs below:
162
+
163
+ | Endpoint ID | Description |
164
+ |-------------|-------------|
165
+ | `bullying` | Bullying and harassment detection |
166
+ | `grooming` | Grooming pattern detection |
167
+ | `unsafe` | Unsafe content detection (self-harm, violence, explicit material) |
168
+ | `social-engineering` | Social engineering and pretexting |
169
+ | `app-fraud` | App-based fraud patterns |
170
+ | `romance-scam` | Romance scam patterns |
171
+ | `mule-recruitment` | Money mule recruitment |
172
+ | `gambling-harm` | Gambling-related harm |
173
+ | `coercive-control` | Coercive control patterns |
174
+ | `vulnerability-exploitation` | Exploitation of vulnerable individuals |
175
+ | `radicalisation` | Radicalisation indicators |
176
+
177
+ ---
178
+
123
179
  ## Installation
124
180
 
125
181
  ### Claude Desktop (Recommended)
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":";AAEA,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAUpE,wBAAgB,YAAY,CAAC,cAAc,CAAC,EAAE,MAAM,GAAG,SAAS,CAqB/D"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":";AAEA,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAWpE,wBAAgB,YAAY,CAAC,cAAc,CAAC,EAAE,MAAM,GAAG,SAAS,CAsB/D"}
package/dist/src/index.js CHANGED
@@ -6,6 +6,7 @@ import { registerFraudTools } from './tools/fraud.js';
6
6
  import { registerMediaTools } from './tools/media.js';
7
7
  import { registerAnalysisTools } from './tools/analysis.js';
8
8
  import { registerAdminTools } from './tools/admin.js';
9
+ import { registerResources } from './tools/resources.js';
9
10
  import { getTransportMode, startStdio } from './transport.js';
10
11
  export function createServer(apiKeyOverride) {
11
12
  const apiKey = apiKeyOverride || process.env.TUTELIQ_API_KEY;
@@ -23,6 +24,7 @@ export function createServer(apiKeyOverride) {
23
24
  registerMediaTools(server, client);
24
25
  registerAnalysisTools(server, client);
25
26
  registerAdminTools(server, client);
27
+ registerResources(server);
26
28
  return server;
27
29
  }
28
30
  // Direct execution: stdio mode
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerResources(server: McpServer): void;
3
+ //# sourceMappingURL=resources.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"resources.d.ts","sourceRoot":"","sources":["../../../src/tools/resources.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAsKzE,wBAAgB,iBAAiB,CAAC,MAAM,EAAE,SAAS,GAAG,IAAI,CAezD"}
@@ -0,0 +1,162 @@
1
+ const KOSA_CATEGORIES = `# KOSA Harm Categories
2
+
3
+ Tuteliq detects content across nine categories defined by the Kids Online Safety Act (KOSA):
4
+
5
+ 1. **Self-Harm & Suicidal Ideation** — Crisis language, passive ideation, planning indicators, self-injury references
6
+ 2. **Bullying & Harassment** — Direct insults, social exclusion, intimidation, cyberstalking, identity-based attacks
7
+ 3. **Sexual Exploitation** — Explicit solicitation, sextortion, inappropriate sexual content directed at minors
8
+ 4. **Substance Use** — Promotion, solicitation, or normalization of drug/alcohol use toward minors
9
+ 5. **Eating Disorders** — Pro-anorexia/bulimia content, body dysmorphia triggers, dangerous diet promotion
10
+ 6. **Depression & Anxiety** — Persistent mood indicators, hopelessness patterns, withdrawal signals
11
+ 7. **Compulsive Usage** — Engagement manipulation, addiction-pattern reinforcement, dark patterns targeting minors
12
+ 8. **Violence** — Violent threats, glorification, graphic content, weapons promotion
13
+ 9. **Grooming** — Trust escalation, secrecy requests, isolation attempts, boundary testing, gift/reward patterns
14
+
15
+ Each detection response includes which categories were triggered, with confidence scores per category.`;
16
+ const AGE_GROUPS = `# Age Groups & Calibration
17
+
18
+ Tuteliq adjusts severity scoring based on the child's age group. Pass \`ageGroup\` (or \`age_group\`) in the \`context\` object.
19
+
20
+ | Value | Calibration |
21
+ |-------|-------------|
22
+ | \`"under 10"\` | Highest sensitivity. Almost any exposure to harmful content is flagged at elevated severity. |
23
+ | \`"10-12"\` | High sensitivity. Distinguishes normal peer friction from targeted harassment. |
24
+ | \`"13-15"\` | Moderate sensitivity. Accounts for typical teen communication while remaining alert to genuine risk. |
25
+ | \`"16-17"\` | Adjusted sensitivity. Recognizes greater autonomy while maintaining protection against grooming, exploitation, and crisis signals. |
26
+ | \`"under 18"\` | Default bracket when specific age is unknown. Uses protective defaults. |
27
+
28
+ If \`ageGroup\` is omitted, Tuteliq defaults to the most protective bracket.
29
+
30
+ The response includes an \`age_calibration\` object showing: \`applied\` (boolean), \`age_group\` (string), \`multiplier\` (number).`;
31
+ const CREDIT_COSTS = `# Credit Costs Per Endpoint
32
+
33
+ | Endpoint | Credits |
34
+ |----------|---------|
35
+ | detect_unsafe | 1 |
36
+ | detect_bullying | 1 |
37
+ | detect_grooming | 1 |
38
+ | detect_social_engineering | 1 |
39
+ | detect_app_fraud | 1 |
40
+ | detect_romance_scam | 1 |
41
+ | detect_mule_recruitment | 1 |
42
+ | detect_gambling_harm | 1 |
43
+ | detect_coercive_control | 1 |
44
+ | detect_vulnerability_exploitation | 1 |
45
+ | detect_radicalisation | 1 |
46
+ | analyse_multi | Sum of individual endpoints |
47
+ | analyze_voice | 3 |
48
+ | analyze_image | 3 |
49
+ | analyze_video | 10 |
50
+ | analyze_emotions | 1 |
51
+ | get_action_plan | 1 |
52
+ | generate_report | 2 |
53
+ | age_verification | 5 |
54
+ | identity_verification | 10 |`;
55
+ const CONTEXT_FIELDS = `# Context Fields Reference
56
+
57
+ Pass a \`context\` object with any detection tool to improve accuracy.
58
+
59
+ | Field | Type | Description |
60
+ |-------|------|-------------|
61
+ | \`ageGroup\` / \`age_group\` | string | Age bracket for calibrated scoring: \`"under 10"\`, \`"10-12"\`, \`"13-15"\`, \`"16-17"\`, \`"under 18"\` |
62
+ | \`language\` | string | ISO 639-1 code (e.g. \`"en"\`, \`"de"\`, \`"sv"\`). Auto-detected if omitted. 27 languages supported. |
63
+ | \`platform\` | string | Platform name (e.g. \`"Discord"\`, \`"Roblox"\`, \`"WhatsApp"\`). Adjusts for platform-specific norms and slang. |
64
+ | \`conversation_history\` | array | Prior messages for context-aware analysis. Each entry: \`{ sender: string, content: string }\`. Returns per-message \`message_analysis\`. |
65
+ | \`sender_trust\` | string | \`"verified"\`, \`"trusted"\`, or \`"unknown"\`. When \`"verified"\`, AUTH_IMPERSONATION is fully suppressed. |
66
+ | \`sender_name\` | string | Sender identifier. Used with \`sender_trust\` for impersonation scoring. |
67
+
68
+ ## Common Parameters (all detection tools)
69
+
70
+ | Parameter | Type | Description |
71
+ |-----------|------|-------------|
72
+ | \`content\` | string | **Required.** The text to analyze. |
73
+ | \`context\` | object | Optional context object (see above). |
74
+ | \`include_evidence\` | boolean | When \`true\`, returns flagged phrases with tactic labels and weights. |
75
+ | \`support_threshold\` | string | Minimum severity to include crisis helplines: \`"low"\`, \`"medium"\`, \`"high"\` (default), \`"critical"\`. Critical severity always includes support resources. |
76
+ | \`external_id\` | string | Your external tracking ID (echoed in response). |
77
+ | \`customer_id\` | string | Your customer identifier (echoed in response). |
78
+
79
+ ## analyse_multi Parameters
80
+
81
+ | Parameter | Type | Description |
82
+ |-----------|------|-------------|
83
+ | \`content\` | string | **Required.** Text to analyze. |
84
+ | \`endpoints\` | string[] | **Required.** Endpoint IDs to run. Valid values: \`bullying\`, \`grooming\`, \`unsafe\`, \`social-engineering\`, \`app-fraud\`, \`romance-scam\`, \`mule-recruitment\`, \`gambling-harm\`, \`coercive-control\`, \`vulnerability-exploitation\`, \`radicalisation\` |
85
+ | \`context\` | object | Optional context object. |
86
+ | \`include_evidence\` | boolean | Include evidence in each result. |
87
+ | \`support_threshold\` | string | Crisis helpline threshold. |`;
88
+ const DOCUMENTATION = `# Tuteliq MCP — Quick Reference
89
+
90
+ ## What is Tuteliq?
91
+ Tuteliq is a child safety API that detects grooming, bullying, self-harm, fraud, radicalisation, and 10+ other harms across text, voice, image, and video. Sub-400ms response times. Zero data retention. KOSA, COPPA, and DSA compliant.
92
+
93
+ ## Full Documentation
94
+ - API Reference: https://docs.tuteliq.ai/api-reference/introduction
95
+ - How It Works: https://docs.tuteliq.ai/how-it-works
96
+ - MCP Setup: https://docs.tuteliq.ai/sdks/mcp
97
+ - Language Support: https://docs.tuteliq.ai/languages
98
+ - KOSA Compliance: https://docs.tuteliq.ai/kosa-compliance
99
+
100
+ ## Detection Response Shape
101
+ All detection tools return:
102
+ - \`detected\` (boolean) — whether harmful content was found
103
+ - \`level\` (string) — \`"none"\`, \`"low"\`, \`"medium"\`, \`"high"\`, or \`"critical"\`
104
+ - \`risk_score\` (float, 0.0–1.0) — granular score for threshold automation
105
+ - \`confidence\` (float, 0.0–1.0) — model confidence
106
+ - \`categories\` (array) — triggered harm categories with tags, labels, and confidence
107
+ - \`evidence\` (array, when \`include_evidence: true\`) — flagged phrases with tactic and weight
108
+ - \`rationale\` (string) — human-readable explanation
109
+ - \`recommended_action\` (string) — suggested next step
110
+ - \`language\` (string) — resolved ISO 639-1 language code
111
+ - \`age_calibration\` (object) — age group applied, multiplier used
112
+ - \`support\` (object, when threshold met) — crisis helplines and guidance
113
+
114
+ ## Tips for Best Results
115
+ 1. Always pass \`ageGroup\` in context — it significantly affects scoring calibration
116
+ 2. Use \`include_evidence: true\` to get flagged phrases with weights for audit trails
117
+ 3. Use \`analyse_multi\` to run multiple classifiers in a single call (saves latency)
118
+ 4. For conversations, pass \`conversation_history\` to enable multi-turn pattern detection
119
+ 5. Set \`support_threshold: "low"\` to include crisis resources for any detected severity (Low and above)
120
+ const RESOURCES = [
121
+ {
122
+ uri: 'tuteliq://documentation',
123
+ name: 'Tuteliq Documentation',
124
+ description: 'Quick reference guide for Tuteliq MCP tools, response shapes, and usage tips',
125
+ content: DOCUMENTATION,
126
+ },
127
+ {
128
+ uri: 'tuteliq://context-fields',
129
+ name: 'Context Fields & Parameters',
130
+ description: 'Complete reference for all context fields, common parameters, and analyse_multi configuration',
131
+ content: CONTEXT_FIELDS,
132
+ },
133
+ {
134
+ uri: 'tuteliq://kosa-categories',
135
+ name: 'KOSA Harm Categories',
136
+ description: 'List of all nine KOSA harm categories with descriptions',
137
+ content: KOSA_CATEGORIES,
138
+ },
139
+ {
140
+ uri: 'tuteliq://age-groups',
141
+ name: 'Age Groups & Calibration',
142
+ description: 'Available age group brackets, their calibration, and how to use them',
143
+ content: AGE_GROUPS,
144
+ },
145
+ {
146
+ uri: 'tuteliq://credit-costs',
147
+ name: 'Credit Costs',
148
+ description: 'Per-endpoint credit costs for billing',
149
+ content: CREDIT_COSTS,
150
+ },
151
+ ];
152
+ export function registerResources(server) {
153
+ for (const res of RESOURCES) {
154
+ server.resource(res.name, res.uri, { description: res.description, mimeType: 'text/markdown' }, async () => ({
155
+ contents: [{
156
+ uri: res.uri,
157
+ mimeType: 'text/markdown',
158
+ text: res.content,
159
+ }],
160
+ }));
161
+ }
162
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tuteliq/mcp",
3
- "version": "3.5.0",
3
+ "version": "3.6.0",
4
4
  "description": "MCP server for Tuteliq — 41 AI-powered tools for child safety, fraud detection, grooming, bullying, sextortion, and content moderation. Interactive UI widgets for Claude, Cursor, and MCP-compatible AI assistants.",
5
5
  "type": "module",
6
6
  "main": "./dist/src/index.js",