@csgaglobal/explainability-engine 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +8 -0
- package/README.md +48 -0
- package/dist/index.js +44 -0
- package/dist/tools/explainability-engine-compliance.js +86 -0
- package/package.json +46 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
Creative Commons Legal Code
|
|
2
|
+
|
|
3
|
+
CC0 1.0 Universal
|
|
4
|
+
|
|
5
|
+
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
|
|
6
|
+
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
|
|
7
|
+
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
|
|
8
|
+
INFORMATION ON AN "AS-IS" BASIS.
|
package/README.md
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# @csgaglobal/explainability-engine
|
|
2
|
+
|
|
3
|
+
AI decision explainability and interpretability tools. Provides SHAP, LIME, counterfactual explanations, and stakeholder-appropriate explanation generation.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npx @csgaglobal/explainability-engine
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## MCP Configuration
|
|
12
|
+
|
|
13
|
+
```json
|
|
14
|
+
{
|
|
15
|
+
"mcpServers": {
|
|
16
|
+
"explainability-engine": {
|
|
17
|
+
"command": "npx",
|
|
18
|
+
"args": ["-y", "@csgaglobal/explainability-engine"]
|
|
19
|
+
}
|
|
20
|
+
}
|
|
21
|
+
}
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Tool: explainability_manage
|
|
25
|
+
|
|
26
|
+
Manage AI explainability and interpretability
|
|
27
|
+
|
|
28
|
+
### Parameters
|
|
29
|
+
|
|
30
|
+
- **system_name**: AI system to explain
|
|
31
|
+
- **operation**: Operation (explain-decision, feature-importance, counterfactual, model-global, generate-report)
|
|
32
|
+
- **explanation_audience**: Audience (technical, business, affected-individual, regulator, auditor)
|
|
33
|
+
- **explanation_method**: Method (SHAP, LIME, counterfactual, attention, rule-extraction)
|
|
34
|
+
- **jurisdiction**: Operating jurisdiction or region
|
|
35
|
+
|
|
36
|
+
## Category
|
|
37
|
+
|
|
38
|
+
- **Category:** DevOps
|
|
39
|
+
- **CA3O Level:** DevOps
|
|
40
|
+
|
|
41
|
+
## License
|
|
42
|
+
|
|
43
|
+
CC0-1.0 — Creative Commons Zero v1.0 Universal
|
|
44
|
+
|
|
45
|
+
## Author
|
|
46
|
+
|
|
47
|
+
CSGA Global — Cyber Security Global Alliance
|
|
48
|
+
https://csga-global.vercel.app/
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * ═══════════════════════════════════════════════════════════════════════════════
 * @csgaglobal/explainability-engine
 * ═══════════════════════════════════════════════════════════════════════════════
 *
 * Copyright (c) 2026 CSGA Global. All rights reserved.
 * Part of the CSGA Global MCP Ecosystem.
 *
 * LEGAL NOTICE: This software is provided for informational and advisory
 * purposes only. It does not constitute legal, regulatory, or professional
 * compliance advice. Users should consult qualified legal counsel for
 * jurisdiction-specific compliance requirements.
 *
 * License: CC0-1.0 (Creative Commons Zero v1.0 Universal)
 * SPDX-License-Identifier: CC0-1.0
 *
 * Build Timestamp: 2026-03-02T10:00:00Z
 * ═══════════════════════════════════════════════════════════════════════════════
 *
 * MCP server entry point: registers the single `explainability_manage` tool
 * and serves it over stdio.
 */
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { handleExplainabilityEngineCompliance } from "./tools/explainability-engine-compliance.js";

const server = new McpServer({
  name: "csoai-explainability-engine-mcp",
  version: "1.0.0"
});

// Raw zod shape (not z.object) — the MCP SDK's server.tool() accepts a shape
// and wraps it itself.
const ComplianceShape = {
  system_name: z.string().describe("AI system to explain"),
  operation: z.string().describe("Operation (explain-decision, feature-importance, counterfactual, model-global, generate-report)"),
  explanation_audience: z.string().describe("Audience (technical, business, affected-individual, regulator, auditor)"),
  explanation_method: z.string().describe("Method (SHAP, LIME, counterfactual, attention, rule-extraction)"),
  jurisdiction: z.string().describe("Operating jurisdiction or region")
};

server.tool("explainability_manage", "Manage AI explainability and interpretability", ComplianceShape, async (args) => {
  const result = handleExplainabilityEngineCompliance(
    args.system_name,
    args.operation,
    args.explanation_audience,
    args.explanation_method,
    args.jurisdiction
  );
  // MCP tool results carry content parts; serialize the advisory report as
  // pretty-printed JSON text.
  return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] };
});

/** Connect the server to a stdio transport and start serving requests. */
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
}

main().catch((err) => {
  // FIX: the previous `.catch(console.error)` logged the failure but left the
  // process exit code at 0, so supervisors/MCP clients could not detect a
  // failed startup. Log to stderr and flag a non-zero exit.
  console.error(err);
  process.exitCode = 1;
});
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
/**
 * explainability-engine-compliance.js — Part of @csgaglobal MCP Ecosystem
 * Copyright (c) 2026 CSGA Global. All rights reserved.
 * License: CC0-1.0 | Build: 2026-03-02T10:00:00Z
 * LEGAL NOTICE: Advisory only. Not legal or compliance advice.
 */

/**
 * Build an advisory explainability/compliance report for an AI system.
 *
 * Pure, deterministic keyword heuristic: classifies risk from the operation
 * text and selects applicable regulations from the jurisdiction text.
 *
 * @param {string} system_name - AI system to explain (echoed into the report).
 * @param {string} operation - Requested operation; scanned for risk keywords
 *   ("autonomous"/"automated"/"decision" → HIGH, "surveillance"/"biometric"/
 *   "facial" → CRITICAL).
 * @param {string} explanation_audience - Accepted for interface compatibility;
 *   currently unused by this heuristic.
 * @param {string} explanation_method - Accepted for interface compatibility;
 *   currently unused by this heuristic.
 * @param {string} jurisdiction - Operating jurisdiction or region (free text).
 * @returns {object} Report with risk classification/level, applicable
 *   regulations, compliance + technical requirements, remediation steps, and
 *   a suggested CASA pricing tier.
 */
export function handleExplainabilityEngineCompliance(system_name, operation, explanation_audience, explanation_method, jurisdiction) {
  // Guard nullish inputs so a missing field degrades to "no match" instead of
  // throwing TypeError on .toLowerCase().
  const jurLower = (jurisdiction ?? "").toLowerCase();
  const fnLower = (operation ?? "").toLowerCase();

  // FIX: whole-word jurisdiction matching. The previous substring tests
  // false-positived — includes("us") matched "Australia"/"Russia"/"Belarus",
  // includes("uk") matched "Ukraine", includes("eu") matched "Deutschland" —
  // and "United Kingdom" was missed entirely (it contains no "uk" substring).
  const tokens = jurLower.split(/[^a-z]+/).filter(Boolean);
  const hasWord = (w) => tokens.includes(w);

  // Risk classification: CRITICAL keywords checked last so they override HIGH.
  let riskClassification = "Standard explainability engine AI use";
  let riskLevel = "MEDIUM";
  if (fnLower.includes("autonomous") || fnLower.includes("automated") || fnLower.includes("decision")) {
    riskClassification = "HIGH RISK — Autonomous/automated decision-making requires enhanced oversight";
    riskLevel = "HIGH";
  }
  if (fnLower.includes("surveillance") || fnLower.includes("biometric") || fnLower.includes("facial")) {
    riskClassification = "CRITICAL RISK — Biometric/surveillance AI triggers strictest regulatory requirements";
    riskLevel = "CRITICAL";
  }

  const regulations = [];
  if (hasWord("eu") || jurLower.includes("europe")) {
    regulations.push("EU AI Act Art. 13 — Transparency for high-risk AI");
    regulations.push("GDPR Art. 22 — Right to explanation");
    regulations.push("EU AI Act Art. 86 — Right to explanation of AI decisions");
  }
  if (hasWord("us") || hasWord("usa") || jurLower.includes("united states")) {
    regulations.push("ECOA — Adverse action notice requirements");
    regulations.push("FCRA — Reason code requirements for AI decisions");
    regulations.push("FTC — AI transparency expectations");
  }
  if (hasWord("uk") || jurLower.includes("united kingdom")) {
    regulations.push("ICO — AI explainability guidance");
    regulations.push("UK AI White Paper — Transparency principle");
    regulations.push("FCA — AI model explainability for financial services");
  }
  if (regulations.length === 0) {
    // Unknown jurisdiction: fall back to generic guidance.
    regulations.push("General consumer protection and data privacy laws apply");
    regulations.push("Industry-specific regulations for explainability engine");
  }

  const compliance = [
    "Implement multi-method explainability framework",
    "Deploy stakeholder-appropriate explanation generation",
    "Establish right-to-explanation compliance workflow",
    "Create explanation quality metrics and monitoring",
    "Generate regulatory explainability documentation",
  ];
  if (riskLevel === "CRITICAL" || riskLevel === "HIGH") {
    compliance.push("EU AI Act conformity assessment required for high-risk classification");
    compliance.push("Data Protection Impact Assessment (DPIA) mandatory");
    compliance.push("Appoint AI governance officer or responsible person");
  }

  const technical = [
    "Multi-method XAI engine (SHAP, LIME, counterfactual)",
    "Stakeholder explanation generator",
    "Right-to-explanation workflow",
    "Explanation quality monitoring",
    "Regulatory explainability documentation",
  ];

  // Risk-tiered remediation, followed by steps that always apply.
  const remediation = [];
  if (riskLevel === "CRITICAL") {
    remediation.push("URGENT: Conduct comprehensive regulatory review before deployment");
    remediation.push("Commission independent third-party AI safety audit");
    remediation.push("Implement mandatory human-in-the-loop for all critical decisions");
  } else if (riskLevel === "HIGH") {
    remediation.push("Conduct DPIA and update data processing agreements");
    remediation.push("Implement enhanced monitoring and alerting for AI decisions");
    remediation.push("Establish regular (quarterly) compliance review cycle");
  }
  remediation.push("Maintain comprehensive AI system documentation per EU AI Act Art. 11");
  remediation.push("Establish stakeholder engagement process for affected communities");
  remediation.push("Monitor evolving regulatory requirements in operating jurisdictions");

  // Suggested commercial tier; MEDIUM and HIGH both map to Tier 2.
  let casaTier = "CASA Tier 1 — Startup ($5K-$25K/yr)";
  if (riskLevel === "CRITICAL") casaTier = "CASA Tier 3 — Enterprise ($75K-$200K/yr)";
  else if (riskLevel === "HIGH") casaTier = "CASA Tier 2 — Professional ($25K-$75K/yr)";
  else if (riskLevel === "MEDIUM") casaTier = "CASA Tier 2 — Professional ($25K-$75K/yr)";

  return {
    system_name: system_name,
    risk_classification: riskClassification,
    risk_level: riskLevel,
    applicable_regulations: regulations,
    compliance_requirements: compliance,
    technical_requirements: technical,
    remediation,
    casa_tier: casaTier
  };
}
|
package/package.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@csgaglobal/explainability-engine",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "AI decision explainability and interpretability tools. Provides SHAP, LIME, counterfactual explanations, and stakeholder-appropriate explanation generation.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"files": [
|
|
8
|
+
"dist",
|
|
9
|
+
"README.md",
|
|
10
|
+
"LICENSE"
|
|
11
|
+
],
|
|
12
|
+
"bin": {
|
|
13
|
+
"explainability-engine-mcp": "dist/index.js"
|
|
14
|
+
},
|
|
15
|
+
"scripts": {
|
|
16
|
+
"start": "node dist/index.js",
|
|
17
|
+
"build": "tsc"
|
|
18
|
+
},
|
|
19
|
+
"dependencies": {
|
|
20
|
+
"@modelcontextprotocol/sdk": "^1.6.1",
|
|
21
|
+
"zod": "^3.24.1"
|
|
22
|
+
},
|
|
23
|
+
"devDependencies": {
|
|
24
|
+
"typescript": "^5.7.3",
|
|
25
|
+
"@types/node": "^22.12.0"
|
|
26
|
+
},
|
|
27
|
+
"license": "CC0-1.0",
|
|
28
|
+
"repository": {
|
|
29
|
+
"type": "git",
|
|
30
|
+
"url": "https://github.com/csga-global/mcp-servers.git",
|
|
31
|
+
"directory": "packages/explainability-engine"
|
|
32
|
+
},
|
|
33
|
+
"author": "CSGA Global \u2014 Cyber Security Global Alliance",
|
|
34
|
+
"keywords": [
|
|
35
|
+
"mcp",
|
|
36
|
+
"ai-governance",
|
|
37
|
+
"compliance",
|
|
38
|
+
"csoai",
|
|
39
|
+
"explainability",
|
|
40
|
+
"interpretability",
|
|
41
|
+
"shap",
|
|
42
|
+
"lime",
|
|
43
|
+
"counterfactual",
|
|
44
|
+
"xai"
|
|
45
|
+
]
|
|
46
|
+
}
|