tech-hub-skills 1.2.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/{LICENSE → .claude/LICENSE} +21 -21
- package/.claude/README.md +291 -0
- package/.claude/bin/cli.js +266 -0
- package/{bin → .claude/bin}/copilot.js +182 -182
- package/{bin → .claude/bin}/postinstall.js +42 -42
- package/{tech_hub_skills/skills → .claude/commands}/README.md +336 -336
- package/{tech_hub_skills/skills → .claude/commands}/ai-engineer.md +104 -104
- package/{tech_hub_skills/skills → .claude/commands}/aws.md +143 -143
- package/{tech_hub_skills/skills → .claude/commands}/azure.md +149 -149
- package/{tech_hub_skills/skills → .claude/commands}/backend-developer.md +108 -108
- package/{tech_hub_skills/skills → .claude/commands}/code-review.md +399 -399
- package/{tech_hub_skills/skills → .claude/commands}/compliance-automation.md +747 -747
- package/{tech_hub_skills/skills → .claude/commands}/compliance-officer.md +108 -108
- package/{tech_hub_skills/skills → .claude/commands}/data-engineer.md +113 -113
- package/{tech_hub_skills/skills → .claude/commands}/data-governance.md +102 -102
- package/{tech_hub_skills/skills → .claude/commands}/data-scientist.md +123 -123
- package/{tech_hub_skills/skills → .claude/commands}/database-admin.md +109 -109
- package/{tech_hub_skills/skills → .claude/commands}/devops.md +160 -160
- package/{tech_hub_skills/skills → .claude/commands}/docker.md +160 -160
- package/{tech_hub_skills/skills → .claude/commands}/enterprise-dashboard.md +613 -613
- package/{tech_hub_skills/skills → .claude/commands}/finops.md +184 -184
- package/{tech_hub_skills/skills → .claude/commands}/frontend-developer.md +108 -108
- package/{tech_hub_skills/skills → .claude/commands}/gcp.md +143 -143
- package/{tech_hub_skills/skills → .claude/commands}/ml-engineer.md +115 -115
- package/{tech_hub_skills/skills → .claude/commands}/mlops.md +187 -187
- package/{tech_hub_skills/skills → .claude/commands}/network-engineer.md +109 -109
- package/{tech_hub_skills/skills → .claude/commands}/optimization-advisor.md +329 -329
- package/{tech_hub_skills/skills → .claude/commands}/orchestrator.md +623 -623
- package/{tech_hub_skills/skills → .claude/commands}/platform-engineer.md +102 -102
- package/{tech_hub_skills/skills → .claude/commands}/process-automation.md +226 -226
- package/{tech_hub_skills/skills → .claude/commands}/process-changelog.md +184 -184
- package/{tech_hub_skills/skills → .claude/commands}/process-documentation.md +484 -484
- package/{tech_hub_skills/skills → .claude/commands}/process-kanban.md +324 -324
- package/{tech_hub_skills/skills → .claude/commands}/process-versioning.md +214 -214
- package/{tech_hub_skills/skills → .claude/commands}/product-designer.md +104 -104
- package/{tech_hub_skills/skills → .claude/commands}/project-starter.md +443 -443
- package/{tech_hub_skills/skills → .claude/commands}/qa-engineer.md +109 -109
- package/{tech_hub_skills/skills → .claude/commands}/security-architect.md +135 -135
- package/{tech_hub_skills/skills → .claude/commands}/sre.md +109 -109
- package/{tech_hub_skills/skills → .claude/commands}/system-design.md +126 -126
- package/{tech_hub_skills/skills → .claude/commands}/technical-writer.md +101 -101
- package/.claude/package.json +46 -0
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/01-prompt-engineering/README.md +252 -252
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_ab_tester.py +356 -0
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/prompt_template_manager.py +274 -0
- package/.claude/roles/ai-engineer/skills/01-prompt-engineering/token_cost_estimator.py +324 -0
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/02-rag-pipeline/README.md +448 -448
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/document_chunker.py +336 -0
- package/.claude/roles/ai-engineer/skills/02-rag-pipeline/rag_pipeline.sql +213 -0
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/03-agent-orchestration/README.md +599 -599
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/04-llm-guardrails/README.md +735 -735
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/05-vector-embeddings/README.md +711 -711
- package/{tech_hub_skills → .claude}/roles/ai-engineer/skills/06-llm-evaluation/README.md +777 -777
- package/{tech_hub_skills → .claude}/roles/azure/skills/01-infrastructure-fundamentals/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/02-data-factory/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/03-synapse-analytics/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/04-databricks/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/05-functions/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/06-kubernetes-service/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/07-openai-service/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/08-machine-learning/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/09-storage-adls/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/10-networking/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/11-sql-cosmos/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/azure/skills/12-event-hubs/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/code-review/skills/01-automated-code-review/README.md +394 -394
- package/{tech_hub_skills → .claude}/roles/code-review/skills/02-pr-review-workflow/README.md +427 -427
- package/{tech_hub_skills → .claude}/roles/code-review/skills/03-code-quality-gates/README.md +518 -518
- package/{tech_hub_skills → .claude}/roles/code-review/skills/04-reviewer-assignment/README.md +504 -504
- package/{tech_hub_skills → .claude}/roles/code-review/skills/05-review-analytics/README.md +540 -540
- package/{tech_hub_skills → .claude}/roles/data-engineer/skills/01-lakehouse-architecture/README.md +550 -550
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/bronze_ingestion.py +337 -0
- package/.claude/roles/data-engineer/skills/01-lakehouse-architecture/medallion_queries.sql +300 -0
- package/{tech_hub_skills → .claude}/roles/data-engineer/skills/02-etl-pipeline/README.md +580 -580
- package/{tech_hub_skills → .claude}/roles/data-engineer/skills/03-data-quality/README.md +579 -579
- package/{tech_hub_skills → .claude}/roles/data-engineer/skills/04-streaming-pipelines/README.md +608 -608
- package/{tech_hub_skills → .claude}/roles/data-engineer/skills/05-performance-optimization/README.md +547 -547
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/01-data-catalog/README.md +112 -112
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/02-data-lineage/README.md +129 -129
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/03-data-quality-framework/README.md +182 -182
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/04-access-control/README.md +39 -39
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/05-master-data-management/README.md +40 -40
- package/{tech_hub_skills → .claude}/roles/data-governance/skills/06-compliance-privacy/README.md +46 -46
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/01-eda-automation/README.md +230 -230
- package/.claude/roles/data-scientist/skills/01-eda-automation/eda_generator.py +446 -0
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/02-statistical-modeling/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/03-feature-engineering/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/04-predictive-modeling/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/05-customer-analytics/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/06-campaign-analysis/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/07-experimentation/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/data-scientist/skills/08-data-visualization/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/01-cicd-pipeline/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/02-container-orchestration/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/03-infrastructure-as-code/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/04-gitops/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/05-environment-management/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/06-automated-testing/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/07-release-management/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/08-monitoring-alerting/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/devops/skills/09-devsecops/README.md +265 -265
- package/{tech_hub_skills → .claude}/roles/finops/skills/01-cost-visibility/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/02-resource-tagging/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/03-budget-management/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/04-reserved-instances/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/05-spot-optimization/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/06-storage-tiering/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/07-compute-rightsizing/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/finops/skills/08-chargeback/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/01-mlops-pipeline/README.md +566 -566
- package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/02-feature-engineering/README.md +655 -655
- package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/03-model-training/README.md +704 -704
- package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/04-model-serving/README.md +845 -845
- package/{tech_hub_skills → .claude}/roles/ml-engineer/skills/05-model-monitoring/README.md +874 -874
- package/{tech_hub_skills → .claude}/roles/mlops/skills/01-ml-pipeline-orchestration/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/02-experiment-tracking/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/03-model-registry/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/04-feature-store/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/05-model-deployment/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/06-model-observability/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/07-data-versioning/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/08-ab-testing/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/mlops/skills/09-automated-retraining/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/01-internal-developer-platform/README.md +153 -153
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/02-self-service-infrastructure/README.md +57 -57
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/03-slo-sli-management/README.md +59 -59
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/04-developer-experience/README.md +57 -57
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/05-incident-management/README.md +73 -73
- package/{tech_hub_skills → .claude}/roles/platform-engineer/skills/06-capacity-management/README.md +59 -59
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/01-requirements-discovery/README.md +407 -407
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/02-user-research/README.md +382 -382
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/03-brainstorming-ideation/README.md +437 -437
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/04-ux-design/README.md +496 -496
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/05-product-market-fit/README.md +376 -376
- package/{tech_hub_skills → .claude}/roles/product-designer/skills/06-stakeholder-management/README.md +412 -412
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/01-pii-detection/README.md +319 -319
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/02-threat-modeling/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/03-infrastructure-security/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/04-iam/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/05-application-security/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/06-secrets-management/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/security-architect/skills/07-security-monitoring/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/01-architecture-patterns/README.md +337 -337
- package/{tech_hub_skills → .claude}/roles/system-design/skills/02-requirements-engineering/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/03-scalability/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/04-high-availability/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/05-cost-optimization-design/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/06-api-design/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/07-observability-architecture/README.md +264 -264
- package/{tech_hub_skills → .claude}/roles/system-design/skills/08-process-automation/PROCESS_TEMPLATE.md +336 -336
- package/{tech_hub_skills → .claude}/roles/system-design/skills/08-process-automation/README.md +521 -521
- package/.claude/roles/system-design/skills/08-process-automation/ai_prompt_generator.py +744 -0
- package/.claude/roles/system-design/skills/08-process-automation/automation_recommender.py +688 -0
- package/.claude/roles/system-design/skills/08-process-automation/plan_generator.py +679 -0
- package/.claude/roles/system-design/skills/08-process-automation/process_analyzer.py +528 -0
- package/.claude/roles/system-design/skills/08-process-automation/process_parser.py +684 -0
- package/.claude/roles/system-design/skills/08-process-automation/role_matcher.py +615 -0
- package/.claude/skills/README.md +336 -0
- package/.claude/skills/ai-engineer.md +104 -0
- package/.claude/skills/aws.md +143 -0
- package/.claude/skills/azure.md +149 -0
- package/.claude/skills/backend-developer.md +108 -0
- package/.claude/skills/code-review.md +399 -0
- package/.claude/skills/compliance-automation.md +747 -0
- package/.claude/skills/compliance-officer.md +108 -0
- package/.claude/skills/data-engineer.md +113 -0
- package/.claude/skills/data-governance.md +102 -0
- package/.claude/skills/data-scientist.md +123 -0
- package/.claude/skills/database-admin.md +109 -0
- package/.claude/skills/devops.md +160 -0
- package/.claude/skills/docker.md +160 -0
- package/.claude/skills/enterprise-dashboard.md +613 -0
- package/.claude/skills/finops.md +184 -0
- package/.claude/skills/frontend-developer.md +108 -0
- package/.claude/skills/gcp.md +143 -0
- package/.claude/skills/ml-engineer.md +115 -0
- package/.claude/skills/mlops.md +187 -0
- package/.claude/skills/network-engineer.md +109 -0
- package/.claude/skills/optimization-advisor.md +329 -0
- package/.claude/skills/orchestrator.md +623 -0
- package/.claude/skills/platform-engineer.md +102 -0
- package/.claude/skills/process-automation.md +226 -0
- package/.claude/skills/process-changelog.md +184 -0
- package/.claude/skills/process-documentation.md +484 -0
- package/.claude/skills/process-kanban.md +324 -0
- package/.claude/skills/process-versioning.md +214 -0
- package/.claude/skills/product-designer.md +104 -0
- package/.claude/skills/project-starter.md +443 -0
- package/.claude/skills/qa-engineer.md +109 -0
- package/.claude/skills/security-architect.md +135 -0
- package/.claude/skills/sre.md +109 -0
- package/.claude/skills/system-design.md +126 -0
- package/.claude/skills/technical-writer.md +101 -0
- package/.gitattributes +2 -0
- package/GITHUB_COPILOT.md +106 -0
- package/README.md +192 -291
- package/package.json +16 -46
- package/bin/cli.js +0 -241
@@ -1,747 +1,747 @@

# Compliance Automation

Automated compliance checking, audit trails, and regulatory requirement validation for enterprise software delivery.

## Role Overview

**Agent**: Compliance Automation Specialist
**Focus**: Regulatory compliance, audit trails, policy enforcement, certification readiness
**Skills**: Integrated compliance checking throughout the SDLC

## When to Use

Invoke this role when you need to:
- Automate compliance checks in CI/CD
- Generate audit trails and evidence
- Validate against regulatory frameworks (GDPR, SOC 2, HIPAA, PCI-DSS)
- Prepare for compliance audits
- Implement policy-as-code

## Compliance Frameworks Supported

| Framework | Focus | Key Requirements |
|-----------|-------|------------------|
| SOC 2 | Security Controls | Access control, encryption, monitoring |
| GDPR | Data Privacy | Consent, data rights, breach notification |
| HIPAA | Healthcare Data | PHI protection, access logs, encryption |
| PCI-DSS | Payment Data | Cardholder data protection, network security |
| ISO 27001 | InfoSec Management | Risk assessment, security controls |
| FedRAMP | Government Cloud | Security authorization, continuous monitoring |

## Compliance as Code

### Policy Engine Configuration

```yaml
# .compliance/policies.yml
version: 1
framework: soc2

policies:
  # CC6.1 - Logical and Physical Access Controls
  access-control:
    enabled: true
    rules:
      - id: AC-001
        name: "No hardcoded credentials"
        check: secrets-scan
        severity: critical
        remediation: "Use secrets manager (Azure Key Vault, AWS Secrets Manager)"

      - id: AC-002
        name: "Enforce MFA"
        check: iam-policy
        severity: high
        remediation: "Configure MFA for all user accounts"

      - id: AC-003
        name: "Principle of least privilege"
        check: rbac-review
        severity: high
        remediation: "Review and minimize role permissions"

  # CC6.7 - Encryption in Transit and at Rest
  encryption:
    enabled: true
    rules:
      - id: EN-001
        name: "TLS 1.2+ required"
        check: tls-version
        severity: critical
        remediation: "Upgrade to TLS 1.2 or higher"

      - id: EN-002
        name: "Data at rest encryption"
        check: storage-encryption
        severity: high
        remediation: "Enable encryption for all storage resources"

  # CC7.2 - Monitoring and Logging
  monitoring:
    enabled: true
    rules:
      - id: MO-001
        name: "Audit logging enabled"
        check: audit-logs
        severity: high
        remediation: "Enable audit logging for all services"

      - id: MO-002
        name: "Log retention >= 1 year"
        check: log-retention
        severity: medium
        remediation: "Configure log retention to 365 days minimum"

  # CC8.1 - Change Management
  change-management:
    enabled: true
    rules:
      - id: CM-001
        name: "All changes via PR"
        check: branch-protection
        severity: high
        remediation: "Enable branch protection with required reviews"

      - id: CM-002
        name: "Approved before deploy"
        check: deployment-approvals
        severity: high
        remediation: "Configure environment protection rules"
```

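The `check` names above (`secrets-scan`, `tls-version`, and so on) are resolved by whatever policy engine consumes this file. As a minimal sketch of the policy-as-code idea only, the snippet below loads `.compliance/policies.yml` and enumerates the enabled rules; it assumes PyYAML is available, and the dispatch from check names to real checker functions is left out.

```python
# Sketch only: load .compliance/policies.yml and list the enabled rules.
# Assumes PyYAML is installed; mapping check names to real checks is illustrative.
from pathlib import Path
import yaml


def load_enabled_rules(path: str = ".compliance/policies.yml") -> list:
    config = yaml.safe_load(Path(path).read_text())
    rules = []
    for policy_name, policy in (config.get("policies") or {}).items():
        if not policy.get("enabled", False):
            continue  # skip disabled policy groups
        for rule in policy.get("rules", []):
            rules.append({"policy": policy_name, **rule})
    return rules


if __name__ == "__main__":
    for rule in load_enabled_rules():
        # e.g. "AC-001 [critical] No hardcoded credentials -> secrets-scan"
        print(f"{rule['id']} [{rule['severity']}] {rule['name']} -> {rule['check']}")
```
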
### GitHub Actions Compliance Workflow

```yaml
# .github/workflows/compliance-check.yml
name: Compliance Verification
on:
  pull_request:
    branches: [main]
  push:
    branches: [main]
  schedule:
    - cron: '0 0 * * 0' # Weekly audit

permissions:
  contents: read
  security-events: write
  pull-requests: write

jobs:
  soc2-controls:
    name: SOC 2 Control Verification
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      # CC6.1 - Access Control
      - name: Check for hardcoded secrets
        uses: gitleaks/gitleaks-action@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Verify branch protection
        uses: actions/github-script@v7
        with:
          script: |
            const { data: protection } = await github.rest.repos.getBranchProtection({
              owner: context.repo.owner,
              repo: context.repo.repo,
              branch: 'main'
            }).catch(() => ({ data: null }));

            const checks = {
              'Required reviews': protection?.required_pull_request_reviews?.required_approving_review_count >= 1,
              'Dismiss stale reviews': protection?.required_pull_request_reviews?.dismiss_stale_reviews,
              'Require status checks': protection?.required_status_checks?.strict,
              'Enforce admins': protection?.enforce_admins?.enabled
            };

            let passed = true;
            for (const [check, result] of Object.entries(checks)) {
              console.log(`${check}: ${result ? '✅' : '❌'}`);
              if (!result) passed = false;
            }

            if (!passed) {
              core.setFailed('Branch protection does not meet SOC 2 requirements');
            }

      # CC6.7 - Encryption verification
      - name: Check TLS configuration
        run: |
          # Verify TLS version in code
          if grep -r "ssl_version.*SSLv" --include="*.py" .; then
            echo "::error::SSLv2/v3 detected - upgrade to TLS 1.2+"
            exit 1
          fi

      # CC7.2 - Logging verification
      - name: Verify logging configuration
        run: |
          # Check for logging configuration
          if ! grep -r "logging\|logger\|audit" --include="*.py" --include="*.ts" .; then
            echo "::warning::No logging configuration detected"
          fi

      # Generate compliance evidence
      - name: Generate compliance report
        run: |
          mkdir -p compliance-evidence
          echo "# SOC 2 Compliance Evidence" > compliance-evidence/report.md
          echo "Generated: $(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> compliance-evidence/report.md
          echo "Commit: ${{ github.sha }}" >> compliance-evidence/report.md
          echo "" >> compliance-evidence/report.md
          echo "## Control Verification" >> compliance-evidence/report.md
          echo "- CC6.1 Access Control: VERIFIED" >> compliance-evidence/report.md
          echo "- CC6.7 Encryption: VERIFIED" >> compliance-evidence/report.md
          echo "- CC7.2 Monitoring: VERIFIED" >> compliance-evidence/report.md
          echo "- CC8.1 Change Management: VERIFIED" >> compliance-evidence/report.md

      - name: Upload compliance evidence
        uses: actions/upload-artifact@v4
        with:
          name: compliance-evidence-${{ github.sha }}
          path: compliance-evidence/
          retention-days: 365 # Keep for audit purposes

  gdpr-checks:
    name: GDPR Compliance Checks
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Check for PII handling
        run: |
          # Scan for potential PII fields without proper handling
          PII_PATTERNS="email|phone|address|ssn|social.security|credit.card|password"
          FINDINGS=$(grep -rn -E "$PII_PATTERNS" --include="*.ts" --include="*.py" --include="*.js" . || true)

          if [ -n "$FINDINGS" ]; then
            echo "::warning::Potential PII fields detected - ensure proper handling"
            echo "$FINDINGS"
          fi

      - name: Verify data retention policies
        run: |
          # Check for data retention configuration
          if ! grep -r "retention\|delete.*days\|ttl\|expir" --include="*.yml" --include="*.yaml" --include="*.json" .; then
            echo "::warning::No data retention policy configuration detected"
          fi

      - name: Check consent management
        run: |
          # Look for consent handling code
          if grep -r "consent\|gdpr\|opt.in\|opt.out" --include="*.ts" --include="*.py" .; then
            echo "Consent management code detected"
          else
            echo "::warning::No consent management code detected"
          fi

  license-compliance:
    name: License Compliance
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install dependencies
        run: npm ci

      - name: Check licenses
        run: |
          npx license-checker --json --out licenses.json

          # Define prohibited licenses
          PROHIBITED="GPL|AGPL|SSPL|BUSL"

          # Check for prohibited licenses
          VIOLATIONS=$(jq -r 'to_entries[] | select(.value.licenses | test("'"$PROHIBITED"'")) | "\(.key): \(.value.licenses)"' licenses.json || true)

          if [ -n "$VIOLATIONS" ]; then
            echo "::error::Prohibited license detected:"
            echo "$VIOLATIONS"
            exit 1
          fi

      - name: Generate SBOM
        run: |
          npx @cyclonedx/cyclonedx-npm --output-file sbom.json

      - name: Upload SBOM
        uses: actions/upload-artifact@v4
        with:
          name: sbom
          path: sbom.json

  audit-trail:
    name: Generate Audit Trail
    runs-on: ubuntu-latest
    needs: [soc2-controls, gdpr-checks, license-compliance]
    steps:
      - uses: actions/checkout@v4

      - name: Generate audit entry
        run: |
          cat > audit-entry.json << EOF
          {
            "timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
            "event": "compliance_verification",
            "repository": "${{ github.repository }}",
            "commit": "${{ github.sha }}",
            "actor": "${{ github.actor }}",
            "workflow_run": "${{ github.run_id }}",
            "results": {
              "soc2": "PASSED",
              "gdpr": "PASSED",
              "license": "PASSED"
            },
            "evidence_location": "compliance-evidence-${{ github.sha }}"
          }
          EOF

      - name: Upload audit entry
        uses: actions/upload-artifact@v4
        with:
          name: audit-trail
          path: audit-entry.json
```

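The branch-protection step above relies on `actions/github-script`, but the same CC8.1 check can be run outside Actions against the GitHub REST API. The sketch below is one way to do that, assuming `requests` is installed, a `GITHUB_TOKEN` with permission to read branch protection, and a placeholder `org/repo` name.

```python
# Sketch: verify branch protection via the GitHub REST API outside of Actions.
# Assumes `requests` is installed and GITHUB_TOKEN can read protection rules;
# "org/repo" is a placeholder, not a repository shipped with this package.
import os
import sys
import requests


def check_branch_protection(repo: str = "org/repo", branch: str = "main") -> bool:
    resp = requests.get(
        f"https://api.github.com/repos/{repo}/branches/{branch}/protection",
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        },
        timeout=30,
    )
    if resp.status_code != 200:
        print(f"No readable protection rules for {branch} ({resp.status_code})")
        return False

    p = resp.json()
    checks = {
        "Required reviews": p.get("required_pull_request_reviews", {}).get("required_approving_review_count", 0) >= 1,
        "Dismiss stale reviews": p.get("required_pull_request_reviews", {}).get("dismiss_stale_reviews", False),
        "Require status checks": p.get("required_status_checks", {}).get("strict", False),
        "Enforce admins": p.get("enforce_admins", {}).get("enabled", False),
    }
    for name, ok in checks.items():
        print(f"{name}: {'PASS' if ok else 'FAIL'}")
    return all(checks.values())


if __name__ == "__main__":
    sys.exit(0 if check_branch_protection() else 1)
```
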
### Compliance Checker Script

```python
#!/usr/bin/env python3
"""Enterprise compliance checker for CI/CD integration."""

import json
import os
import re
import subprocess
import sys
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Dict, Optional, Any

class Severity(Enum):
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"

class ComplianceStatus(Enum):
    PASSED = "passed"
    FAILED = "failed"
    WARNING = "warning"
    SKIPPED = "skipped"

@dataclass
class ComplianceFinding:
    """A compliance check finding."""
    rule_id: str
    rule_name: str
    framework: str
    control: str
    status: ComplianceStatus
    severity: Severity
    message: str
    evidence: Optional[str] = None
    remediation: Optional[str] = None

@dataclass
class ComplianceReport:
    """Complete compliance report."""
    timestamp: str
    repository: str
    commit: str
    frameworks: List[str]
    findings: List[ComplianceFinding] = field(default_factory=list)
    overall_status: ComplianceStatus = ComplianceStatus.PASSED

    def add_finding(self, finding: ComplianceFinding) -> None:
        """Add a finding and update overall status."""
        self.findings.append(finding)
        if finding.status == ComplianceStatus.FAILED:
            if finding.severity in (Severity.CRITICAL, Severity.HIGH):
                self.overall_status = ComplianceStatus.FAILED

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "timestamp": self.timestamp,
            "repository": self.repository,
            "commit": self.commit,
            "frameworks": self.frameworks,
            "overall_status": self.overall_status.value,
            "summary": {
                "total": len(self.findings),
                "passed": len([f for f in self.findings if f.status == ComplianceStatus.PASSED]),
                "failed": len([f for f in self.findings if f.status == ComplianceStatus.FAILED]),
                "warnings": len([f for f in self.findings if f.status == ComplianceStatus.WARNING]),
            },
            "findings": [
                {
                    "rule_id": f.rule_id,
                    "rule_name": f.rule_name,
                    "framework": f.framework,
                    "control": f.control,
                    "status": f.status.value,
                    "severity": f.severity.value,
                    "message": f.message,
                    "evidence": f.evidence,
                    "remediation": f.remediation,
                }
                for f in self.findings
            ]
        }


class ComplianceChecker:
    """Enterprise compliance checker."""

    def __init__(self, project_path: str = "."):
        self.project_path = Path(project_path)
        self.report = ComplianceReport(
            timestamp=datetime.utcnow().isoformat() + "Z",
            repository=os.getenv("GITHUB_REPOSITORY", "unknown"),
            commit=os.getenv("GITHUB_SHA", "unknown"),
            frameworks=["SOC2", "GDPR", "LICENSE"]
        )

    def run_all_checks(self) -> ComplianceReport:
        """Run all compliance checks."""
        self.check_secrets()
        self.check_encryption()
        self.check_logging()
        self.check_pii_handling()
        self.check_licenses()
        self.check_dependencies()
        return self.report

    def check_secrets(self) -> None:
        """Check for hardcoded secrets (SOC 2 CC6.1)."""
        secret_patterns = [
            r'password\s*=\s*["\'][^"\']+["\']',
            r'api[_-]?key\s*=\s*["\'][^"\']+["\']',
            r'secret\s*=\s*["\'][^"\']+["\']',
            r'token\s*=\s*["\'][^"\']+["\']',
            r'-----BEGIN\s+(?:RSA\s+)?PRIVATE\s+KEY-----',
        ]

        findings = []
        for pattern in secret_patterns:
            for file in self.project_path.rglob("*"):
                if file.is_file() and file.suffix in (".py", ".js", ".ts", ".yaml", ".yml", ".json"):
                    try:
                        content = file.read_text(errors="ignore")
                        matches = re.findall(pattern, content, re.IGNORECASE)
                        if matches:
                            findings.append(f"{file}: {len(matches)} potential secrets")
                    except Exception:
                        pass

        if findings:
            self.report.add_finding(ComplianceFinding(
                rule_id="AC-001",
                rule_name="No hardcoded credentials",
                framework="SOC2",
                control="CC6.1",
                status=ComplianceStatus.FAILED,
                severity=Severity.CRITICAL,
                message=f"Found {len(findings)} files with potential secrets",
                evidence="\n".join(findings[:5]),
                remediation="Use environment variables or secrets manager"
            ))
        else:
            self.report.add_finding(ComplianceFinding(
                rule_id="AC-001",
                rule_name="No hardcoded credentials",
                framework="SOC2",
                control="CC6.1",
                status=ComplianceStatus.PASSED,
                severity=Severity.CRITICAL,
                message="No hardcoded credentials detected"
            ))

    def check_encryption(self) -> None:
        """Check encryption configuration (SOC 2 CC6.7)."""
        insecure_patterns = [
            r'ssl_version.*SSLv[23]',
            r'TLSv1[^.]',
            r'MD5|SHA1',
            r'DES|RC4',
        ]

        issues = []
        for file in self.project_path.rglob("*.py"):
            try:
                content = file.read_text(errors="ignore")
                for pattern in insecure_patterns:
                    if re.search(pattern, content):
                        issues.append(f"{file}: Insecure crypto detected")
            except Exception:
                pass

        if issues:
            self.report.add_finding(ComplianceFinding(
                rule_id="EN-001",
                rule_name="Secure encryption",
                framework="SOC2",
                control="CC6.7",
                status=ComplianceStatus.FAILED,
                severity=Severity.HIGH,
                message=f"Found {len(issues)} insecure encryption usages",
                evidence="\n".join(issues[:5]),
                remediation="Use TLS 1.2+, AES-256, SHA-256 or stronger"
            ))
        else:
            self.report.add_finding(ComplianceFinding(
                rule_id="EN-001",
                rule_name="Secure encryption",
                framework="SOC2",
                control="CC6.7",
                status=ComplianceStatus.PASSED,
                severity=Severity.HIGH,
                message="Encryption configuration appears secure"
            ))

    def check_logging(self) -> None:
        """Check logging configuration (SOC 2 CC7.2)."""
        has_logging = False
        for file in self.project_path.rglob("*.py"):
            try:
                content = file.read_text(errors="ignore")
                if re.search(r'import\s+logging|from\s+logging', content):
                    has_logging = True
                    break
            except Exception:
                pass

        self.report.add_finding(ComplianceFinding(
            rule_id="MO-001",
            rule_name="Audit logging enabled",
            framework="SOC2",
            control="CC7.2",
            status=ComplianceStatus.PASSED if has_logging else ComplianceStatus.WARNING,
            severity=Severity.HIGH,
            message="Logging configuration found" if has_logging else "No logging detected",
            remediation=None if has_logging else "Implement structured logging"
        ))

    def check_pii_handling(self) -> None:
        """Check for PII handling (GDPR)."""
        pii_fields = [
            "email", "phone", "address", "ssn", "date_of_birth",
            "credit_card", "passport", "driver_license"
        ]

        pii_locations = []
        for file in self.project_path.rglob("*"):
            if file.suffix in (".py", ".ts", ".js"):
                try:
                    content = file.read_text(errors="ignore")
                    for field in pii_fields:
                        if re.search(rf'\b{field}\b', content, re.IGNORECASE):
                            pii_locations.append(f"{file}: {field}")
                except Exception:
                    pass

        if pii_locations:
            self.report.add_finding(ComplianceFinding(
                rule_id="GDPR-001",
                rule_name="PII data handling",
                framework="GDPR",
                control="Article 5",
                status=ComplianceStatus.WARNING,
                severity=Severity.HIGH,
                message=f"Found {len(pii_locations)} potential PII fields",
                evidence="\n".join(pii_locations[:10]),
                remediation="Ensure PII is encrypted, has retention policy, and consent is captured"
            ))

    def check_licenses(self) -> None:
        """Check dependency licenses."""
        prohibited = ["GPL", "AGPL", "SSPL"]

        # Run license check if package.json exists
        package_json = self.project_path / "package.json"
        if package_json.exists():
            try:
                result = subprocess.run(
                    ["npx", "license-checker", "--json"],
                    capture_output=True,
                    text=True,
                    cwd=self.project_path,
                    timeout=60
                )
                if result.returncode == 0:
                    licenses = json.loads(result.stdout)
                    violations = [
                        f"{pkg}: {info.get('licenses', 'unknown')}"
                        for pkg, info in licenses.items()
                        if any(p in str(info.get('licenses', '')) for p in prohibited)
                    ]

                    if violations:
                        self.report.add_finding(ComplianceFinding(
                            rule_id="LIC-001",
                            rule_name="License compliance",
                            framework="LICENSE",
                            control="OSS Policy",
                            status=ComplianceStatus.FAILED,
                            severity=Severity.HIGH,
                            message=f"Found {len(violations)} prohibited licenses",
                            evidence="\n".join(violations[:5]),
                            remediation="Replace dependencies with permissively licensed alternatives"
                        ))
                        return
            except Exception as e:
                print(f"License check error: {e}")

        self.report.add_finding(ComplianceFinding(
            rule_id="LIC-001",
            rule_name="License compliance",
            framework="LICENSE",
            control="OSS Policy",
            status=ComplianceStatus.PASSED,
            severity=Severity.HIGH,
            message="No prohibited licenses detected"
        ))

    def check_dependencies(self) -> None:
        """Check for vulnerable dependencies."""
        package_json = self.project_path / "package.json"
        if package_json.exists():
            try:
                result = subprocess.run(
                    ["npm", "audit", "--json"],
                    capture_output=True,
                    text=True,
                    cwd=self.project_path,
                    timeout=120
                )
                audit = json.loads(result.stdout)
                vulnerabilities = audit.get("metadata", {}).get("vulnerabilities", {})
                critical = vulnerabilities.get("critical", 0)
                high = vulnerabilities.get("high", 0)

                if critical > 0 or high > 0:
                    self.report.add_finding(ComplianceFinding(
                        rule_id="DEP-001",
                        rule_name="Dependency vulnerabilities",
                        framework="SOC2",
                        control="CC6.1",
                        status=ComplianceStatus.FAILED,
                        severity=Severity.CRITICAL if critical > 0 else Severity.HIGH,
                        message=f"Found {critical} critical, {high} high vulnerabilities",
                        remediation="Run 'npm audit fix' or update vulnerable packages"
                    ))
                    return
            except Exception as e:
                print(f"Audit error: {e}")

        self.report.add_finding(ComplianceFinding(
            rule_id="DEP-001",
            rule_name="Dependency vulnerabilities",
            framework="SOC2",
            control="CC6.1",
            status=ComplianceStatus.PASSED,
            severity=Severity.HIGH,
            message="No critical vulnerabilities detected"
        ))

    def generate_evidence(self) -> str:
        """Generate compliance evidence document."""
        report = self.report.to_dict()

        doc = f"""# Compliance Evidence Report

**Generated**: {report['timestamp']}
**Repository**: {report['repository']}
**Commit**: {report['commit']}
**Overall Status**: {report['overall_status'].upper()}

## Summary

- Total Checks: {report['summary']['total']}
- Passed: {report['summary']['passed']}
- Failed: {report['summary']['failed']}
- Warnings: {report['summary']['warnings']}

## Findings

"""
        for finding in report['findings']:
            status_icon = {
                'passed': '✅',
                'failed': '❌',
                'warning': '⚠️',
                'skipped': '⏭️'
            }.get(finding['status'], '❓')

            doc += f"""### {status_icon} {finding['rule_id']}: {finding['rule_name']}

- **Framework**: {finding['framework']}
- **Control**: {finding['control']}
- **Severity**: {finding['severity']}
- **Status**: {finding['status']}
- **Message**: {finding['message']}
"""
            if finding.get('evidence'):
                doc += f"\n**Evidence**:\n```\n{finding['evidence']}\n```\n"
            if finding.get('remediation'):
                doc += f"\n**Remediation**: {finding['remediation']}\n"
            doc += "\n---\n\n"

        return doc


if __name__ == "__main__":
    checker = ComplianceChecker()
    report = checker.run_all_checks()

    # Output JSON report
    print(json.dumps(report.to_dict(), indent=2))

    # Generate evidence document
    evidence = checker.generate_evidence()
    Path("compliance-evidence.md").write_text(evidence)

    # Exit with failure if critical issues found
    sys.exit(0 if report.overall_status == ComplianceStatus.PASSED else 1)
```

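The script is written as a CLI entry point, but the classes can also be reused directly. A minimal sketch, assuming the script above is saved as `compliance_checker.py` somewhere on the import path (that module name is not defined by the package itself):

```python
# Sketch: reusing the checker as a library rather than via __main__.
# Assumes the script above is saved as compliance_checker.py; the path below is an example.
from compliance_checker import ComplianceChecker, ComplianceStatus

checker = ComplianceChecker(project_path="services/payments")
report = checker.run_all_checks()

# Surface only the failed findings, e.g. for a PR comment or dashboard.
for finding in report.findings:
    if finding.status == ComplianceStatus.FAILED:
        print(f"{finding.rule_id} ({finding.framework} {finding.control}): {finding.message}")
```
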
## Enterprise Integration

### Mandatory Connections
- **Security Architect (sa-*)**: Security control verification
- **Data Governance (dg-*)**: Data handling compliance
- **DevOps (do-09)**: CI/CD pipeline integration

### Recommended Connections
- **Code Review (cr-03)**: Quality gates for compliance
- **Platform Engineer (pe-*)**: Infrastructure compliance

## Best Practices

1. **Automate everything**: Manual compliance is unsustainable
2. **Fail fast**: Block non-compliant changes in CI/CD
3. **Evidence collection**: Maintain audit trails automatically
4. **Continuous monitoring**: Don't wait for audits
5. **Policy as code**: Version control your compliance rules
6. **Remediation guidance**: Always provide fix instructions

## Quick Reference

```bash
# In Claude Code
@compliance-automation "Set up SOC 2 compliance checks for our CI/CD"
@compliance-automation "Generate GDPR compliance evidence for audit"
@compliance-automation "Check license compliance for our dependencies"
```
1
|
+
# Compliance Automation
|
|
2
|
+
|
|
3
|
+
Automated compliance checking, audit trails, and regulatory requirement validation for enterprise software delivery.
|
|
4
|
+
|
|
5
|
+
## Role Overview
|
|
6
|
+
|
|
7
|
+
**Agent**: Compliance Automation Specialist
|
|
8
|
+
**Focus**: Regulatory compliance, audit trails, policy enforcement, certification readiness
|
|
9
|
+
**Skills**: Integrated compliance checking throughout the SDLC
|
|
10
|
+
|
|
11
|
+
## When to Use
|
|
12
|
+
|
|
13
|
+
Invoke this role when you need to:
|
|
14
|
+
- Automate compliance checks in CI/CD
|
|
15
|
+
- Generate audit trails and evidence
|
|
16
|
+
- Validate against regulatory frameworks (GDPR, SOC 2, HIPAA, PCI-DSS)
|
|
17
|
+
- Prepare for compliance audits
|
|
18
|
+
- Implement policy-as-code
|
|
19
|
+
|
|
20
|
+
## Compliance Frameworks Supported
|
|
21
|
+
|
|
22
|
+
| Framework | Focus | Key Requirements |
|
|
23
|
+
|-----------|-------|------------------|
|
|
24
|
+
| SOC 2 | Security Controls | Access control, encryption, monitoring |
|
|
25
|
+
| GDPR | Data Privacy | Consent, data rights, breach notification |
|
|
26
|
+
| HIPAA | Healthcare Data | PHI protection, access logs, encryption |
|
|
27
|
+
| PCI-DSS | Payment Data | Cardholder data protection, network security |
|
|
28
|
+
| ISO 27001 | InfoSec Management | Risk assessment, security controls |
|
|
29
|
+
| FedRAMP | Government Cloud | Security authorization, continuous monitoring |
|
|
30
|
+
|
|
31
|
+
## Compliance as Code
|
|
32
|
+
|
|
33
|
+
### Policy Engine Configuration
|
|
34
|
+
|
|
35
|
+
```yaml
|
|
36
|
+
# .compliance/policies.yml
|
|
37
|
+
version: 1
|
|
38
|
+
framework: soc2
|
|
39
|
+
|
|
40
|
+
policies:
|
|
41
|
+
# CC6.1 - Logical and Physical Access Controls
|
|
42
|
+
access-control:
|
|
43
|
+
enabled: true
|
|
44
|
+
rules:
|
|
45
|
+
- id: AC-001
|
|
46
|
+
name: "No hardcoded credentials"
|
|
47
|
+
check: secrets-scan
|
|
48
|
+
severity: critical
|
|
49
|
+
remediation: "Use secrets manager (Azure Key Vault, AWS Secrets Manager)"
|
|
50
|
+
|
|
51
|
+
- id: AC-002
|
|
52
|
+
name: "Enforce MFA"
|
|
53
|
+
check: iam-policy
|
|
54
|
+
severity: high
|
|
55
|
+
remediation: "Configure MFA for all user accounts"
|
|
56
|
+
|
|
57
|
+
- id: AC-003
|
|
58
|
+
name: "Principle of least privilege"
|
|
59
|
+
check: rbac-review
|
|
60
|
+
severity: high
|
|
61
|
+
remediation: "Review and minimize role permissions"
|
|
62
|
+
|
|
63
|
+
# CC6.7 - Encryption in Transit and at Rest
|
|
64
|
+
encryption:
|
|
65
|
+
enabled: true
|
|
66
|
+
rules:
|
|
67
|
+
- id: EN-001
|
|
68
|
+
name: "TLS 1.2+ required"
|
|
69
|
+
check: tls-version
|
|
70
|
+
severity: critical
|
|
71
|
+
remediation: "Upgrade to TLS 1.2 or higher"
|
|
72
|
+
|
|
73
|
+
- id: EN-002
|
|
74
|
+
name: "Data at rest encryption"
|
|
75
|
+
check: storage-encryption
|
|
76
|
+
severity: high
|
|
77
|
+
remediation: "Enable encryption for all storage resources"
|
|
78
|
+
|
|
79
|
+
# CC7.2 - Monitoring and Logging
|
|
80
|
+
monitoring:
|
|
81
|
+
enabled: true
|
|
82
|
+
rules:
|
|
83
|
+
- id: MO-001
|
|
84
|
+
name: "Audit logging enabled"
|
|
85
|
+
check: audit-logs
|
|
86
|
+
severity: high
|
|
87
|
+
remediation: "Enable audit logging for all services"
|
|
88
|
+
|
|
89
|
+
- id: MO-002
|
|
90
|
+
name: "Log retention >= 1 year"
|
|
91
|
+
check: log-retention
|
|
92
|
+
severity: medium
|
|
93
|
+
remediation: "Configure log retention to 365 days minimum"
|
|
94
|
+
|
|
95
|
+
# CC8.1 - Change Management
|
|
96
|
+
change-management:
|
|
97
|
+
enabled: true
|
|
98
|
+
rules:
|
|
99
|
+
- id: CM-001
|
|
100
|
+
name: "All changes via PR"
|
|
101
|
+
check: branch-protection
|
|
102
|
+
severity: high
|
|
103
|
+
remediation: "Enable branch protection with required reviews"
|
|
104
|
+
|
|
105
|
+
- id: CM-002
|
|
106
|
+
name: "Approved before deploy"
|
|
107
|
+
check: deployment-approvals
|
|
108
|
+
severity: high
|
|
109
|
+
remediation: "Configure environment protection rules"
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### GitHub Actions Compliance Workflow
|
|
113
|
+
|
|
114
|
+
```yaml
|
|
115
|
+
# .github/workflows/compliance-check.yml
|
|
116
|
+
name: Compliance Verification
|
|
117
|
+
on:
|
|
118
|
+
pull_request:
|
|
119
|
+
branches: [main]
|
|
120
|
+
push:
|
|
121
|
+
branches: [main]
|
|
122
|
+
schedule:
|
|
123
|
+
- cron: '0 0 * * 0' # Weekly audit
|
|
124
|
+
|
|
125
|
+
permissions:
|
|
126
|
+
contents: read
|
|
127
|
+
security-events: write
|
|
128
|
+
pull-requests: write
|
|
129
|
+
|
|
130
|
+
jobs:
|
|
131
|
+
soc2-controls:
|
|
132
|
+
name: SOC 2 Control Verification
|
|
133
|
+
runs-on: ubuntu-latest
|
|
134
|
+
steps:
|
|
135
|
+
- uses: actions/checkout@v4
|
|
136
|
+
|
|
137
|
+
# CC6.1 - Access Control
|
|
138
|
+
- name: Check for hardcoded secrets
|
|
139
|
+
uses: gitleaks/gitleaks-action@v2
|
|
140
|
+
env:
|
|
141
|
+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
142
|
+
|
|
143
|
+
- name: Verify branch protection
|
|
144
|
+
uses: actions/github-script@v7
|
|
145
|
+
with:
|
|
146
|
+
script: |
|
|
147
|
+
const { data: protection } = await github.rest.repos.getBranchProtection({
|
|
148
|
+
owner: context.repo.owner,
|
|
149
|
+
repo: context.repo.repo,
|
|
150
|
+
branch: 'main'
|
|
151
|
+
}).catch(() => ({ data: null }));
|
|
152
|
+
|
|
153
|
+
const checks = {
|
|
154
|
+
'Required reviews': protection?.required_pull_request_reviews?.required_approving_review_count >= 1,
|
|
155
|
+
'Dismiss stale reviews': protection?.required_pull_request_reviews?.dismiss_stale_reviews,
|
|
156
|
+
'Require status checks': protection?.required_status_checks?.strict,
|
|
157
|
+
'Enforce admins': protection?.enforce_admins?.enabled
|
|
158
|
+
};
|
|
159
|
+
|
|
160
|
+
let passed = true;
|
|
161
|
+
for (const [check, result] of Object.entries(checks)) {
|
|
162
|
+
console.log(`${check}: ${result ? '✅' : '❌'}`);
|
|
163
|
+
if (!result) passed = false;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
if (!passed) {
|
|
167
|
+
core.setFailed('Branch protection does not meet SOC 2 requirements');
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
# CC6.7 - Encryption verification
|
|
171
|
+
- name: Check TLS configuration
|
|
172
|
+
run: |
|
|
173
|
+
# Verify TLS version in code
|
|
174
|
+
if grep -r "ssl_version.*SSLv" --include="*.py" .; then
|
|
175
|
+
echo "::error::SSLv2/v3 detected - upgrade to TLS 1.2+"
|
|
176
|
+
exit 1
|
|
177
|
+
fi
|
|
178
|
+
|
|
179
|
+
# CC7.2 - Logging verification
|
|
180
|
+
- name: Verify logging configuration
|
|
181
|
+
run: |
|
|
182
|
+
# Check for logging configuration
|
|
183
|
+
if ! grep -r "logging\|logger\|audit" --include="*.py" --include="*.ts" .; then
|
|
184
|
+
echo "::warning::No logging configuration detected"
|
|
185
|
+
fi
|
|
186
|
+
|
|
187
|
+
# Generate compliance evidence
|
|
188
|
+
- name: Generate compliance report
|
|
189
|
+
run: |
|
|
190
|
+
mkdir -p compliance-evidence
|
|
191
|
+
echo "# SOC 2 Compliance Evidence" > compliance-evidence/report.md
|
|
192
|
+
echo "Generated: $(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> compliance-evidence/report.md
|
|
193
|
+
echo "Commit: ${{ github.sha }}" >> compliance-evidence/report.md
|
|
194
|
+
echo "" >> compliance-evidence/report.md
|
|
195
|
+
echo "## Control Verification" >> compliance-evidence/report.md
|
|
196
|
+
echo "- CC6.1 Access Control: VERIFIED" >> compliance-evidence/report.md
|
|
197
|
+
echo "- CC6.7 Encryption: VERIFIED" >> compliance-evidence/report.md
|
|
198
|
+
echo "- CC7.2 Monitoring: VERIFIED" >> compliance-evidence/report.md
|
|
199
|
+
echo "- CC8.1 Change Management: VERIFIED" >> compliance-evidence/report.md
|
|
200
|
+
|
|
201
|
+
- name: Upload compliance evidence
|
|
202
|
+
uses: actions/upload-artifact@v4
|
|
203
|
+
with:
|
|
204
|
+
name: compliance-evidence-${{ github.sha }}
|
|
205
|
+
path: compliance-evidence/
|
|
206
|
+
retention-days: 365 # Keep for audit purposes
|
|
207
|
+
|
|
208
|
+
gdpr-checks:
|
|
209
|
+
name: GDPR Compliance Checks
|
|
210
|
+
runs-on: ubuntu-latest
|
|
211
|
+
steps:
|
|
212
|
+
- uses: actions/checkout@v4
|
|
213
|
+
|
|
214
|
+
- name: Check for PII handling
|
|
215
|
+
run: |
|
|
216
|
+
# Scan for potential PII fields without proper handling
|
|
217
|
+
PII_PATTERNS="email|phone|address|ssn|social.security|credit.card|password"
|
|
218
|
+
FINDINGS=$(grep -rn -E "$PII_PATTERNS" --include="*.ts" --include="*.py" --include="*.js" . || true)
|
|
219
|
+
|
|
220
|
+
if [ -n "$FINDINGS" ]; then
|
|
221
|
+
echo "::warning::Potential PII fields detected - ensure proper handling"
|
|
222
|
+
echo "$FINDINGS"
|
|
223
|
+
fi
|
|
224
|
+
|
|
225
|
+
- name: Verify data retention policies
|
|
226
|
+
run: |
|
|
227
|
+
# Check for data retention configuration
|
|
228
|
+
if ! grep -r "retention\|delete.*days\|ttl\|expir" --include="*.yml" --include="*.yaml" --include="*.json" .; then
|
|
229
|
+
echo "::warning::No data retention policy configuration detected"
|
|
230
|
+
fi
|
|
231
|
+
|
|
232
|
+
- name: Check consent management
|
|
233
|
+
run: |
|
|
234
|
+
# Look for consent handling code
|
|
235
|
+
if grep -r "consent\|gdpr\|opt.in\|opt.out" --include="*.ts" --include="*.py" .; then
|
|
236
|
+
echo "Consent management code detected"
|
|
237
|
+
else
|
|
238
|
+
echo "::warning::No consent management code detected"
|
|
239
|
+
fi
|
|
240
|
+
|
|
241
|
+
license-compliance:
|
|
242
|
+
name: License Compliance
|
|
243
|
+
runs-on: ubuntu-latest
|
|
244
|
+
steps:
|
|
245
|
+
- uses: actions/checkout@v4
|
|
246
|
+
|
|
247
|
+
- name: Setup Node.js
|
|
248
|
+
uses: actions/setup-node@v4
|
|
249
|
+
with:
|
|
250
|
+
node-version: '20'
|
|
251
|
+
|
|
252
|
+
- name: Install dependencies
|
|
253
|
+
run: npm ci
|
|
254
|
+
|
|
255
|
+
- name: Check licenses
|
|
256
|
+
run: |
|
|
257
|
+
npx license-checker --json --out licenses.json
|
|
258
|
+
|
|
259
|
+
# Define prohibited licenses
|
|
260
|
+
PROHIBITED="GPL|AGPL|SSPL|BUSL"
|
|
261
|
+
|
|
262
|
+
# Check for prohibited licenses
|
|
263
|
+
VIOLATIONS=$(jq -r 'to_entries[] | select(.value.licenses | test("'"$PROHIBITED"'")) | "\(.key): \(.value.licenses)"' licenses.json || true)
|
|
264
|
+
|
|
265
|
+
if [ -n "$VIOLATIONS" ]; then
|
|
266
|
+
echo "::error::Prohibited license detected:"
|
|
267
|
+
echo "$VIOLATIONS"
|
|
268
|
+
exit 1
|
|
269
|
+
fi
|
|
270
|
+
|
|
271
|
+
- name: Generate SBOM
|
|
272
|
+
run: |
|
|
273
|
+
npx @cyclonedx/cyclonedx-npm --output-file sbom.json
|
|
274
|
+
|
|
275
|
+
- name: Upload SBOM
|
|
276
|
+
uses: actions/upload-artifact@v4
|
|
277
|
+
with:
|
|
278
|
+
name: sbom
|
|
279
|
+
path: sbom.json
|
|
280
|
+
|
|
281
|
+
audit-trail:
|
|
282
|
+
name: Generate Audit Trail
|
|
283
|
+
runs-on: ubuntu-latest
|
|
284
|
+
needs: [soc2-controls, gdpr-checks, license-compliance]
|
|
285
|
+
steps:
|
|
286
|
+
- uses: actions/checkout@v4
|
|
287
|
+
|
|
288
|
+
- name: Generate audit entry
|
|
289
|
+
run: |
|
|
290
|
+
cat > audit-entry.json << EOF
|
|
291
|
+
{
|
|
292
|
+
"timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")",
|
|
293
|
+
"event": "compliance_verification",
|
|
294
|
+
"repository": "${{ github.repository }}",
|
|
295
|
+
"commit": "${{ github.sha }}",
|
|
296
|
+
"actor": "${{ github.actor }}",
|
|
297
|
+
"workflow_run": "${{ github.run_id }}",
|
|
298
|
+
"results": {
|
|
299
|
+
"soc2": "PASSED",
|
|
300
|
+
"gdpr": "PASSED",
|
|
301
|
+
"license": "PASSED"
|
|
302
|
+
},
|
|
303
|
+
"evidence_location": "compliance-evidence-${{ github.sha }}"
|
|
304
|
+
}
|
|
305
|
+
EOF
|
|
306
|
+
|
|
307
|
+
- name: Upload audit entry
|
|
308
|
+
uses: actions/upload-artifact@v4
|
|
309
|
+
with:
|
|
310
|
+
name: audit-trail
|
|
311
|
+
path: audit-entry.json
|
|
312
|
+
```
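
The `audit-trail` job writes `audit-entry.json` with a shell heredoc, so a malformed entry would only surface when an auditor opens the artifact. As a minimal sketch (not part of the workflow above), a helper along the following lines could verify the entry before it is uploaded; the script name and the step that would run it are assumptions, and only the field names are taken from the heredoc.

```python
# validate_audit_entry.py - hypothetical helper; only the field names come
# from the heredoc written by the audit-trail job above.
import json
import sys
from pathlib import Path

REQUIRED_KEYS = {
    "timestamp", "event", "repository", "commit",
    "actor", "workflow_run", "results", "evidence_location",
}


def validate(path: str = "audit-entry.json") -> bool:
    """Return True when the audit entry has every expected field and PASSED/FAILED results."""
    entry = json.loads(Path(path).read_text())
    missing = REQUIRED_KEYS - entry.keys()
    if missing:
        print(f"::error::audit entry missing fields: {sorted(missing)}")
        return False
    bad = {k: v for k, v in entry["results"].items() if v not in ("PASSED", "FAILED")}
    if bad:
        print(f"::error::unexpected result values: {bad}")
        return False
    return True


if __name__ == "__main__":
    sys.exit(0 if validate() else 1)
```

If adopted, it could run as an extra `run:` step between "Generate audit entry" and "Upload audit entry".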

### Compliance Checker Script

```python
#!/usr/bin/env python3
"""Enterprise compliance checker for CI/CD integration."""

import json
import os
import re
import subprocess
import sys
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Dict, Optional, Any

class Severity(Enum):
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"

class ComplianceStatus(Enum):
    PASSED = "passed"
    FAILED = "failed"
    WARNING = "warning"
    SKIPPED = "skipped"

@dataclass
class ComplianceFinding:
    """A compliance check finding."""
    rule_id: str
    rule_name: str
    framework: str
    control: str
    status: ComplianceStatus
    severity: Severity
    message: str
    evidence: Optional[str] = None
    remediation: Optional[str] = None

@dataclass
class ComplianceReport:
    """Complete compliance report."""
    timestamp: str
    repository: str
    commit: str
    frameworks: List[str]
    findings: List[ComplianceFinding] = field(default_factory=list)
    overall_status: ComplianceStatus = ComplianceStatus.PASSED

    def add_finding(self, finding: ComplianceFinding) -> None:
        """Add a finding and update overall status."""
        self.findings.append(finding)
        if finding.status == ComplianceStatus.FAILED:
            if finding.severity in (Severity.CRITICAL, Severity.HIGH):
                self.overall_status = ComplianceStatus.FAILED

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "timestamp": self.timestamp,
            "repository": self.repository,
            "commit": self.commit,
            "frameworks": self.frameworks,
            "overall_status": self.overall_status.value,
            "summary": {
                "total": len(self.findings),
                "passed": len([f for f in self.findings if f.status == ComplianceStatus.PASSED]),
                "failed": len([f for f in self.findings if f.status == ComplianceStatus.FAILED]),
                "warnings": len([f for f in self.findings if f.status == ComplianceStatus.WARNING]),
            },
            "findings": [
                {
                    "rule_id": f.rule_id,
                    "rule_name": f.rule_name,
                    "framework": f.framework,
                    "control": f.control,
                    "status": f.status.value,
                    "severity": f.severity.value,
                    "message": f.message,
                    "evidence": f.evidence,
                    "remediation": f.remediation,
                }
                for f in self.findings
            ]
        }


class ComplianceChecker:
    """Enterprise compliance checker."""

    def __init__(self, project_path: str = "."):
        self.project_path = Path(project_path)
        self.report = ComplianceReport(
            timestamp=datetime.utcnow().isoformat() + "Z",
            repository=os.getenv("GITHUB_REPOSITORY", "unknown"),
            commit=os.getenv("GITHUB_SHA", "unknown"),
            frameworks=["SOC2", "GDPR", "LICENSE"]
        )

    def run_all_checks(self) -> ComplianceReport:
        """Run all compliance checks."""
        self.check_secrets()
        self.check_encryption()
        self.check_logging()
        self.check_pii_handling()
        self.check_licenses()
        self.check_dependencies()
        return self.report

    def check_secrets(self) -> None:
        """Check for hardcoded secrets (SOC 2 CC6.1)."""
        secret_patterns = [
            r'password\s*=\s*["\'][^"\']+["\']',
            r'api[_-]?key\s*=\s*["\'][^"\']+["\']',
            r'secret\s*=\s*["\'][^"\']+["\']',
            r'token\s*=\s*["\'][^"\']+["\']',
            r'-----BEGIN\s+(?:RSA\s+)?PRIVATE\s+KEY-----',
        ]

        findings = []
        for pattern in secret_patterns:
            for file in self.project_path.rglob("*"):
                if file.is_file() and file.suffix in (".py", ".js", ".ts", ".yaml", ".yml", ".json"):
                    try:
                        content = file.read_text(errors="ignore")
                        matches = re.findall(pattern, content, re.IGNORECASE)
                        if matches:
                            findings.append(f"{file}: {len(matches)} potential secrets")
                    except Exception:
                        pass

        if findings:
            self.report.add_finding(ComplianceFinding(
                rule_id="AC-001",
                rule_name="No hardcoded credentials",
                framework="SOC2",
                control="CC6.1",
                status=ComplianceStatus.FAILED,
                severity=Severity.CRITICAL,
                message=f"Found {len(findings)} files with potential secrets",
                evidence="\n".join(findings[:5]),
                remediation="Use environment variables or secrets manager"
            ))
        else:
            self.report.add_finding(ComplianceFinding(
                rule_id="AC-001",
                rule_name="No hardcoded credentials",
                framework="SOC2",
                control="CC6.1",
                status=ComplianceStatus.PASSED,
                severity=Severity.CRITICAL,
                message="No hardcoded credentials detected"
            ))

    def check_encryption(self) -> None:
        """Check encryption configuration (SOC 2 CC6.7)."""
        insecure_patterns = [
            r'ssl_version.*SSLv[23]',
            r'TLSv1[^.]',
            r'MD5|SHA1',
            r'DES|RC4',
        ]

        issues = []
        for file in self.project_path.rglob("*.py"):
            try:
                content = file.read_text(errors="ignore")
                for pattern in insecure_patterns:
                    if re.search(pattern, content):
                        issues.append(f"{file}: Insecure crypto detected")
            except Exception:
                pass

        if issues:
            self.report.add_finding(ComplianceFinding(
                rule_id="EN-001",
                rule_name="Secure encryption",
                framework="SOC2",
                control="CC6.7",
                status=ComplianceStatus.FAILED,
                severity=Severity.HIGH,
                message=f"Found {len(issues)} insecure encryption usages",
                evidence="\n".join(issues[:5]),
                remediation="Use TLS 1.2+, AES-256, SHA-256 or stronger"
            ))
        else:
            self.report.add_finding(ComplianceFinding(
                rule_id="EN-001",
                rule_name="Secure encryption",
                framework="SOC2",
                control="CC6.7",
                status=ComplianceStatus.PASSED,
                severity=Severity.HIGH,
                message="Encryption configuration appears secure"
            ))

    def check_logging(self) -> None:
        """Check logging configuration (SOC 2 CC7.2)."""
        has_logging = False
        for file in self.project_path.rglob("*.py"):
            try:
                content = file.read_text(errors="ignore")
                if re.search(r'import\s+logging|from\s+logging', content):
                    has_logging = True
                    break
            except Exception:
                pass

        self.report.add_finding(ComplianceFinding(
            rule_id="MO-001",
            rule_name="Audit logging enabled",
            framework="SOC2",
            control="CC7.2",
            status=ComplianceStatus.PASSED if has_logging else ComplianceStatus.WARNING,
            severity=Severity.HIGH,
            message="Logging configuration found" if has_logging else "No logging detected",
            remediation=None if has_logging else "Implement structured logging"
        ))

    def check_pii_handling(self) -> None:
        """Check for PII handling (GDPR)."""
        pii_fields = [
            "email", "phone", "address", "ssn", "date_of_birth",
            "credit_card", "passport", "driver_license"
        ]

        pii_locations = []
        for file in self.project_path.rglob("*"):
            if file.suffix in (".py", ".ts", ".js"):
                try:
                    content = file.read_text(errors="ignore")
                    for field in pii_fields:
                        if re.search(rf'\b{field}\b', content, re.IGNORECASE):
                            pii_locations.append(f"{file}: {field}")
                except Exception:
                    pass

        if pii_locations:
            self.report.add_finding(ComplianceFinding(
                rule_id="GDPR-001",
                rule_name="PII data handling",
                framework="GDPR",
                control="Article 5",
                status=ComplianceStatus.WARNING,
                severity=Severity.HIGH,
                message=f"Found {len(pii_locations)} potential PII fields",
                evidence="\n".join(pii_locations[:10]),
                remediation="Ensure PII is encrypted, has retention policy, and consent is captured"
            ))

    def check_licenses(self) -> None:
        """Check dependency licenses."""
        prohibited = ["GPL", "AGPL", "SSPL"]

        # Run license check if package.json exists
        package_json = self.project_path / "package.json"
        if package_json.exists():
            try:
                result = subprocess.run(
                    ["npx", "license-checker", "--json"],
                    capture_output=True,
                    text=True,
                    cwd=self.project_path,
                    timeout=60
                )
                if result.returncode == 0:
                    licenses = json.loads(result.stdout)
                    violations = [
                        f"{pkg}: {info.get('licenses', 'unknown')}"
                        for pkg, info in licenses.items()
                        if any(p in str(info.get('licenses', '')) for p in prohibited)
                    ]

                    if violations:
                        self.report.add_finding(ComplianceFinding(
                            rule_id="LIC-001",
                            rule_name="License compliance",
                            framework="LICENSE",
                            control="OSS Policy",
                            status=ComplianceStatus.FAILED,
                            severity=Severity.HIGH,
                            message=f"Found {len(violations)} prohibited licenses",
                            evidence="\n".join(violations[:5]),
                            remediation="Replace dependencies with permissively licensed alternatives"
                        ))
                        return
            except Exception as e:
                print(f"License check error: {e}")

        self.report.add_finding(ComplianceFinding(
            rule_id="LIC-001",
            rule_name="License compliance",
            framework="LICENSE",
            control="OSS Policy",
            status=ComplianceStatus.PASSED,
            severity=Severity.HIGH,
            message="No prohibited licenses detected"
        ))

    def check_dependencies(self) -> None:
        """Check for vulnerable dependencies."""
        package_json = self.project_path / "package.json"
        if package_json.exists():
            try:
                result = subprocess.run(
                    ["npm", "audit", "--json"],
                    capture_output=True,
                    text=True,
                    cwd=self.project_path,
                    timeout=120
                )
                audit = json.loads(result.stdout)
                vulnerabilities = audit.get("metadata", {}).get("vulnerabilities", {})
                critical = vulnerabilities.get("critical", 0)
                high = vulnerabilities.get("high", 0)

                if critical > 0 or high > 0:
                    self.report.add_finding(ComplianceFinding(
                        rule_id="DEP-001",
                        rule_name="Dependency vulnerabilities",
                        framework="SOC2",
                        control="CC6.1",
                        status=ComplianceStatus.FAILED,
                        severity=Severity.CRITICAL if critical > 0 else Severity.HIGH,
                        message=f"Found {critical} critical, {high} high vulnerabilities",
                        remediation="Run 'npm audit fix' or update vulnerable packages"
                    ))
                    return
            except Exception as e:
                print(f"Audit error: {e}")

        self.report.add_finding(ComplianceFinding(
            rule_id="DEP-001",
            rule_name="Dependency vulnerabilities",
            framework="SOC2",
            control="CC6.1",
            status=ComplianceStatus.PASSED,
            severity=Severity.HIGH,
            message="No critical vulnerabilities detected"
        ))

    def generate_evidence(self) -> str:
        """Generate compliance evidence document."""
        report = self.report.to_dict()

        doc = f"""# Compliance Evidence Report

**Generated**: {report['timestamp']}
**Repository**: {report['repository']}
**Commit**: {report['commit']}
**Overall Status**: {report['overall_status'].upper()}

## Summary

- Total Checks: {report['summary']['total']}
- Passed: {report['summary']['passed']}
- Failed: {report['summary']['failed']}
- Warnings: {report['summary']['warnings']}

## Findings

"""
        for finding in report['findings']:
            status_icon = {
                'passed': '✅',
                'failed': '❌',
                'warning': '⚠️',
                'skipped': '⏭️'
            }.get(finding['status'], '❓')

            doc += f"""### {status_icon} {finding['rule_id']}: {finding['rule_name']}

- **Framework**: {finding['framework']}
- **Control**: {finding['control']}
- **Severity**: {finding['severity']}
- **Status**: {finding['status']}
- **Message**: {finding['message']}
"""
            if finding.get('evidence'):
                doc += f"\n**Evidence**:\n```\n{finding['evidence']}\n```\n"
            if finding.get('remediation'):
                doc += f"\n**Remediation**: {finding['remediation']}\n"
            doc += "\n---\n\n"

        return doc


if __name__ == "__main__":
    checker = ComplianceChecker()
    report = checker.run_all_checks()

    # Output JSON report
    print(json.dumps(report.to_dict(), indent=2))

    # Generate evidence document
    evidence = checker.generate_evidence()
    Path("compliance-evidence.md").write_text(evidence)

    # Exit with failure if critical issues found
    sys.exit(0 if report.overall_status == ComplianceStatus.PASSED else 1)
```
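
The `__main__` block covers the CLI path; the checker can also be driven programmatically, for example from a release script or a test suite. The sketch below assumes the script above is saved as `compliance_checker.py` on the import path; that module name is an assumption, not something the skill prescribes.

```python
# Hypothetical programmatic use; assumes the script above is importable as
# compliance_checker (the module name is an assumption).
import json
from pathlib import Path

from compliance_checker import ComplianceChecker, ComplianceStatus, Severity

checker = ComplianceChecker(project_path=".")
report = checker.run_all_checks()

# Surface the findings that would block a release first.
blocking = [
    f for f in report.findings
    if f.status == ComplianceStatus.FAILED
    and f.severity in (Severity.CRITICAL, Severity.HIGH)
]
for finding in blocking:
    print(f"[{finding.framework}/{finding.control}] {finding.rule_id}: {finding.message}")

# Persist machine-readable and human-readable evidence side by side.
Path("compliance-report.json").write_text(json.dumps(report.to_dict(), indent=2))
Path("compliance-evidence.md").write_text(checker.generate_evidence())
```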

## Enterprise Integration

### Mandatory Connections
- **Security Architect (sa-*)**: Security control verification
- **Data Governance (dg-*)**: Data handling compliance
- **DevOps (do-09)**: CI/CD pipeline integration

### Recommended Connections
- **Code Review (cr-03)**: Quality gates for compliance
- **Platform Engineer (pe-*)**: Infrastructure compliance

## Best Practices

1. **Automate everything**: Manual compliance is unsustainable
2. **Fail fast**: Block non-compliant changes in CI/CD
3. **Evidence collection**: Maintain audit trails automatically
4. **Continuous monitoring**: Don't wait for audits
5. **Policy as code**: Version control your compliance rules (see the sketch after this list)
6. **Remediation guidance**: Always provide fix instructions
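
To illustrate practice 5, the thresholds used by the checks (prohibited licenses, vulnerability budgets) can live in a version-controlled policy file rather than in code. This is a minimal sketch under assumed names: the `compliance-policy.json` file and its fields are inventions for illustration, and the checker above does not read such a file today.

```python
# Policy-as-code sketch: file name and schema are assumptions, not part of the skill.
import json
from dataclasses import dataclass
from pathlib import Path
from typing import List


@dataclass
class CompliancePolicy:
    prohibited_licenses: List[str]
    max_critical_vulnerabilities: int
    max_high_vulnerabilities: int


def load_policy(path: str = "compliance-policy.json") -> CompliancePolicy:
    """Load compliance thresholds from a version-controlled policy file, with safe defaults."""
    defaults = {
        "prohibited_licenses": ["GPL", "AGPL", "SSPL"],
        "max_critical_vulnerabilities": 0,
        "max_high_vulnerabilities": 0,
    }
    policy_file = Path(path)
    data = {**defaults, **json.loads(policy_file.read_text())} if policy_file.exists() else defaults
    return CompliancePolicy(
        prohibited_licenses=data["prohibited_licenses"],
        max_critical_vulnerabilities=data["max_critical_vulnerabilities"],
        max_high_vulnerabilities=data["max_high_vulnerabilities"],
    )


if __name__ == "__main__":
    policy = load_policy()
    print(f"Prohibited licenses: {policy.prohibited_licenses}")
```

A `ComplianceChecker` variant could then accept the policy as a constructor argument instead of hardcoding `prohibited = ["GPL", "AGPL", "SSPL"]`.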

## Quick Reference

```bash
# In Claude Code
@compliance-automation "Set up SOC 2 compliance checks for our CI/CD"
@compliance-automation "Generate GDPR compliance evidence for audit"
@compliance-automation "Check license compliance for our dependencies"
```