federated-graph 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- federated_graph-0.1.0/.gitignore +18 -0
- federated_graph-0.1.0/LICENSE +21 -0
- federated_graph-0.1.0/PKG-INFO +135 -0
- federated_graph-0.1.0/README.md +82 -0
- federated_graph-0.1.0/pyproject.toml +60 -0
- federated_graph-0.1.0/src/federated_graph/__init__.py +6 -0
- federated_graph-0.1.0/src/federated_graph/_version.py +1 -0
- federated_graph-0.1.0/src/federated_graph/aggregation/__init__.py +4 -0
- federated_graph-0.1.0/src/federated_graph/aggregation/base.py +13 -0
- federated_graph-0.1.0/src/federated_graph/aggregation/fedavg.py +23 -0
- federated_graph-0.1.0/src/federated_graph/aggregation/robust.py +162 -0
- federated_graph-0.1.0/src/federated_graph/aggregation/trust.py +49 -0
- federated_graph-0.1.0/src/federated_graph/compliance/__init__.py +4 -0
- federated_graph-0.1.0/src/federated_graph/compliance/audit.py +64 -0
- federated_graph-0.1.0/src/federated_graph/compliance/metrics.py +68 -0
- federated_graph-0.1.0/src/federated_graph/compliance/sar.py +83 -0
- federated_graph-0.1.0/src/federated_graph/compliance/serialization.py +57 -0
- federated_graph-0.1.0/src/federated_graph/config.py +92 -0
- federated_graph-0.1.0/src/federated_graph/features/__init__.py +5 -0
- federated_graph-0.1.0/src/federated_graph/features/edge_features.py +33 -0
- federated_graph-0.1.0/src/federated_graph/features/missing.py +42 -0
- federated_graph-0.1.0/src/federated_graph/features/node_features.py +48 -0
- federated_graph-0.1.0/src/federated_graph/features/registry.py +28 -0
- federated_graph-0.1.0/src/federated_graph/features/windows.py +44 -0
- federated_graph-0.1.0/src/federated_graph/federation/__init__.py +5 -0
- federated_graph-0.1.0/src/federated_graph/federation/client.py +50 -0
- federated_graph-0.1.0/src/federated_graph/federation/comm/__init__.py +2 -0
- federated_graph-0.1.0/src/federated_graph/federation/comm/base.py +28 -0
- federated_graph-0.1.0/src/federated_graph/federation/comm/grpc_backend.py +37 -0
- federated_graph-0.1.0/src/federated_graph/federation/comm/http_backend.py +25 -0
- federated_graph-0.1.0/src/federated_graph/federation/comm/inprocess.py +45 -0
- federated_graph-0.1.0/src/federated_graph/federation/protocol.py +45 -0
- federated_graph-0.1.0/src/federated_graph/federation/serialization.py +45 -0
- federated_graph-0.1.0/src/federated_graph/federation/server.py +93 -0
- federated_graph-0.1.0/src/federated_graph/federation/update.py +41 -0
- federated_graph-0.1.0/src/federated_graph/graph/__init__.py +16 -0
- federated_graph-0.1.0/src/federated_graph/graph/builder.py +110 -0
- federated_graph-0.1.0/src/federated_graph/graph/io.py +37 -0
- federated_graph-0.1.0/src/federated_graph/graph/schema.py +37 -0
- federated_graph-0.1.0/src/federated_graph/graph/temporal.py +42 -0
- federated_graph-0.1.0/src/federated_graph/models/__init__.py +4 -0
- federated_graph-0.1.0/src/federated_graph/models/encoder.py +51 -0
- federated_graph-0.1.0/src/federated_graph/models/heads.py +51 -0
- federated_graph-0.1.0/src/federated_graph/models/layers.py +102 -0
- federated_graph-0.1.0/src/federated_graph/models/model.py +62 -0
- federated_graph-0.1.0/src/federated_graph/models/utils.py +24 -0
- federated_graph-0.1.0/src/federated_graph/scoring/__init__.py +4 -0
- federated_graph-0.1.0/src/federated_graph/scoring/cold_start.py +55 -0
- federated_graph-0.1.0/src/federated_graph/scoring/embeddings.py +84 -0
- federated_graph-0.1.0/src/federated_graph/scoring/scorer.py +71 -0
- federated_graph-0.1.0/src/federated_graph/scoring/store/__init__.py +2 -0
- federated_graph-0.1.0/src/federated_graph/scoring/store/base.py +32 -0
- federated_graph-0.1.0/src/federated_graph/scoring/store/memory.py +28 -0
- federated_graph-0.1.0/src/federated_graph/scoring/store/redis.py +51 -0
- federated_graph-0.1.0/src/federated_graph/security/__init__.py +3 -0
- federated_graph-0.1.0/src/federated_graph/security/identity.py +46 -0
- federated_graph-0.1.0/src/federated_graph/security/secure_agg.py +50 -0
- federated_graph-0.1.0/src/federated_graph/security/verification.py +22 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/__init__.py +5 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/adjudicator.py +96 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/canary.py +99 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/conformance.py +48 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/contract.py +48 -0
- federated_graph-0.1.0/src/federated_graph/sentinel/controls.py +46 -0
- federated_graph-0.1.0/src/federated_graph/training/__init__.py +5 -0
- federated_graph-0.1.0/src/federated_graph/training/augmentations.py +51 -0
- federated_graph-0.1.0/src/federated_graph/training/losses.py +118 -0
- federated_graph-0.1.0/src/federated_graph/training/metrics.py +51 -0
- federated_graph-0.1.0/src/federated_graph/training/mixed_loss.py +65 -0
- federated_graph-0.1.0/src/federated_graph/training/sampler.py +44 -0
- federated_graph-0.1.0/src/federated_graph/training/trainer.py +124 -0
- federated_graph-0.1.0/src/federated_graph/types.py +50 -0
- federated_graph-0.1.0/tests/__init__.py +0 -0
- federated_graph-0.1.0/tests/conftest.py +69 -0
- federated_graph-0.1.0/tests/integration/__init__.py +0 -0
- federated_graph-0.1.0/tests/integration/test_federation_loop.py +71 -0
- federated_graph-0.1.0/tests/integration/test_local_pipeline.py +63 -0
- federated_graph-0.1.0/tests/test_aggregation/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_aggregation/test_aggregation.py +46 -0
- federated_graph-0.1.0/tests/test_compliance/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_compliance/test_compliance.py +54 -0
- federated_graph-0.1.0/tests/test_features/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_features/test_features.py +38 -0
- federated_graph-0.1.0/tests/test_federation/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_federation/test_federation.py +67 -0
- federated_graph-0.1.0/tests/test_graph/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_graph/test_builder.py +40 -0
- federated_graph-0.1.0/tests/test_graph/test_schema.py +25 -0
- federated_graph-0.1.0/tests/test_graph/test_temporal.py +28 -0
- federated_graph-0.1.0/tests/test_models/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_models/test_models.py +57 -0
- federated_graph-0.1.0/tests/test_scoring/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_scoring/test_scoring.py +50 -0
- federated_graph-0.1.0/tests/test_security/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_security/test_security.py +43 -0
- federated_graph-0.1.0/tests/test_sentinel/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_sentinel/test_sentinel.py +65 -0
- federated_graph-0.1.0/tests/test_training/__init__.py +0 -0
- federated_graph-0.1.0/tests/test_training/test_training.py +58 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Temitope Adeyeha
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: federated-graph
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Federated Graph Intelligence SDK for financial fraud detection
|
|
5
|
+
Project-URL: Repository, https://github.com/Adeyeha/federated_graph
|
|
6
|
+
Project-URL: Issues, https://github.com/Adeyeha/federated_graph/issues
|
|
7
|
+
Author-email: Temitope Adeyeha <temitope.adeyeha@yahoo.com>
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: federated-learning,fraud-detection,gnn,graph-neural-network,privacy
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: Science/Research
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
19
|
+
Classifier: Topic :: Security
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Requires-Dist: cryptography>=41.0
|
|
22
|
+
Requires-Dist: numpy>=1.24
|
|
23
|
+
Requires-Dist: pydantic>=2.0
|
|
24
|
+
Requires-Dist: structlog>=23.0
|
|
25
|
+
Requires-Dist: torch-geometric>=2.4
|
|
26
|
+
Requires-Dist: torch>=2.1
|
|
27
|
+
Provides-Extra: all
|
|
28
|
+
Requires-Dist: grpcio-tools>=1.60; extra == 'all'
|
|
29
|
+
Requires-Dist: grpcio>=1.60; extra == 'all'
|
|
30
|
+
Requires-Dist: mypy; extra == 'all'
|
|
31
|
+
Requires-Dist: protobuf>=4.25; extra == 'all'
|
|
32
|
+
Requires-Dist: pytest-asyncio; extra == 'all'
|
|
33
|
+
Requires-Dist: pytest-cov; extra == 'all'
|
|
34
|
+
Requires-Dist: pytest>=7.4; extra == 'all'
|
|
35
|
+
Requires-Dist: redis>=5.0; extra == 'all'
|
|
36
|
+
Requires-Dist: ruff; extra == 'all'
|
|
37
|
+
Requires-Dist: scikit-learn>=1.3; extra == 'all'
|
|
38
|
+
Provides-Extra: dev
|
|
39
|
+
Requires-Dist: mypy; extra == 'dev'
|
|
40
|
+
Requires-Dist: pytest-asyncio; extra == 'dev'
|
|
41
|
+
Requires-Dist: pytest-cov; extra == 'dev'
|
|
42
|
+
Requires-Dist: pytest>=7.4; extra == 'dev'
|
|
43
|
+
Requires-Dist: ruff; extra == 'dev'
|
|
44
|
+
Provides-Extra: grpc
|
|
45
|
+
Requires-Dist: grpcio-tools>=1.60; extra == 'grpc'
|
|
46
|
+
Requires-Dist: grpcio>=1.60; extra == 'grpc'
|
|
47
|
+
Requires-Dist: protobuf>=4.25; extra == 'grpc'
|
|
48
|
+
Provides-Extra: redis
|
|
49
|
+
Requires-Dist: redis>=5.0; extra == 'redis'
|
|
50
|
+
Provides-Extra: scoring
|
|
51
|
+
Requires-Dist: scikit-learn>=1.3; extra == 'scoring'
|
|
52
|
+
Description-Content-Type: text/markdown
|
|
53
|
+
|
|
54
|
+
# Federated Graph
|
|
55
|
+
|
|
56
|
+
A privacy-preserving Graph Neural Network framework for financial fraud detection using federated learning. Federated Graph enables organizations to collaboratively train fraud detection models on their transaction graphs without sharing raw data.
|
|
57
|
+
|
|
58
|
+
## Features
|
|
59
|
+
|
|
60
|
+
- **Federated Learning** — Train models collaboratively across multiple clients with privacy-preserving aggregation
|
|
61
|
+
- **Temporal Graph Neural Networks** — Capture temporal dynamics in transaction networks with decay-aware convolutions
|
|
62
|
+
- **Dual-Head Risk Scoring** — Entity-level and transaction-level fraud risk assessment
|
|
63
|
+
- **Privacy & Security** — Differential privacy, secure aggregation, and cryptographic verification
|
|
64
|
+
- **Feature Engineering** — Node and edge feature extraction with temporal windowing and missing data handling
|
|
65
|
+
- **Robust Aggregation** — Multiple strategies including FedAvg, robust aggregation, and trust-weighted
|
|
66
|
+
- **Model Validation** — Canary generators, conformance checking, and model contract validation
|
|
67
|
+
- **Score Calibration** — Isotonic regression-based calibration with cold-start fallback
|
|
68
|
+
- **Multiple Backends** — In-process, HTTP, and gRPC communication options
|
|
69
|
+
- **Compliance Ready** — Built-in audit trails, SAR support, and serialization for regulatory requirements
|
|
70
|
+
|
|
71
|
+
## Installation
|
|
72
|
+
|
|
73
|
+
```bash
|
|
74
|
+
pip install federated-graph
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Optional extras:
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
pip install federated-graph[grpc] # gRPC communication backend
|
|
81
|
+
pip install federated-graph[redis] # Redis-based embedding store
|
|
82
|
+
pip install federated-graph[scoring] # Score calibration with scikit-learn
|
|
83
|
+
pip install federated-graph[dev] # Development tools
|
|
84
|
+
pip install federated-graph[all] # Everything
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
## Quick Start
|
|
88
|
+
|
|
89
|
+
```python
|
|
90
|
+
import torch
|
|
91
|
+
from torch_geometric.data import Data
|
|
92
|
+
from federated_graph import FGIConfig
|
|
93
|
+
from federated_graph.models import FGIModel
|
|
94
|
+
|
|
95
|
+
# Create configuration
|
|
96
|
+
config = FGIConfig()
|
|
97
|
+
|
|
98
|
+
# Initialize model
|
|
99
|
+
model = FGIModel(in_channels=10, config=config.model)
|
|
100
|
+
|
|
101
|
+
# Prepare graph data
|
|
102
|
+
x = torch.randn(100, 10)
|
|
103
|
+
edge_index = torch.randint(0, 100, (2, 500))
|
|
104
|
+
|
|
105
|
+
# Forward pass returns embeddings and risk scores
|
|
106
|
+
embeddings, entity_scores, txn_scores = model(x, edge_index)
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### Federated Training
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
from federated_graph.federation import FederationServer, FederationClient
|
|
113
|
+
|
|
114
|
+
server = FederationServer(
|
|
115
|
+
num_rounds=config.federation.num_rounds,
|
|
116
|
+
min_clients=config.federation.min_clients,
|
|
117
|
+
)
|
|
118
|
+
|
|
119
|
+
client = FederationClient(client_id="client_1", model=model, config=config)
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
### Configuration
|
|
123
|
+
|
|
124
|
+
Pydantic-based configuration with sensible defaults:
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
from federated_graph import FGIConfig
|
|
128
|
+
|
|
129
|
+
config = FGIConfig.from_yaml("config.yaml")
|
|
130
|
+
config.to_yaml("config_output.yaml")
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
## License
|
|
134
|
+
|
|
135
|
+
MIT
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# Federated Graph
|
|
2
|
+
|
|
3
|
+
A privacy-preserving Graph Neural Network framework for financial fraud detection using federated learning. Federated Graph enables organizations to collaboratively train fraud detection models on their transaction graphs without sharing raw data.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Federated Learning** — Train models collaboratively across multiple clients with privacy-preserving aggregation
|
|
8
|
+
- **Temporal Graph Neural Networks** — Capture temporal dynamics in transaction networks with decay-aware convolutions
|
|
9
|
+
- **Dual-Head Risk Scoring** — Entity-level and transaction-level fraud risk assessment
|
|
10
|
+
- **Privacy & Security** — Differential privacy, secure aggregation, and cryptographic verification
|
|
11
|
+
- **Feature Engineering** — Node and edge feature extraction with temporal windowing and missing data handling
|
|
12
|
+
- **Robust Aggregation** — Multiple strategies including FedAvg, robust aggregation, and trust-weighted
|
|
13
|
+
- **Model Validation** — Canary generators, conformance checking, and model contract validation
|
|
14
|
+
- **Score Calibration** — Isotonic regression-based calibration with cold-start fallback
|
|
15
|
+
- **Multiple Backends** — In-process, HTTP, and gRPC communication options
|
|
16
|
+
- **Compliance Ready** — Built-in audit trails, SAR support, and serialization for regulatory requirements
|
|
17
|
+
|
|
18
|
+
## Installation
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install federated-graph
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
Optional extras:
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pip install federated-graph[grpc] # gRPC communication backend
|
|
28
|
+
pip install federated-graph[redis] # Redis-based embedding store
|
|
29
|
+
pip install federated-graph[scoring] # Score calibration with scikit-learn
|
|
30
|
+
pip install federated-graph[dev] # Development tools
|
|
31
|
+
pip install federated-graph[all] # Everything
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Quick Start
|
|
35
|
+
|
|
36
|
+
```python
|
|
37
|
+
import torch
|
|
38
|
+
from torch_geometric.data import Data
|
|
39
|
+
from federated_graph import FGIConfig
|
|
40
|
+
from federated_graph.models import FGIModel
|
|
41
|
+
|
|
42
|
+
# Create configuration
|
|
43
|
+
config = FGIConfig()
|
|
44
|
+
|
|
45
|
+
# Initialize model
|
|
46
|
+
model = FGIModel(in_channels=10, config=config.model)
|
|
47
|
+
|
|
48
|
+
# Prepare graph data
|
|
49
|
+
x = torch.randn(100, 10)
|
|
50
|
+
edge_index = torch.randint(0, 100, (2, 500))
|
|
51
|
+
|
|
52
|
+
# Forward pass returns embeddings and risk scores
|
|
53
|
+
embeddings, entity_scores, txn_scores = model(x, edge_index)
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### Federated Training
|
|
57
|
+
|
|
58
|
+
```python
|
|
59
|
+
from federated_graph.federation import FederationServer, FederationClient
|
|
60
|
+
|
|
61
|
+
server = FederationServer(
|
|
62
|
+
num_rounds=config.federation.num_rounds,
|
|
63
|
+
min_clients=config.federation.min_clients,
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
client = FederationClient(client_id="client_1", model=model, config=config)
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### Configuration
|
|
70
|
+
|
|
71
|
+
Pydantic-based configuration with sensible defaults:
|
|
72
|
+
|
|
73
|
+
```python
|
|
74
|
+
from federated_graph import FGIConfig
|
|
75
|
+
|
|
76
|
+
config = FGIConfig.from_yaml("config.yaml")
|
|
77
|
+
config.to_yaml("config_output.yaml")
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
## License
|
|
81
|
+
|
|
82
|
+
MIT
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "federated-graph"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Federated Graph Intelligence SDK for financial fraud detection"
|
|
9
|
+
requires-python = ">=3.10"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
readme = "README.md"
|
|
12
|
+
authors = [{ name = "Temitope Adeyeha", email = "temitope.adeyeha@yahoo.com" }]
|
|
13
|
+
keywords = ["federated-learning", "graph-neural-network", "fraud-detection", "gnn", "privacy"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"Intended Audience :: Science/Research",
|
|
18
|
+
"License :: OSI Approved :: MIT License",
|
|
19
|
+
"Programming Language :: Python :: 3.10",
|
|
20
|
+
"Programming Language :: Python :: 3.11",
|
|
21
|
+
"Programming Language :: Python :: 3.12",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
23
|
+
"Topic :: Security",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
dependencies = [
|
|
27
|
+
"torch>=2.1",
|
|
28
|
+
"torch-geometric>=2.4",
|
|
29
|
+
"pydantic>=2.0",
|
|
30
|
+
"numpy>=1.24",
|
|
31
|
+
"cryptography>=41.0",
|
|
32
|
+
"structlog>=23.0",
|
|
33
|
+
]
|
|
34
|
+
|
|
35
|
+
[project.urls]
|
|
36
|
+
Repository = "https://github.com/Adeyeha/federated_graph"
|
|
37
|
+
Issues = "https://github.com/Adeyeha/federated_graph/issues"
|
|
38
|
+
|
|
39
|
+
[project.optional-dependencies]
|
|
40
|
+
grpc = ["grpcio>=1.60", "grpcio-tools>=1.60", "protobuf>=4.25"]
|
|
41
|
+
redis = ["redis>=5.0"]
|
|
42
|
+
scoring = ["scikit-learn>=1.3"]
|
|
43
|
+
dev = ["pytest>=7.4", "pytest-cov", "pytest-asyncio", "ruff", "mypy"]
|
|
44
|
+
all = ["federated-graph[grpc,redis,scoring,dev]"]
|
|
45
|
+
|
|
46
|
+
[tool.hatch.build.targets.wheel]
|
|
47
|
+
packages = ["src/federated_graph"]
|
|
48
|
+
|
|
49
|
+
[tool.pytest.ini_options]
|
|
50
|
+
testpaths = ["tests"]
|
|
51
|
+
asyncio_mode = "auto"
|
|
52
|
+
|
|
53
|
+
[tool.ruff]
|
|
54
|
+
target-version = "py310"
|
|
55
|
+
line-length = 100
|
|
56
|
+
|
|
57
|
+
[tool.mypy]
|
|
58
|
+
python_version = "3.10"
|
|
59
|
+
strict = true
|
|
60
|
+
warn_return_any = true
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Single source of truth for the package version; keep in sync with
# the `version` field in pyproject.toml when cutting a release.
__version__ = "0.1.0"
|
|
@@ -0,0 +1,4 @@
|
|
|
1
|
+
from federated_graph.aggregation.base import Aggregator
|
|
2
|
+
from federated_graph.aggregation.fedavg import FedAvg
|
|
3
|
+
from federated_graph.aggregation.robust import CoordinateMedian, TrimmedMean, Krum, GeometricMedian
|
|
4
|
+
from federated_graph.aggregation.trust import TrustWeighting
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
import torch
|
|
3
|
+
from federated_graph.federation.update import ModelUpdate
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Aggregator(ABC):
    """Common interface for strategies that merge client model updates.

    Concrete subclasses (FedAvg, CoordinateMedian, TrimmedMean, Krum,
    GeometricMedian, TrustWeighting) implement ``aggregate`` to fold one
    round's client deltas into the global model state.
    """

    @abstractmethod
    def aggregate(
        self,
        updates: list[ModelUpdate],
        global_state: dict[str, torch.Tensor],
    ) -> dict[str, torch.Tensor]:
        """Combine *updates* with *global_state* and return the new state dict."""
        ...
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from federated_graph.aggregation.base import Aggregator
|
|
3
|
+
from federated_graph.federation.update import ModelUpdate
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class FedAvg(Aggregator):
    """Federated Averaging: blend client deltas weighted by their sample counts."""

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Return *global_state* shifted by the sample-weighted mean of deltas.

        Updates missing a key simply contribute nothing to that tensor.
        If no client reported any samples, the global state is returned as-is.
        """
        total = sum(u.num_samples for u in updates)
        if total == 0:
            # Nothing to average — keep the current global state untouched.
            return global_state

        merged: dict[str, torch.Tensor] = {}
        for name, tensor in global_state.items():
            base = tensor.float()
            blended = torch.zeros_like(base)
            for u in updates:
                if name in u.state_delta:
                    blended = blended + u.state_delta[name].float() * (u.num_samples / total)
            merged[name] = base + blended
        return merged
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from federated_graph.aggregation.base import Aggregator
|
|
3
|
+
from federated_graph.federation.update import ModelUpdate
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CoordinateMedian(Aggregator):
    """Byzantine-robust aggregation using the coordinate-wise median of deltas."""

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Apply the per-coordinate median of the client deltas to each tensor.

        Tensors that no client touched are carried over unchanged (cloned).
        """
        merged: dict[str, torch.Tensor] = {}
        for name, tensor in global_state.items():
            contributions = [u.state_delta[name].float()
                             for u in updates if name in u.state_delta]
            if contributions:
                median = torch.stack(contributions, dim=0).median(dim=0).values
                merged[name] = tensor.float() + median
            else:
                # No client reported this tensor; keep the global value.
                merged[name] = tensor.clone()
        return merged
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class TrimmedMean(Aggregator):
    """Trimmed-mean aggregation: per coordinate, drop the k smallest and k
    largest delta values before averaging (robust to outlier clients)."""

    def __init__(self, trim_fraction: float = 0.1):
        """
        Args:
            trim_fraction: fraction of updates trimmed from *each* end,
                must lie in [0, 0.5) so at least one value survives.

        Raises:
            ValueError: if trim_fraction is outside [0, 0.5).
        """
        if not 0.0 <= trim_fraction < 0.5:
            raise ValueError("trim_fraction must be in [0, 0.5)")
        self.trim_fraction = trim_fraction

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Return *global_state* plus the trimmed mean of client deltas.

        Falls back to a plain mean for keys with too few contributions to
        trim, and to a clone for keys no client reported.
        """
        n = len(updates)
        # At least one value is always trimmed from each end.
        k = max(1, int(n * self.trim_fraction))

        new_state: dict[str, torch.Tensor] = {}
        for key in global_state:
            deltas = [u.state_delta[key].float()
                      for u in updates if key in u.state_delta]
            m = len(deltas)
            if m > 2 * k:
                sorted_deltas, _ = torch.stack(deltas, dim=0).sort(dim=0)
                # BUG FIX: trim relative to the number of collected deltas
                # (m), not the total number of updates (n). The old slice
                # [k:n-k] silently failed to trim the top end whenever some
                # updates were missing this key (m < n).
                mean_delta = sorted_deltas[k:m - k].mean(dim=0)
                new_state[key] = global_state[key].float() + mean_delta
            elif deltas:
                # Too few contributions to trim safely; use the plain mean.
                new_state[key] = global_state[key].float() + torch.stack(deltas, dim=0).mean(dim=0)
            else:
                new_state[key] = global_state[key].clone()
        return new_state
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class Krum(Aggregator):
    """
    Krum aggregation: select the single client update whose summed squared
    distance to its closest neighbours is minimal (Blanchard et al., 2017).
    Byzantine-robust when f < n/2 - 1.
    """

    def __init__(self, num_byzantine: int = 1):
        """
        Args:
            num_byzantine: assumed number of Byzantine (malicious) clients f.
        """
        self.num_byzantine = num_byzantine

    def _flatten(self, state_delta: dict[str, torch.Tensor]) -> torch.Tensor:
        """Flatten a state dict into a single vector (keys in sorted order)."""
        # Readability fix: the original used a double-``for`` comprehension
        # that rebound the loop variable to extract the tensor; iterating the
        # sorted keys directly is equivalent and far clearer.
        return torch.cat([state_delta[key].float().flatten()
                          for key in sorted(state_delta)])

    def _flatten_aligned(self, update: ModelUpdate,
                         global_state: dict[str, torch.Tensor]) -> torch.Tensor:
        """Flatten an update against *global_state*'s sorted keys, zero-filling
        missing tensors so vectors from different clients are comparable
        position-by-position."""
        parts = []
        for key in sorted(global_state):
            ref = global_state[key]
            if key in update.state_delta:
                parts.append(update.state_delta[key].float().flatten())
            else:
                # Robustness fix: allocate the zero-fill on the same device as
                # the global tensor so torch.cat works for GPU-resident models
                # (the old torch.zeros(...) was always CPU).
                parts.append(torch.zeros(ref.numel(), dtype=torch.float32,
                                         device=ref.device))
        return torch.cat(parts)

    def _apply(self, update: ModelUpdate,
               global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Add one update's delta to the global state (missing keys -> zero)."""
        new_state: dict[str, torch.Tensor] = {}
        for key in global_state:
            delta = update.state_delta.get(key, torch.zeros_like(global_state[key]))
            new_state[key] = global_state[key].float() + delta.float()
        return new_state

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Return *global_state* plus the delta of the Krum-selected update."""
        n = len(updates)
        if n == 0:
            return global_state
        if n == 1:
            # Only one candidate: nothing to score.
            return self._apply(updates[0], global_state)

        flat = [self._flatten_aligned(u, global_state) for u in updates]

        # Each candidate is scored by the sum of squared distances to its
        # n - f - 2 nearest neighbours (clamped to at least one neighbour).
        n_select = max(1, n - self.num_byzantine - 2)
        scores = []
        for i in range(n):
            dists = sorted((flat[i] - flat[j]).pow(2).sum().item()
                           for j in range(n) if j != i)
            scores.append(sum(dists[:n_select]))

        best = updates[scores.index(min(scores))]
        return self._apply(best, global_state)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class GeometricMedian(Aggregator):
    """Geometric median aggregation via Weiszfeld's algorithm (outlier-robust)."""

    def __init__(self, max_iter: int = 100, tol: float = 1e-6):
        """
        Args:
            max_iter: maximum number of Weiszfeld iterations.
            tol: stop once the iterate moves less than this L2 distance.
        """
        self.max_iter = max_iter
        self.tol = tol

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Return *global_state* plus the geometric median of client deltas."""
        n = len(updates)
        if n == 0:
            return global_state

        keys = sorted(global_state.keys())

        # Flatten each update against the sorted global key order, zero-filling
        # missing tensors so all client vectors share the same layout.
        flat_updates = []
        for u in updates:
            parts = []
            for key in keys:
                ref = global_state[key]
                if key in u.state_delta:
                    parts.append(u.state_delta[key].float().flatten())
                else:
                    # Robustness fix: match the global tensor's device so
                    # torch.cat/stack work for GPU-resident state (the old
                    # torch.zeros(...) was always CPU).
                    parts.append(torch.zeros(ref.numel(), dtype=torch.float32,
                                             device=ref.device))
            flat_updates.append(torch.cat(parts))

        stacked = torch.stack(flat_updates, dim=0)

        # Weiszfeld's algorithm: repeatedly re-weight points by inverse
        # distance to the current estimate; the clamp avoids division by
        # zero when the estimate coincides with a data point.
        median = stacked.mean(dim=0)
        for _ in range(self.max_iter):
            dists = (stacked - median.unsqueeze(0)).pow(2).sum(dim=1).sqrt().clamp(min=1e-8)
            weights = 1.0 / dists
            weights = weights / weights.sum()
            new_median = (stacked * weights.unsqueeze(1)).sum(dim=0)
            shift = (new_median - median).pow(2).sum().sqrt()
            # BUG FIX: keep the freshly computed iterate even on convergence;
            # the old code broke *before* assigning it, discarding the last
            # (most accurate) estimate.
            median = new_median
            if shift < self.tol:
                break

        # Unflatten the median vector back into per-tensor deltas.
        new_state: dict[str, torch.Tensor] = {}
        offset = 0
        for key in keys:
            numel = global_state[key].numel()
            delta = median[offset:offset + numel].reshape(global_state[key].shape)
            new_state[key] = global_state[key].float() + delta
            offset += numel
        return new_state
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from federated_graph.aggregation.base import Aggregator
|
|
3
|
+
from federated_graph.federation.update import ModelUpdate
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class TrustWeighting(Aggregator):
    """
    Trust-weighted aggregation.

    Each client's raw weight is w_i = p_i * tau_i, where p_i = n_i / sum(n)
    is its sample share and tau_i in [0, 1] is an operator-assigned trust
    score (defaulting to full trust for unknown clients).
    """

    def __init__(self):
        # Maps client_id -> trust score in [0, 1]; absent clients get 1.0.
        self._trust_scores: dict[str, float] = {}

    def set_trust(self, client_id: str, trust: float) -> None:
        """Record a trust score for *client_id*, clamped into [0, 1]."""
        self._trust_scores[client_id] = max(0.0, min(1.0, trust))

    def get_trust(self, client_id: str) -> float:
        """Return the stored trust score, defaulting to full trust (1.0)."""
        return self._trust_scores.get(client_id, 1.0)

    def aggregate(self, updates: list[ModelUpdate],
                  global_state: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        """Blend client deltas into *global_state* using normalized trust weights."""
        total_samples = sum(u.num_samples for u in updates)
        if total_samples == 0:
            return global_state

        # Raw weights w_i = p_i * tau_i, then normalize to sum to one.
        raw = [(u.num_samples / total_samples) * self.get_trust(u.client_id)
               for u in updates]
        norm = sum(raw)
        if norm > 0:
            weights = [w / norm for w in raw]
        else:
            # Every participating client has zero trust: fall back to uniform.
            weights = [1.0 / len(updates)] * len(updates)

        new_state: dict[str, torch.Tensor] = {}
        for key, tensor in global_state.items():
            acc = torch.zeros_like(tensor.float())
            for u, w in zip(updates, weights):
                if key in u.state_delta:
                    acc = acc + u.state_delta[key].float() * w
            new_state[key] = tensor.float() + acc
        return new_state
|
|
@@ -0,0 +1,4 @@
|
|
|
1
|
+
from federated_graph.compliance.audit import AuditLogger
|
|
2
|
+
from federated_graph.compliance.serialization import ComplianceRecord, deterministic_json, hash_json
|
|
3
|
+
from federated_graph.compliance.sar import SARNarrativeGenerator, SARPattern
|
|
4
|
+
from federated_graph.compliance.metrics import ComplianceMetrics, compute_compliance_metrics
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
"""Structured JSON audit logging for all federation events."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import time
|
|
5
|
+
import structlog
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
logger = structlog.get_logger()
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class AuditLogger:
    """Structured JSON audit logger for federation events.

    Events are retained in memory for querying/export and, when *log_dir*
    is given, appended to a single JSONL file created per logger instance.
    """

    def __init__(self, log_dir: str | Path | None = None):
        """
        Args:
            log_dir: directory for the JSONL audit file; created if missing.
                When None, events are kept in memory only.
        """
        self.log_dir = Path(log_dir) if log_dir else None
        self._events: list[dict[str, Any]] = []
        self._log_file: Path | None = None
        if self.log_dir:
            self.log_dir.mkdir(parents=True, exist_ok=True)
            # BUG FIX: name the audit file once per logger instance. The old
            # code rebuilt the name from time.time() inside every log_event
            # call, silently splitting one session's audit trail into a new
            # file each second.
            self._log_file = self.log_dir / f"audit_{int(time.time())}.jsonl"

    def log_event(self, event_type: str, **kwargs: Any) -> dict[str, Any]:
        """Record a structured audit event; returns the event dict."""
        event = {
            "timestamp": time.time(),
            "event_type": event_type,
            **kwargs,
        }
        self._events.append(event)
        logger.info(event_type, **kwargs)

        if self._log_file:
            # Append-only JSONL: one event object per line.
            with open(self._log_file, "a") as f:
                f.write(json.dumps(event) + "\n")

        return event

    def log_round_start(self, round_num: int, num_clients: int) -> dict:
        """Audit the start of a federation round."""
        return self.log_event("round_start", round_num=round_num, num_clients=num_clients)

    def log_round_end(self, round_num: int, verdict: str, state_hash: str = "") -> dict:
        """Audit the end of a federation round with its verdict."""
        return self.log_event("round_end", round_num=round_num, verdict=verdict, state_hash=state_hash)

    def log_client_update(self, round_num: int, client_id: str,
                          num_samples: int, update_hash: str = "") -> dict:
        """Audit receipt of one client's model update."""
        return self.log_event("client_update", round_num=round_num,
                              client_id=client_id, num_samples=num_samples, update_hash=update_hash)

    def log_rejection(self, round_num: int, client_id: str, reason: str) -> dict:
        """Audit rejection of a client's update with the reason."""
        return self.log_event("rejection", round_num=round_num, client_id=client_id, reason=reason)

    def log_model_publish(self, round_num: int, state_hash: str, model_version: str = "") -> dict:
        """Audit publication of a new global model."""
        return self.log_event("model_publish", round_num=round_num,
                              state_hash=state_hash, model_version=model_version)

    def get_events(self, event_type: str | None = None) -> list[dict]:
        """Return recorded events, optionally filtered by *event_type*."""
        if event_type:
            return [e for e in self._events if e["event_type"] == event_type]
        return list(self._events)

    def export_json(self, path: str | Path) -> None:
        """Export all recorded events to a pretty-printed JSON file."""
        with open(path, "w") as f:
            json.dump(self._events, f, indent=2)
|