flwr-nightly 1.22.0.dev20250917__py3-none-any.whl → 1.22.0.dev20250919__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flwr/cli/new/new.py +2 -0
- flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
- flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
- flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
- flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
- flwr/clientapp/mod/__init__.py +2 -1
- flwr/clientapp/mod/centraldp_mods.py +155 -39
- flwr/clientapp/typing.py +22 -0
- flwr/common/constant.py +1 -0
- flwr/common/exit/exit_code.py +4 -0
- flwr/common/record/typeddict.py +12 -0
- flwr/serverapp/strategy/__init__.py +12 -0
- flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
- flwr/serverapp/strategy/dp_fixed_clipping.py +71 -49
- flwr/serverapp/strategy/fedadagrad.py +0 -3
- flwr/serverapp/strategy/fedadam.py +0 -3
- flwr/serverapp/strategy/fedavgm.py +3 -3
- flwr/serverapp/strategy/fedprox.py +1 -1
- flwr/serverapp/strategy/fedtrimmedavg.py +1 -1
- flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
- flwr/serverapp/strategy/fedyogi.py +0 -3
- flwr/serverapp/strategy/krum.py +230 -0
- flwr/serverapp/strategy/qfedavg.py +252 -0
- flwr/supercore/cli/flower_superexec.py +26 -1
- flwr/supercore/constant.py +19 -0
- flwr/supercore/superexec/plugin/exec_plugin.py +11 -1
- flwr/supercore/superexec/run_superexec.py +16 -2
- {flwr_nightly-1.22.0.dev20250917.dist-info → flwr_nightly-1.22.0.dev20250919.dist-info}/METADATA +1 -1
- {flwr_nightly-1.22.0.dev20250917.dist-info → flwr_nightly-1.22.0.dev20250919.dist-info}/RECORD +31 -23
- flwr/serverapp/dp_fixed_clipping.py +0 -352
- flwr/serverapp/strategy/strategy_utils_tests.py +0 -323
- {flwr_nightly-1.22.0.dev20250917.dist-info → flwr_nightly-1.22.0.dev20250919.dist-info}/WHEEL +0 -0
- {flwr_nightly-1.22.0.dev20250917.dist-info → flwr_nightly-1.22.0.dev20250919.dist-info}/entry_points.txt +0 -0
flwr/cli/new/new.py
CHANGED
@@ -41,6 +41,7 @@ class MlFramework(str, Enum):
     JAX = "JAX"
     MLX = "MLX"
     NUMPY = "NumPy"
+    XGBOOST = "XGBoost"
     FLOWERTUNE = "FlowerTune"
     BASELINE = "Flower Baseline"
     PYTORCH_LEGACY_API = "PyTorch (Legacy API, deprecated)"
@@ -247,6 +248,7 @@ def new(
         MlFramework.TENSORFLOW.value,
         MlFramework.SKLEARN.value,
         MlFramework.NUMPY.value,
+        MlFramework.XGBOOST.value,
         "pytorch_legacy_api",
     ]
     if framework_str in frameworks_with_tasks:
flwr/cli/new/templates/app/code/client.xgboost.py.tpl
ADDED
@@ -0,0 +1,110 @@
"""$project_name: A Flower / $framework_str app."""

import warnings

import numpy as np
import xgboost as xgb
from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
from flwr.clientapp import ClientApp
from flwr.common.config import unflatten_dict

from $import_name.task import load_data, replace_keys

warnings.filterwarnings("ignore", category=UserWarning)


# Flower ClientApp
app = ClientApp()


def _local_boost(bst_input, num_local_round, train_dmatrix):
    # Update trees based on local training data.
    for i in range(num_local_round):
        bst_input.update(train_dmatrix, bst_input.num_boosted_rounds())

    # Bagging: extract the last N=num_local_round trees for server aggregation
    bst = bst_input[
        bst_input.num_boosted_rounds()
        - num_local_round : bst_input.num_boosted_rounds()
    ]
    return bst


@app.train()
def train(msg: Message, context: Context) -> Message:
    # Load model and data
    partition_id = context.node_config["partition-id"]
    num_partitions = context.node_config["num-partitions"]
    train_dmatrix, _, num_train, _ = load_data(partition_id, num_partitions)

    # Read from run config
    num_local_round = context.run_config["local-epochs"]
    # Unflatten the run config and replace "-" with "_" in keys
    cfg = replace_keys(unflatten_dict(context.run_config))
    params = cfg["params"]

    global_round = msg.content["config"]["server-round"]
    if global_round == 1:
        # First round local training
        bst = xgb.train(
            params,
            train_dmatrix,
            num_boost_round=num_local_round,
        )
    else:
        bst = xgb.Booster(params=params)
        global_model = bytearray(msg.content["arrays"]["0"].numpy().tobytes())

        # Load global model into booster
        bst.load_model(global_model)

        # Local training
        bst = _local_boost(bst, num_local_round, train_dmatrix)

    # Save model
    local_model = bst.save_raw("json")
    model_np = np.frombuffer(local_model, dtype=np.uint8)

    # Construct reply message
    # Note: we store the model as the first item in a list into ArrayRecord,
    # which can be accessed using index ["0"].
    model_record = ArrayRecord([model_np])
    metrics = {
        "num-examples": num_train,
    }
    metric_record = MetricRecord(metrics)
    content = RecordDict({"arrays": model_record, "metrics": metric_record})
    return Message(content=content, reply_to=msg)


@app.evaluate()
def evaluate(msg: Message, context: Context) -> Message:
    # Load model and data
    partition_id = context.node_config["partition-id"]
    num_partitions = context.node_config["num-partitions"]
    _, valid_dmatrix, _, num_val = load_data(partition_id, num_partitions)

    # Load config
    cfg = replace_keys(unflatten_dict(context.run_config))
    params = cfg["params"]

    # Load global model
    bst = xgb.Booster(params=params)
    global_model = bytearray(msg.content["arrays"]["0"].numpy().tobytes())
    bst.load_model(global_model)

    # Run evaluation
    eval_results = bst.eval_set(
        evals=[(valid_dmatrix, "valid")],
        iteration=bst.num_boosted_rounds() - 1,
    )
    auc = float(eval_results.split("\t")[1].split(":")[1])

    # Construct and return reply Message
    metrics = {
        "auc": auc,
        "num-examples": num_val,
    }
    metric_record = MetricRecord(metrics)
    content = RecordDict({"metrics": metric_record})
    return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/server.xgboost.py.tpl
ADDED
@@ -0,0 +1,56 @@
"""$project_name: A Flower / $framework_str app."""

import numpy as np
import xgboost as xgb
from flwr.app import ArrayRecord, Context
from flwr.common.config import unflatten_dict
from flwr.serverapp import Grid, ServerApp
from flwr.serverapp.strategy import FedXgbBagging

from $import_name.task import replace_keys

# Create ServerApp
app = ServerApp()


@app.main()
def main(grid: Grid, context: Context) -> None:
    # Read run config
    num_rounds = context.run_config["num-server-rounds"]
    fraction_train = context.run_config["fraction-train"]
    fraction_evaluate = context.run_config["fraction-evaluate"]
    # Unflatten the run config and replace "-" with "_" in keys
    cfg = replace_keys(unflatten_dict(context.run_config))
    params = cfg["params"]

    # Init global model
    # Init with an empty object; the XGBooster will be created
    # and trained on the client side.
    global_model = b""
    # Note: we store the model as the first item in a list into ArrayRecord,
    # which can be accessed using index ["0"].
    arrays = ArrayRecord([np.frombuffer(global_model, dtype=np.uint8)])

    # Initialize FedXgbBagging strategy
    strategy = FedXgbBagging(
        fraction_train=fraction_train,
        fraction_evaluate=fraction_evaluate,
    )

    # Start strategy, run FedXgbBagging for `num_rounds`
    result = strategy.start(
        grid=grid,
        initial_arrays=arrays,
        num_rounds=num_rounds,
    )

    # Save final model to disk
    bst = xgb.Booster(params=params)
    global_model = bytearray(result.arrays["0"].numpy().tobytes())

    # Load global model into booster
    bst.load_model(global_model)

    # Save model
    print("\nSaving final model to disk...")
    bst.save_model("final_model.json")
flwr/cli/new/templates/app/code/task.xgboost.py.tpl
ADDED
@@ -0,0 +1,67 @@
"""$project_name: A Flower / $framework_str app."""

import xgboost as xgb
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner


def train_test_split(partition, test_fraction, seed):
    """Split the data into train and validation set given split rate."""
    train_test = partition.train_test_split(test_size=test_fraction, seed=seed)
    partition_train = train_test["train"]
    partition_test = train_test["test"]

    num_train = len(partition_train)
    num_test = len(partition_test)

    return partition_train, partition_test, num_train, num_test


def transform_dataset_to_dmatrix(data):
    """Transform dataset to DMatrix format for xgboost."""
    x = data["inputs"]
    y = data["label"]
    new_data = xgb.DMatrix(x, label=y)
    return new_data


fds = None  # Cache FederatedDataset


def load_data(partition_id, num_clients):
    """Load partition HIGGS data."""
    # Only initialize `FederatedDataset` once
    global fds
    if fds is None:
        partitioner = IidPartitioner(num_partitions=num_clients)
        fds = FederatedDataset(
            dataset="jxie/higgs",
            partitioners={"train": partitioner},
        )

    # Load the partition for this `partition_id`
    partition = fds.load_partition(partition_id, split="train")
    partition.set_format("numpy")

    # Train/test splitting
    train_data, valid_data, num_train, num_val = train_test_split(
        partition, test_fraction=0.2, seed=42
    )

    # Reformat data to DMatrix for xgboost
    train_dmatrix = transform_dataset_to_dmatrix(train_data)
    valid_dmatrix = transform_dataset_to_dmatrix(valid_data)

    return train_dmatrix, valid_dmatrix, num_train, num_val


def replace_keys(input_dict, match="-", target="_"):
    """Recursively replace match string with target string in dictionary keys."""
    new_dict = {}
    for key, value in input_dict.items():
        new_key = key.replace(match, target)
        if isinstance(value, dict):
            new_dict[new_key] = replace_keys(value, match, target)
        else:
            new_dict[new_key] = value
    return new_dict
flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl
ADDED
@@ -0,0 +1,61 @@
# =====================================================================
# For a full TOML configuration guide, check the Flower docs:
# https://flower.ai/docs/framework/how-to-configure-pyproject-toml.html
# =====================================================================

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "$package_name"
version = "1.0.0"
description = ""
license = "Apache-2.0"
# Dependencies for your Flower App
dependencies = [
    "flwr[simulation]>=1.22.0",
    "flwr-datasets>=0.5.0",
    "xgboost>=2.0.0",
]

[tool.hatch.build.targets.wheel]
packages = ["."]

[tool.flwr.app]
publisher = "$username"

[tool.flwr.app.components]
serverapp = "$import_name.server_app:app"
clientapp = "$import_name.client_app:app"

# Custom config values accessible via `context.run_config`
[tool.flwr.app.config]
num-server-rounds = 3
fraction-train = 0.1
fraction-evaluate = 0.1
local-epochs = 1

# XGBoost parameters
params.objective = "binary:logistic"
params.eta = 0.1  # Learning rate
params.max-depth = 8
params.eval-metric = "auc"
params.nthread = 16
params.num-parallel-tree = 1
params.subsample = 1
params.tree-method = "hist"

# Default federation to use when running the app
[tool.flwr.federations]
default = "local-simulation"

# Local simulation federation with 10 virtual SuperNodes
[tool.flwr.federations.local-simulation]
options.num-supernodes = 10

# Remote federation example for use with SuperLink
[tool.flwr.federations.remote-federation]
address = "<SUPERLINK-ADDRESS>:<PORT>"
insecure = true  # Remove this line to enable TLS
# root-certificates = "<PATH/TO/ca.crt>"  # For TLS setup
flwr/clientapp/mod/__init__.py
CHANGED
@@ -17,9 +17,10 @@
 
 from flwr.client.mod.comms_mods import arrays_size_mod, message_size_mod
 
-from .centraldp_mods import fixedclipping_mod
+from .centraldp_mods import adaptiveclipping_mod, fixedclipping_mod
 
 __all__ = [
+    "adaptiveclipping_mod",
     "arrays_size_mod",
     "fixedclipping_mod",
     "message_size_mod",
flwr/clientapp/mod/centraldp_mods.py
CHANGED
@@ -16,13 +16,26 @@
 
 
 from collections import OrderedDict
-from logging import
+from logging import ERROR, INFO
 from typing import cast
 
-from flwr.
-from flwr.
-from flwr.common
-
+from flwr.app import Error
+from flwr.clientapp.typing import ClientAppCallable
+from flwr.common import (
+    Array,
+    ArrayRecord,
+    ConfigRecord,
+    Context,
+    Message,
+    MetricRecord,
+    log,
+)
+from flwr.common.constant import ErrorCode
+from flwr.common.differential_privacy import (
+    compute_adaptive_clip_model_update,
+    compute_clip_model_update,
+)
+from flwr.common.differential_privacy_constants import KEY_CLIPPING_NORM, KEY_NORM_BIT
 
 
 # pylint: disable=too-many-return-statements
@@ -46,33 +59,15 @@ def fixedclipping_mod(
 
     Typically, fixedclipping_mod should be the last to operate on params.
     """
-    if msg.metadata.message_type != MessageType.TRAIN:
-        return call_next(msg, ctxt)
-
     if len(msg.content.array_records) != 1:
-
-            WARN,
-            "fixedclipping_mod is designed to work with a single ArrayRecord. "
-            "Skipping.",
-        )
-        return call_next(msg, ctxt)
-
+        return _handle_multi_record_err("fixedclipping_mod", msg, ArrayRecord)
     if len(msg.content.config_records) != 1:
-
-            WARN,
-            "fixedclipping_mod is designed to work with a single ConfigRecord. "
-            "Skipping.",
-        )
-        return call_next(msg, ctxt)
+        return _handle_multi_record_err("fixedclipping_mod", msg, ConfigRecord)
 
     # Get keys in the single ConfigRecord
     keys_in_config = set(next(iter(msg.content.config_records.values())).keys())
     if KEY_CLIPPING_NORM not in keys_in_config:
-
-            f"The {KEY_CLIPPING_NORM} value is not supplied by the "
-            f"`DifferentialPrivacyClientSideFixedClipping` wrapper at"
-            f" the server side."
-        )
+        return _handle_no_key_err("fixedclipping_mod", msg)
     # Record array record communicated to client and clipping norm
     original_array_record = next(iter(msg.content.array_records.values()))
     clipping_norm = cast(
@@ -86,26 +81,16 @@ def fixedclipping_mod(
     if out_msg.has_error():
         return out_msg
 
-    # Ensure
+    # Ensure reply has a single ArrayRecord
     if len(out_msg.content.array_records) != 1:
-
-            WARN,
-            "fixedclipping_mod is designed to work with a single ArrayRecord. "
-            "Skipping.",
-        )
-        return out_msg
+        return _handle_multi_record_err("fixedclipping_mod", out_msg, ArrayRecord)
 
     new_array_record_key, client_to_server_arrecord = next(
         iter(out_msg.content.array_records.items())
     )
     # Ensure keys in returned ArrayRecord match those in the one sent from server
     if set(original_array_record.keys()) != set(client_to_server_arrecord.keys()):
-
-            WARN,
-            "fixedclipping_mod: Keys in ArrayRecord must match those from the model "
-            "that the ClientApp received. Skipping.",
-        )
-        return out_msg
+        return _handle_array_key_mismatch_err("fixedclipping_mod", out_msg)
 
     client_to_server_ndarrays = client_to_server_arrecord.to_numpy_ndarrays()
     # Clip the client update
@@ -130,3 +115,134 @@ def fixedclipping_mod(
         )
     )
     return out_msg
+
+
+def adaptiveclipping_mod(
+    msg: Message, ctxt: Context, call_next: ClientAppCallable
+) -> Message:
+    """Client-side adaptive clipping modifier.
+
+    This mod needs to be used with the DifferentialPrivacyClientSideAdaptiveClipping
+    server-side strategy wrapper.
+
+    The wrapper sends the clipping_norm value to the client.
+
+    This mod clips the client model updates before sending them to the server.
+
+    It also sends KEY_NORM_BIT to the server for computing the new clipping value.
+
+    It operates on messages of type `MessageType.TRAIN`.
+
+    Notes
+    -----
+    Consider the order of mods when using multiple.
+
+    Typically, adaptiveclipping_mod should be the last to operate on params.
+    """
+    if len(msg.content.array_records) != 1:
+        return _handle_multi_record_err("adaptiveclipping_mod", msg, ArrayRecord)
+    if len(msg.content.config_records) != 1:
+        return _handle_multi_record_err("adaptiveclipping_mod", msg, ConfigRecord)
+
+    # Get keys in the single ConfigRecord
+    keys_in_config = set(next(iter(msg.content.config_records.values())).keys())
+    if KEY_CLIPPING_NORM not in keys_in_config:
+        return _handle_no_key_err("adaptiveclipping_mod", msg)
+
+    # Record array record communicated to client and clipping norm
+    original_array_record = next(iter(msg.content.array_records.values()))
+    clipping_norm = cast(
+        float, next(iter(msg.content.config_records.values()))[KEY_CLIPPING_NORM]
+    )
+
+    # Call inner app
+    out_msg = call_next(msg, ctxt)
+
+    # Ensure reply has a single ArrayRecord
+    if len(out_msg.content.array_records) != 1:
+        return _handle_multi_record_err("adaptiveclipping_mod", out_msg, ArrayRecord)
+
+    # Ensure reply has a single MetricRecord
+    if len(out_msg.content.metric_records) != 1:
+        return _handle_multi_record_err("adaptiveclipping_mod", out_msg, MetricRecord)
+
+    # Check if the msg has error
+    if out_msg.has_error():
+        return out_msg
+
+    new_array_record_key, client_to_server_arrecord = next(
+        iter(out_msg.content.array_records.items())
+    )
+
+    # Ensure keys in returned ArrayRecord match those in the one sent from server
+    if set(original_array_record.keys()) != set(client_to_server_arrecord.keys()):
+        return _handle_array_key_mismatch_err("adaptiveclipping_mod", out_msg)
+
+    client_to_server_ndarrays = client_to_server_arrecord.to_numpy_ndarrays()
+    # Clip the client update
+    norm_bit = compute_adaptive_clip_model_update(
+        client_to_server_ndarrays,
+        original_array_record.to_numpy_ndarrays(),
+        clipping_norm,
+    )
+    log(
+        INFO,
+        "adaptiveclipping_mod: ndarrays are clipped by value: %.4f.",
+        clipping_norm,
+    )
+    # Replace outgoing ArrayRecord's Array while preserving their keys
+    out_msg.content.array_records[new_array_record_key] = ArrayRecord(
+        OrderedDict(
+            {
+                k: Array(v)
+                for k, v in zip(
+                    client_to_server_arrecord.keys(), client_to_server_ndarrays
+                )
+            }
+        )
+    )
+    # Add to the MetricRecords the norm bit (recall reply messages only contain
+    # one MetricRecord)
+    metric_record_key = list(out_msg.content.metric_records.keys())[0]
+    # We cast it to `int` because MetricRecord does not support `bool` values
+    out_msg.content.metric_records[metric_record_key][KEY_NORM_BIT] = int(norm_bit)
+    return out_msg
+
+
+def _handle_err(msg: Message, reason: str) -> Message:
+    """Log and return error message."""
+    log(ERROR, reason)
+    return Message(
+        Error(code=ErrorCode.MOD_FAILED_PRECONDITION, reason=reason),
+        reply_to=msg,
+    )
+
+
+def _handle_multi_record_err(mod_name: str, msg: Message, record_type: type) -> Message:
+    """Log and return multi-record error."""
+    cnt = sum(isinstance(_, record_type) for _ in msg.content.values())
+    return _handle_err(
+        msg,
+        f"{mod_name} expects exactly one {record_type.__name__}, "
+        f"but found {cnt} {record_type.__name__}(s).",
+    )
+
+
+def _handle_no_key_err(mod_name: str, msg: Message) -> Message:
+    """Log and return no-key error."""
+    return _handle_err(
+        msg,
+        f"{mod_name} requires the key '{KEY_CLIPPING_NORM}' to be present in the "
+        "ConfigRecord, but it was not found. "
+        "Please ensure the `DifferentialPrivacyClientSideFixedClipping` wrapper "
+        "is used in the ServerApp.",
+    )
+
+
+def _handle_array_key_mismatch_err(mod_name: str, msg: Message) -> Message:
+    """Create array-key-mismatch error reasons."""
+    return _handle_err(
+        msg,
+        f"{mod_name} expects the keys in the ArrayRecord of the reply message to match "
+        "those from the ArrayRecord that the ClientApp received, but they do not.",
+    )
flwr/clientapp/typing.py
ADDED
@@ -0,0 +1,22 @@
# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom types for Flower clients."""


from typing import Callable

from flwr.common import Context, Message

ClientAppCallable = Callable[[Message, Context], Message]
flwr/common/constant.py
CHANGED
flwr/common/exit/exit_code.py
CHANGED
@@ -45,6 +45,7 @@ class ExitCode:
     SUPERNODE_NODE_AUTH_KEYS_INVALID = 302
 
     # SuperExec-specific exit codes (400-499)
+    SUPEREXEC_INVALID_PLUGIN_CONFIG = 400
 
     # Common exit codes (600-699)
     COMMON_ADDRESS_INVALID = 600
@@ -112,6 +113,9 @@ EXIT_CODE_HELP = {
         "file and try again."
     ),
     # SuperExec-specific exit codes (400-499)
+    ExitCode.SUPEREXEC_INVALID_PLUGIN_CONFIG: (
+        "The YAML configuration for the SuperExec plugin is invalid."
+    ),
     # Common exit codes (600-699)
     ExitCode.COMMON_ADDRESS_INVALID: (
         "Please provide a valid URL, IPv4 or IPv6 address."
flwr/common/record/typeddict.py
CHANGED
@@ -18,6 +18,8 @@
 from collections.abc import ItemsView, Iterator, KeysView, MutableMapping, ValuesView
 from typing import Callable, Generic, TypeVar, cast
 
+from typing_extensions import Self
+
 K = TypeVar("K")  # Key type
 V = TypeVar("V")  # Value type
 
@@ -86,3 +88,13 @@ class TypedDict(MutableMapping[K, V], Generic[K, V]):
     def items(self) -> ItemsView[K, V]:
         """D.items() -> a set-like object providing a view on D's items."""
         return cast(dict[K, V], self.__dict__["_data"]).items()
+
+    def copy(self) -> Self:
+        """Return a shallow copy of the dictionary."""
+        # Allocate instance without going through __init__
+        new = self.__class__.__new__(type(self))
+        # Copy internal state
+        new.__dict__["_check_key_fn"] = self.__dict__["_check_key_fn"]
+        new.__dict__["_check_value_fn"] = self.__dict__["_check_value_fn"]
+        new.__dict__["_data"] = cast(dict[K, V], self.__dict__["_data"]).copy()
+        return new
flwr/serverapp/strategy/__init__.py
CHANGED
@@ -15,6 +15,10 @@
 """ServerApp strategies."""
 
 
+from .dp_adaptive_clipping import (
+    DifferentialPrivacyClientSideAdaptiveClipping,
+    DifferentialPrivacyServerSideAdaptiveClipping,
+)
 from .dp_fixed_clipping import (
     DifferentialPrivacyClientSideFixedClipping,
     DifferentialPrivacyServerSideFixedClipping,
@@ -27,12 +31,17 @@ from .fedmedian import FedMedian
 from .fedprox import FedProx
 from .fedtrimmedavg import FedTrimmedAvg
 from .fedxgb_bagging import FedXgbBagging
+from .fedxgb_cyclic import FedXgbCyclic
 from .fedyogi import FedYogi
+from .krum import Krum
+from .qfedavg import QFedAvg
 from .result import Result
 from .strategy import Strategy
 
 __all__ = [
+    "DifferentialPrivacyClientSideAdaptiveClipping",
     "DifferentialPrivacyClientSideFixedClipping",
+    "DifferentialPrivacyServerSideAdaptiveClipping",
     "DifferentialPrivacyServerSideFixedClipping",
     "FedAdagrad",
     "FedAdam",
@@ -42,7 +51,10 @@ __all__ = [
     "FedProx",
     "FedTrimmedAvg",
     "FedXgbBagging",
+    "FedXgbCyclic",
     "FedYogi",
+    "Krum",
+    "QFedAvg",
     "Result",
     "Strategy",
 ]
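The newly exported strategies (FedXgbCyclic, Krum, QFedAvg, and the adaptive-clipping wrappers) are expected to follow the same start() pattern used by FedXgbBagging in the server template earlier in this diff. A hedged sketch follows; the Krum constructor arguments are assumptions, so consult krum.py and qfedavg.py in this release for the actual signatures.

from flwr.app import ArrayRecord, Context
from flwr.serverapp import Grid, ServerApp
from flwr.serverapp.strategy import Krum

app = ServerApp()


@app.main()
def main(grid: Grid, context: Context) -> None:
    arrays = ArrayRecord()  # initial global model (empty here for brevity)
    strategy = Krum()       # hypothetical defaults; see the strategy's docstring
    result = strategy.start(grid=grid, initial_arrays=arrays, num_rounds=3)
    print(result)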
|