flwr 1.20.0__py3-none-any.whl → 1.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flwr/__init__.py +4 -1
- flwr/app/__init__.py +28 -0
- flwr/app/exception.py +31 -0
- flwr/cli/app.py +2 -0
- flwr/cli/auth_plugin/oidc_cli_plugin.py +4 -4
- flwr/cli/cli_user_auth_interceptor.py +1 -1
- flwr/cli/config_utils.py +3 -3
- flwr/cli/constant.py +25 -8
- flwr/cli/log.py +9 -9
- flwr/cli/login/login.py +3 -3
- flwr/cli/ls.py +5 -5
- flwr/cli/new/new.py +15 -2
- flwr/cli/new/templates/app/README.flowertune.md.tpl +1 -1
- flwr/cli/new/templates/app/code/__init__.pytorch_legacy_api.py.tpl +1 -0
- flwr/cli/new/templates/app/code/client.baseline.py.tpl +64 -47
- flwr/cli/new/templates/app/code/client.huggingface.py.tpl +68 -30
- flwr/cli/new/templates/app/code/client.jax.py.tpl +63 -42
- flwr/cli/new/templates/app/code/client.mlx.py.tpl +80 -51
- flwr/cli/new/templates/app/code/client.numpy.py.tpl +36 -13
- flwr/cli/new/templates/app/code/client.pytorch.py.tpl +71 -46
- flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +55 -0
- flwr/cli/new/templates/app/code/client.sklearn.py.tpl +75 -30
- flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +69 -44
- flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
- flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +56 -90
- flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +1 -23
- flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +37 -58
- flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +39 -44
- flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -14
- flwr/cli/new/templates/app/code/server.baseline.py.tpl +27 -29
- flwr/cli/new/templates/app/code/server.huggingface.py.tpl +23 -19
- flwr/cli/new/templates/app/code/server.jax.py.tpl +27 -14
- flwr/cli/new/templates/app/code/server.mlx.py.tpl +29 -19
- flwr/cli/new/templates/app/code/server.numpy.py.tpl +30 -17
- flwr/cli/new/templates/app/code/server.pytorch.py.tpl +36 -26
- flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +31 -0
- flwr/cli/new/templates/app/code/server.sklearn.py.tpl +29 -21
- flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +28 -19
- flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
- flwr/cli/new/templates/app/code/task.huggingface.py.tpl +16 -20
- flwr/cli/new/templates/app/code/task.jax.py.tpl +1 -1
- flwr/cli/new/templates/app/code/task.numpy.py.tpl +1 -1
- flwr/cli/new/templates/app/code/task.pytorch.py.tpl +14 -27
- flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl +111 -0
- flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +1 -2
- flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
- flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +4 -4
- flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +2 -2
- flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +4 -4
- flwr/cli/new/templates/app/pyproject.jax.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +2 -2
- flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +3 -3
- flwr/cli/new/templates/app/pyproject.pytorch_legacy_api.toml.tpl +53 -0
- flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
- flwr/cli/pull.py +100 -0
- flwr/cli/run/run.py +9 -13
- flwr/cli/stop.py +7 -4
- flwr/cli/utils.py +36 -8
- flwr/client/grpc_rere_client/connection.py +1 -12
- flwr/client/rest_client/connection.py +3 -0
- flwr/clientapp/__init__.py +10 -0
- flwr/clientapp/mod/__init__.py +29 -0
- flwr/clientapp/mod/centraldp_mods.py +248 -0
- flwr/clientapp/mod/localdp_mod.py +169 -0
- flwr/clientapp/typing.py +22 -0
- flwr/common/args.py +20 -6
- flwr/common/auth_plugin/__init__.py +4 -4
- flwr/common/auth_plugin/auth_plugin.py +7 -7
- flwr/common/constant.py +26 -4
- flwr/common/event_log_plugin/event_log_plugin.py +1 -1
- flwr/common/exit/__init__.py +4 -0
- flwr/common/exit/exit.py +8 -1
- flwr/common/exit/exit_code.py +30 -7
- flwr/common/exit/exit_handler.py +62 -0
- flwr/common/{exit_handlers.py → exit/signal_handler.py} +20 -37
- flwr/common/grpc.py +0 -11
- flwr/common/inflatable_utils.py +1 -1
- flwr/common/logger.py +1 -1
- flwr/common/record/typeddict.py +12 -0
- flwr/common/retry_invoker.py +30 -11
- flwr/common/telemetry.py +4 -0
- flwr/compat/server/app.py +2 -2
- flwr/proto/appio_pb2.py +25 -17
- flwr/proto/appio_pb2.pyi +46 -2
- flwr/proto/clientappio_pb2.py +3 -11
- flwr/proto/clientappio_pb2.pyi +0 -47
- flwr/proto/clientappio_pb2_grpc.py +19 -20
- flwr/proto/clientappio_pb2_grpc.pyi +10 -11
- flwr/proto/control_pb2.py +66 -0
- flwr/proto/{exec_pb2.pyi → control_pb2.pyi} +24 -0
- flwr/proto/{exec_pb2_grpc.py → control_pb2_grpc.py} +88 -54
- flwr/proto/control_pb2_grpc.pyi +106 -0
- flwr/proto/serverappio_pb2.py +2 -2
- flwr/proto/serverappio_pb2_grpc.py +68 -0
- flwr/proto/serverappio_pb2_grpc.pyi +26 -0
- flwr/proto/simulationio_pb2.py +4 -11
- flwr/proto/simulationio_pb2.pyi +0 -58
- flwr/proto/simulationio_pb2_grpc.py +129 -27
- flwr/proto/simulationio_pb2_grpc.pyi +52 -13
- flwr/server/app.py +142 -152
- flwr/server/grid/grpc_grid.py +3 -0
- flwr/server/grid/inmemory_grid.py +1 -0
- flwr/server/serverapp/app.py +157 -146
- flwr/server/superlink/fleet/vce/backend/raybackend.py +3 -1
- flwr/server/superlink/fleet/vce/vce_api.py +6 -6
- flwr/server/superlink/linkstate/in_memory_linkstate.py +34 -0
- flwr/server/superlink/linkstate/linkstate.py +2 -1
- flwr/server/superlink/linkstate/sqlite_linkstate.py +45 -0
- flwr/server/superlink/serverappio/serverappio_grpc.py +1 -1
- flwr/server/superlink/serverappio/serverappio_servicer.py +61 -6
- flwr/server/superlink/simulation/simulationio_servicer.py +97 -21
- flwr/serverapp/__init__.py +12 -0
- flwr/serverapp/exception.py +38 -0
- flwr/serverapp/strategy/__init__.py +64 -0
- flwr/serverapp/strategy/bulyan.py +238 -0
- flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
- flwr/serverapp/strategy/dp_fixed_clipping.py +374 -0
- flwr/serverapp/strategy/fedadagrad.py +159 -0
- flwr/serverapp/strategy/fedadam.py +178 -0
- flwr/serverapp/strategy/fedavg.py +320 -0
- flwr/serverapp/strategy/fedavgm.py +198 -0
- flwr/serverapp/strategy/fedmedian.py +105 -0
- flwr/serverapp/strategy/fedopt.py +218 -0
- flwr/serverapp/strategy/fedprox.py +174 -0
- flwr/serverapp/strategy/fedtrimmedavg.py +176 -0
- flwr/serverapp/strategy/fedxgb_bagging.py +117 -0
- flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
- flwr/serverapp/strategy/fedyogi.py +170 -0
- flwr/serverapp/strategy/krum.py +112 -0
- flwr/serverapp/strategy/multikrum.py +247 -0
- flwr/serverapp/strategy/qfedavg.py +252 -0
- flwr/serverapp/strategy/result.py +105 -0
- flwr/serverapp/strategy/strategy.py +285 -0
- flwr/serverapp/strategy/strategy_utils.py +299 -0
- flwr/simulation/app.py +161 -164
- flwr/simulation/run_simulation.py +25 -30
- flwr/supercore/app_utils.py +58 -0
- flwr/{supernode/scheduler → supercore/cli}/__init__.py +3 -3
- flwr/supercore/cli/flower_superexec.py +166 -0
- flwr/supercore/constant.py +19 -0
- flwr/supercore/{scheduler → corestate}/__init__.py +3 -3
- flwr/supercore/corestate/corestate.py +81 -0
- flwr/supercore/grpc_health/__init__.py +3 -0
- flwr/supercore/grpc_health/health_server.py +53 -0
- flwr/supercore/grpc_health/simple_health_servicer.py +2 -2
- flwr/{superexec → supercore/superexec}/__init__.py +1 -1
- flwr/supercore/superexec/plugin/__init__.py +28 -0
- flwr/{supernode/scheduler/simple_clientapp_scheduler_plugin.py → supercore/superexec/plugin/base_exec_plugin.py} +10 -6
- flwr/supercore/superexec/plugin/clientapp_exec_plugin.py +28 -0
- flwr/supercore/{scheduler/plugin.py → superexec/plugin/exec_plugin.py} +15 -5
- flwr/supercore/superexec/plugin/serverapp_exec_plugin.py +28 -0
- flwr/supercore/superexec/plugin/simulation_exec_plugin.py +28 -0
- flwr/supercore/superexec/run_superexec.py +199 -0
- flwr/superlink/artifact_provider/__init__.py +22 -0
- flwr/superlink/artifact_provider/artifact_provider.py +37 -0
- flwr/superlink/servicer/__init__.py +15 -0
- flwr/superlink/servicer/control/__init__.py +22 -0
- flwr/{superexec/exec_event_log_interceptor.py → superlink/servicer/control/control_event_log_interceptor.py} +7 -7
- flwr/{superexec/exec_grpc.py → superlink/servicer/control/control_grpc.py} +27 -29
- flwr/{superexec/exec_license_interceptor.py → superlink/servicer/control/control_license_interceptor.py} +6 -6
- flwr/{superexec/exec_servicer.py → superlink/servicer/control/control_servicer.py} +127 -31
- flwr/{superexec/exec_user_auth_interceptor.py → superlink/servicer/control/control_user_auth_interceptor.py} +10 -10
- flwr/supernode/cli/flower_supernode.py +3 -0
- flwr/supernode/cli/flwr_clientapp.py +18 -21
- flwr/supernode/nodestate/in_memory_nodestate.py +2 -2
- flwr/supernode/nodestate/nodestate.py +3 -59
- flwr/supernode/runtime/run_clientapp.py +39 -102
- flwr/supernode/servicer/clientappio/clientappio_servicer.py +10 -17
- flwr/supernode/start_client_internal.py +35 -76
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/METADATA +9 -18
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/RECORD +176 -128
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/entry_points.txt +1 -0
- flwr/proto/exec_pb2.py +0 -62
- flwr/proto/exec_pb2_grpc.pyi +0 -93
- flwr/superexec/app.py +0 -45
- flwr/superexec/deployment.py +0 -191
- flwr/superexec/executor.py +0 -100
- flwr/superexec/simulation.py +0 -129
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Federated Median (FedMedian) [Yin et al., 2018] strategy.
|
|
16
|
+
|
|
17
|
+
Paper: arxiv.org/pdf/1803.01498v1.pdf
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
from collections.abc import Iterable
|
|
22
|
+
from typing import Optional, cast
|
|
23
|
+
|
|
24
|
+
import numpy as np
|
|
25
|
+
|
|
26
|
+
from flwr.common import Array, ArrayRecord, Message, MetricRecord
|
|
27
|
+
|
|
28
|
+
from .fedavg import FedAvg
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class FedMedian(FedAvg):
    """Federated Median (FedMedian) strategy.

    Implementation based on https://arxiv.org/pdf/1803.01498v1

    Training replies are combined by taking the coordinate-wise median of the
    received model arrays instead of a weighted average, which gives robustness
    against outlier (e.g. Byzantine) updates.

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluate round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    """

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
        # Validate replies; only well-formed ones take part in aggregation
        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)

        if not valid_replies:
            return None, None

        first_content = valid_replies[0].content
        # Each reply carries exactly one ArrayRecord; take its key from the
        # first valid reply (assumed identical across replies)
        record_key = next(iter(first_content.array_records.keys()))
        # Preserve the per-array key ordering of the first reply
        array_keys = list(first_content[record_key].keys())

        # Build the aggregated record: coordinate-wise median per array
        median_record = ArrayRecord()
        for array_key in array_keys:
            # Pop each client's array as it is consumed, freeing memory early
            # (this intentionally mutates the reply contents)
            stacked = np.stack(
                [
                    cast(ArrayRecord, msg.content[record_key]).pop(array_key).numpy()
                    for msg in valid_replies
                ]
            )
            median_record[array_key] = Array(np.median(stacked, axis=0))

        # MetricRecords are still aggregated with the configured function
        # (weighted average by default)
        aggregated_metrics = self.train_metrics_aggr_fn(
            [msg.content for msg in valid_replies],
            self.weighted_by_key,
        )
        return median_record, aggregated_metrics
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Adaptive Federated Optimization (FedOpt) [Reddi et al., 2020] abstract strategy.
|
|
16
|
+
|
|
17
|
+
Paper: arxiv.org/abs/2003.00295
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from collections.abc import Iterable
|
|
21
|
+
from logging import INFO
|
|
22
|
+
from typing import Callable, Optional
|
|
23
|
+
|
|
24
|
+
import numpy as np
|
|
25
|
+
|
|
26
|
+
from flwr.common import (
|
|
27
|
+
ArrayRecord,
|
|
28
|
+
ConfigRecord,
|
|
29
|
+
Message,
|
|
30
|
+
MetricRecord,
|
|
31
|
+
NDArray,
|
|
32
|
+
RecordDict,
|
|
33
|
+
log,
|
|
34
|
+
)
|
|
35
|
+
from flwr.server import Grid
|
|
36
|
+
|
|
37
|
+
from ..exception import AggregationError
|
|
38
|
+
from .fedavg import FedAvg
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# pylint: disable=line-too-long
|
|
42
|
+
class FedOpt(FedAvg):
    """Federated Optim strategy.

    Implementation based on https://arxiv.org/abs/2003.00295v5

    Abstract base for the adaptive server-side optimizers (FedAdagrad, FedAdam,
    FedYogi). It tracks the arrays sent to clients each round and provides the
    shared pseudo-gradient (`delta_t`) and first-moment (`m_t`) computation.

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for both ArrayRecords and MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluate round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    eta : float, optional
        Server-side learning rate. Defaults to 1e-1.
    eta_l : float, optional
        Client-side learning rate. Defaults to 1e-1.
    beta_1 : float, optional
        Momentum parameter. Defaults to 0.0.
    beta_2 : float, optional
        Second moment parameter. Defaults to 0.0.
    tau : float, optional
        Controls the algorithm's degree of adaptability. Defaults to 1e-3.
    """

    # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-locals, line-too-long
    def __init__(
        self,
        *,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        eta: float = 1e-1,
        eta_l: float = 1e-1,
        beta_1: float = 0.0,
        beta_2: float = 0.0,
        tau: float = 1e-3,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
        )
        # Snapshot of the arrays sent out in the most recent `configure_train`
        # call; needed to compute delta_t at aggregation time
        self.current_arrays: Optional[dict[str, NDArray]] = None
        self.eta = eta
        self.eta_l = eta_l
        self.tau = tau
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        # Optimizer state: first moment (m_t) and second moment (v_t);
        # lazily initialized on the first aggregation
        self.m_t: Optional[dict[str, NDArray]] = None
        self.v_t: Optional[dict[str, NDArray]] = None

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> FedOpt settings:")
        log(
            INFO,
            "\t│\t├── eta (%s) | eta_l (%s)",
            f"{self.eta:.6g}",
            f"{self.eta_l:.6g}",
        )
        log(
            INFO,
            "\t│\t├── beta_1 (%s) | beta_2 (%s)",
            f"{self.beta_1:.6g}",
            f"{self.beta_2:.6g}",
        )
        log(
            INFO,
            "\t│\t└── tau (%s)",
            f"{self.tau:.6g}",
        )
        super().summary()

    def configure_train(
        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
    ) -> Iterable[Message]:
        """Configure the next round of federated training."""
        # Keep track of array record being communicated
        self.current_arrays = {k: array.numpy() for k, array in arrays.items()}
        return super().configure_train(server_round, arrays, config, grid)

    def _compute_deltat_and_mt(
        self, aggregated_arrayrecord: ArrayRecord
    ) -> tuple[dict[str, NDArray], dict[str, NDArray], dict[str, NDArray]]:
        """Compute delta_t and m_t.

        This is a shared stage during aggregation for FedAdagrad, FedAdam and FedYogi.

        Returns the pseudo-gradient ``delta_t = aggregated - current``, the
        updated first moment ``m_t``, and the aggregated arrays as ndarrays.
        Raises AggregationError when the stored arrays are missing or do not
        match the aggregated ones in keys or shapes.
        """
        # Guard: configure_train must have stored the round's outgoing arrays
        if self.current_arrays is None:
            reason = (
                "Current arrays not set. Ensure that `configure_train` has been "
                "called before aggregation."
            )
            raise AggregationError(reason=reason)

        aggregated_ndarrays = {
            k: array.numpy() for k, array in aggregated_arrayrecord.items()
        }

        # Check keys in aggregated arrays match those in current arrays
        if set(aggregated_ndarrays.keys()) != set(self.current_arrays.keys()):
            reason = (
                "Keys of the aggregated arrays do not match those of the arrays "
                "stored at the strategy. `delta_t = aggregated_arrays - "
                "current_arrays` cannot be computed."
            )
            raise AggregationError(reason=reason)

        # Check that the shape of values match
        # Only shapes that match can compute delta_t (we don't want
        # broadcasting to happen)
        for k, x in aggregated_ndarrays.items():
            if x.shape != self.current_arrays[k].shape:
                reason = (
                    f"Shape of aggregated array '{k}' does not match "
                    f"shape of the array under the same key stored in the strategy. "
                    f"Cannot compute `delta_t`."
                )
                raise AggregationError(reason=reason)

        # Pseudo-gradient: element-wise difference per array key
        delta_t = {
            k: x - self.current_arrays[k] for k, x in aggregated_ndarrays.items()
        }

        # m_t: lazily initialize to zeros on first round, then apply the
        # exponential moving average m_t = beta_1 * m_{t-1} + (1 - beta_1) * delta_t
        # (no bias correction, as in the FedOpt paper)
        if not self.m_t:
            self.m_t = {k: np.zeros_like(v) for k, v in aggregated_ndarrays.items()}
        self.m_t = {
            k: self.beta_1 * v + (1 - self.beta_1) * delta_t[k]
            for k, v in self.m_t.items()
        }

        return delta_t, self.m_t, aggregated_ndarrays
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Federated Optimization (FedProx) [Li et al., 2018] strategy.
|
|
16
|
+
|
|
17
|
+
Paper: arxiv.org/abs/1812.06127
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
from collections.abc import Iterable
|
|
22
|
+
from logging import INFO, WARN
|
|
23
|
+
from typing import Callable, Optional
|
|
24
|
+
|
|
25
|
+
from flwr.common import (
|
|
26
|
+
ArrayRecord,
|
|
27
|
+
ConfigRecord,
|
|
28
|
+
Message,
|
|
29
|
+
MetricRecord,
|
|
30
|
+
RecordDict,
|
|
31
|
+
log,
|
|
32
|
+
)
|
|
33
|
+
from flwr.server import Grid
|
|
34
|
+
|
|
35
|
+
from .fedavg import FedAvg
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class FedProx(FedAvg):
    r"""Federated Optimization strategy.

    Implementation based on https://arxiv.org/abs/1812.06127

    Server-side, FedProx is identical to FedAvg. The difference lives on the
    client: each client **MUST** add a proximal regularization term to its
    local training loss:

    .. math::
        \frac{\mu}{2} || w - w^t ||^2

    Where $w^t$ denotes the global parameters and $w$ denotes the local weights
    being optimized.

    The strategy communicates the proximal term weight to clients via the
    ``ConfigRecord`` sent in ``configure_train``, under the key
    ``"proximal-mu"``. Clients read this value and add the proximal term to
    their loss.

    In PyTorch, for example, the loss would go from:

    .. code:: python
        loss = criterion(net(inputs), labels)

    To:

    .. code:: python
        # Get proximal term weight from message
        mu = msg.content["config"]["proximal-mu"]

        # Compute proximal term
        proximal_term = 0.0
        for local_weights, global_weights in zip(net.parameters(), global_params):
            proximal_term += (local_weights - global_weights).norm(2)

        # Update loss
        loss = criterion(net(inputs), labels) + (mu / 2) * proximal_term

    With ``global_params`` being a copy of the model parameters, created **after**
    applying the received global weights but **before** local training begins.

    .. code:: python
        global_params = copy.deepcopy(net).parameters()

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for both ArrayRecords and MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluate round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    proximal_mu : float (default: 0.0)
        The weight of the proximal term used in the optimization. 0.0 makes
        this strategy equivalent to FedAvg, and the higher the coefficient, the more
        regularization will be used (that is, the client parameters will need to be
        closer to the server parameters during training).
    """

    def __init__(  # pylint: disable=R0913, R0917
        self,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        proximal_mu: float = 0.0,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
        )
        self.proximal_mu = proximal_mu

        # A zero coefficient disables the proximal term entirely; warn the
        # user since that degenerates to plain FedAvg
        if self.proximal_mu == 0.0:
            log(
                WARN,
                "FedProx initialized with `proximal_mu=0.0`. "
                "This makes the strategy equivalent to FedAvg.",
            )

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> FedProx settings:")
        log(INFO, "\t│\t└── Proximal mu: %s", self.proximal_mu)
        super().summary()

    def configure_train(
        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
    ) -> Iterable[Message]:
        """Configure the next round of federated training."""
        # Make the proximal coefficient available to clients via the config
        config["proximal-mu"] = self.proximal_mu
        return super().configure_train(server_round, arrays, config, grid)
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021].
|
|
16
|
+
|
|
17
|
+
Paper: arxiv.org/abs/1803.01498
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
from collections.abc import Iterable
|
|
22
|
+
from logging import INFO
|
|
23
|
+
from typing import Callable, Optional, cast
|
|
24
|
+
|
|
25
|
+
import numpy as np
|
|
26
|
+
|
|
27
|
+
from flwr.common import Array, ArrayRecord, Message, MetricRecord, NDArray, RecordDict
|
|
28
|
+
from flwr.common.logger import log
|
|
29
|
+
|
|
30
|
+
from ..exception import AggregationError
|
|
31
|
+
from .fedavg import FedAvg
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class FedTrimmedAvg(FedAvg):
    """Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021].

    Implemented based on: https://arxiv.org/abs/1803.01498

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for both ArrayRecords and MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluation round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    beta : float (default: 0.2)
        Fraction to cut off of both tails of the distribution.
    """

    def __init__(  # pylint: disable=R0913, R0917
        self,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        beta: float = 0.2,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
        )
        # Fraction of observations trimmed from EACH tail before averaging.
        self.beta = beta

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> FedTrimmedAvg settings:")
        log(INFO, "\t│\t└── beta: %s", self.beta)
        super().summary()

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages.

        ArrayRecords are combined element-wise with a trimmed mean
        (see `trim_mean`), trimming `self.beta` from both tails;
        MetricRecords are aggregated with `self.train_metrics_aggr_fn`.

        Raises
        ------
        AggregationError
            If the trimmed mean cannot be computed (e.g. `beta` trims away
            more observations than are available).
        """
        # Validate replies and keep only the valid ones (same check used
        # by FedAvg); returns (None, None) when nothing can be aggregated.
        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)

        if not valid_replies:
            return None, None

        # Aggregate ArrayRecords using trimmed mean
        # Get the key for the only ArrayRecord from the first Message
        record_key = list(valid_replies[0].content.array_records.keys())[0]
        # Preserve keys for arrays in ArrayRecord
        array_keys = list(valid_replies[0].content[record_key].keys())

        # Compute trimmed mean for each layer and construct ArrayRecord
        arrays = ArrayRecord()
        for array_key in array_keys:
            # Get the corresponding layer from each client.
            # NOTE: `pop` removes each layer from the reply as it is
            # consumed, so the reply Messages are mutated here —
            # presumably to keep peak memory low.
            layers = [
                cast(ArrayRecord, msg.content[record_key]).pop(array_key).numpy()
                for msg in valid_replies
            ]
            # Compute trimmed mean and save as Array in ArrayRecord
            try:
                arrays[array_key] = Array(trim_mean(np.stack(layers), self.beta))
            except ValueError as e:
                raise AggregationError(
                    f"Trimmed mean could not be computed. "
                    f"Likely cause: beta={self.beta} is too large."
                ) from e

        # Aggregate MetricRecords
        metrics = self.train_metrics_aggr_fn(
            [msg.content for msg in valid_replies],
            self.weighted_by_key,
        )
        return arrays, metrics
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def trim_mean(array: NDArray, cut_fraction: float) -> NDArray:
    """Compute the mean along axis 0 after trimming both tails.

    The lowest and highest ``int(cut_fraction * n)`` observations are
    discarded before averaging, mirroring the scipy implementation:

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.trim_mean.html

    Raises ``ValueError`` when the fraction trims away more observations
    than are available.
    """
    n_obs = array.shape[0]
    n_trim = int(cut_fraction * n_obs)
    keep_upto = n_obs - n_trim
    if n_trim > keep_upto:
        raise ValueError("Fraction too big.")

    # Partition so that positions [n_trim, keep_upto) along axis 0 hold
    # the middle observations; a full sort is unnecessary.
    partitioned = np.partition(array, (n_trim, keep_upto - 1), 0)
    trimmed: NDArray = partitioned[n_trim:keep_upto]
    result: NDArray = np.mean(trimmed, axis=0)
    return result
|