flwr 1.20.0__py3-none-any.whl → 1.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flwr/__init__.py +4 -1
- flwr/app/__init__.py +28 -0
- flwr/app/exception.py +31 -0
- flwr/cli/app.py +2 -0
- flwr/cli/auth_plugin/oidc_cli_plugin.py +4 -4
- flwr/cli/cli_user_auth_interceptor.py +1 -1
- flwr/cli/config_utils.py +3 -3
- flwr/cli/constant.py +25 -8
- flwr/cli/log.py +9 -9
- flwr/cli/login/login.py +3 -3
- flwr/cli/ls.py +5 -5
- flwr/cli/new/new.py +15 -2
- flwr/cli/new/templates/app/README.flowertune.md.tpl +1 -1
- flwr/cli/new/templates/app/code/__init__.pytorch_legacy_api.py.tpl +1 -0
- flwr/cli/new/templates/app/code/client.baseline.py.tpl +64 -47
- flwr/cli/new/templates/app/code/client.huggingface.py.tpl +68 -30
- flwr/cli/new/templates/app/code/client.jax.py.tpl +63 -42
- flwr/cli/new/templates/app/code/client.mlx.py.tpl +80 -51
- flwr/cli/new/templates/app/code/client.numpy.py.tpl +36 -13
- flwr/cli/new/templates/app/code/client.pytorch.py.tpl +71 -46
- flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +55 -0
- flwr/cli/new/templates/app/code/client.sklearn.py.tpl +75 -30
- flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +69 -44
- flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
- flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +56 -90
- flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +1 -23
- flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +37 -58
- flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +39 -44
- flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -14
- flwr/cli/new/templates/app/code/server.baseline.py.tpl +27 -29
- flwr/cli/new/templates/app/code/server.huggingface.py.tpl +23 -19
- flwr/cli/new/templates/app/code/server.jax.py.tpl +27 -14
- flwr/cli/new/templates/app/code/server.mlx.py.tpl +29 -19
- flwr/cli/new/templates/app/code/server.numpy.py.tpl +30 -17
- flwr/cli/new/templates/app/code/server.pytorch.py.tpl +36 -26
- flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +31 -0
- flwr/cli/new/templates/app/code/server.sklearn.py.tpl +29 -21
- flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +28 -19
- flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
- flwr/cli/new/templates/app/code/task.huggingface.py.tpl +16 -20
- flwr/cli/new/templates/app/code/task.jax.py.tpl +1 -1
- flwr/cli/new/templates/app/code/task.numpy.py.tpl +1 -1
- flwr/cli/new/templates/app/code/task.pytorch.py.tpl +14 -27
- flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl +111 -0
- flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +1 -2
- flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
- flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +4 -4
- flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +2 -2
- flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +4 -4
- flwr/cli/new/templates/app/pyproject.jax.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +2 -2
- flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +3 -3
- flwr/cli/new/templates/app/pyproject.pytorch_legacy_api.toml.tpl +53 -0
- flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +1 -1
- flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
- flwr/cli/pull.py +100 -0
- flwr/cli/run/run.py +9 -13
- flwr/cli/stop.py +7 -4
- flwr/cli/utils.py +36 -8
- flwr/client/grpc_rere_client/connection.py +1 -12
- flwr/client/rest_client/connection.py +3 -0
- flwr/clientapp/__init__.py +10 -0
- flwr/clientapp/mod/__init__.py +29 -0
- flwr/clientapp/mod/centraldp_mods.py +248 -0
- flwr/clientapp/mod/localdp_mod.py +169 -0
- flwr/clientapp/typing.py +22 -0
- flwr/common/args.py +20 -6
- flwr/common/auth_plugin/__init__.py +4 -4
- flwr/common/auth_plugin/auth_plugin.py +7 -7
- flwr/common/constant.py +26 -4
- flwr/common/event_log_plugin/event_log_plugin.py +1 -1
- flwr/common/exit/__init__.py +4 -0
- flwr/common/exit/exit.py +8 -1
- flwr/common/exit/exit_code.py +30 -7
- flwr/common/exit/exit_handler.py +62 -0
- flwr/common/{exit_handlers.py → exit/signal_handler.py} +20 -37
- flwr/common/grpc.py +0 -11
- flwr/common/inflatable_utils.py +1 -1
- flwr/common/logger.py +1 -1
- flwr/common/record/typeddict.py +12 -0
- flwr/common/retry_invoker.py +30 -11
- flwr/common/telemetry.py +4 -0
- flwr/compat/server/app.py +2 -2
- flwr/proto/appio_pb2.py +25 -17
- flwr/proto/appio_pb2.pyi +46 -2
- flwr/proto/clientappio_pb2.py +3 -11
- flwr/proto/clientappio_pb2.pyi +0 -47
- flwr/proto/clientappio_pb2_grpc.py +19 -20
- flwr/proto/clientappio_pb2_grpc.pyi +10 -11
- flwr/proto/control_pb2.py +66 -0
- flwr/proto/{exec_pb2.pyi → control_pb2.pyi} +24 -0
- flwr/proto/{exec_pb2_grpc.py → control_pb2_grpc.py} +88 -54
- flwr/proto/control_pb2_grpc.pyi +106 -0
- flwr/proto/serverappio_pb2.py +2 -2
- flwr/proto/serverappio_pb2_grpc.py +68 -0
- flwr/proto/serverappio_pb2_grpc.pyi +26 -0
- flwr/proto/simulationio_pb2.py +4 -11
- flwr/proto/simulationio_pb2.pyi +0 -58
- flwr/proto/simulationio_pb2_grpc.py +129 -27
- flwr/proto/simulationio_pb2_grpc.pyi +52 -13
- flwr/server/app.py +142 -152
- flwr/server/grid/grpc_grid.py +3 -0
- flwr/server/grid/inmemory_grid.py +1 -0
- flwr/server/serverapp/app.py +157 -146
- flwr/server/superlink/fleet/vce/backend/raybackend.py +3 -1
- flwr/server/superlink/fleet/vce/vce_api.py +6 -6
- flwr/server/superlink/linkstate/in_memory_linkstate.py +34 -0
- flwr/server/superlink/linkstate/linkstate.py +2 -1
- flwr/server/superlink/linkstate/sqlite_linkstate.py +45 -0
- flwr/server/superlink/serverappio/serverappio_grpc.py +1 -1
- flwr/server/superlink/serverappio/serverappio_servicer.py +61 -6
- flwr/server/superlink/simulation/simulationio_servicer.py +97 -21
- flwr/serverapp/__init__.py +12 -0
- flwr/serverapp/exception.py +38 -0
- flwr/serverapp/strategy/__init__.py +64 -0
- flwr/serverapp/strategy/bulyan.py +238 -0
- flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
- flwr/serverapp/strategy/dp_fixed_clipping.py +374 -0
- flwr/serverapp/strategy/fedadagrad.py +159 -0
- flwr/serverapp/strategy/fedadam.py +178 -0
- flwr/serverapp/strategy/fedavg.py +320 -0
- flwr/serverapp/strategy/fedavgm.py +198 -0
- flwr/serverapp/strategy/fedmedian.py +105 -0
- flwr/serverapp/strategy/fedopt.py +218 -0
- flwr/serverapp/strategy/fedprox.py +174 -0
- flwr/serverapp/strategy/fedtrimmedavg.py +176 -0
- flwr/serverapp/strategy/fedxgb_bagging.py +117 -0
- flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
- flwr/serverapp/strategy/fedyogi.py +170 -0
- flwr/serverapp/strategy/krum.py +112 -0
- flwr/serverapp/strategy/multikrum.py +247 -0
- flwr/serverapp/strategy/qfedavg.py +252 -0
- flwr/serverapp/strategy/result.py +105 -0
- flwr/serverapp/strategy/strategy.py +285 -0
- flwr/serverapp/strategy/strategy_utils.py +299 -0
- flwr/simulation/app.py +161 -164
- flwr/simulation/run_simulation.py +25 -30
- flwr/supercore/app_utils.py +58 -0
- flwr/{supernode/scheduler → supercore/cli}/__init__.py +3 -3
- flwr/supercore/cli/flower_superexec.py +166 -0
- flwr/supercore/constant.py +19 -0
- flwr/supercore/{scheduler → corestate}/__init__.py +3 -3
- flwr/supercore/corestate/corestate.py +81 -0
- flwr/supercore/grpc_health/__init__.py +3 -0
- flwr/supercore/grpc_health/health_server.py +53 -0
- flwr/supercore/grpc_health/simple_health_servicer.py +2 -2
- flwr/{superexec → supercore/superexec}/__init__.py +1 -1
- flwr/supercore/superexec/plugin/__init__.py +28 -0
- flwr/{supernode/scheduler/simple_clientapp_scheduler_plugin.py → supercore/superexec/plugin/base_exec_plugin.py} +10 -6
- flwr/supercore/superexec/plugin/clientapp_exec_plugin.py +28 -0
- flwr/supercore/{scheduler/plugin.py → superexec/plugin/exec_plugin.py} +15 -5
- flwr/supercore/superexec/plugin/serverapp_exec_plugin.py +28 -0
- flwr/supercore/superexec/plugin/simulation_exec_plugin.py +28 -0
- flwr/supercore/superexec/run_superexec.py +199 -0
- flwr/superlink/artifact_provider/__init__.py +22 -0
- flwr/superlink/artifact_provider/artifact_provider.py +37 -0
- flwr/superlink/servicer/__init__.py +15 -0
- flwr/superlink/servicer/control/__init__.py +22 -0
- flwr/{superexec/exec_event_log_interceptor.py → superlink/servicer/control/control_event_log_interceptor.py} +7 -7
- flwr/{superexec/exec_grpc.py → superlink/servicer/control/control_grpc.py} +27 -29
- flwr/{superexec/exec_license_interceptor.py → superlink/servicer/control/control_license_interceptor.py} +6 -6
- flwr/{superexec/exec_servicer.py → superlink/servicer/control/control_servicer.py} +127 -31
- flwr/{superexec/exec_user_auth_interceptor.py → superlink/servicer/control/control_user_auth_interceptor.py} +10 -10
- flwr/supernode/cli/flower_supernode.py +3 -0
- flwr/supernode/cli/flwr_clientapp.py +18 -21
- flwr/supernode/nodestate/in_memory_nodestate.py +2 -2
- flwr/supernode/nodestate/nodestate.py +3 -59
- flwr/supernode/runtime/run_clientapp.py +39 -102
- flwr/supernode/servicer/clientappio/clientappio_servicer.py +10 -17
- flwr/supernode/start_client_internal.py +35 -76
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/METADATA +9 -18
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/RECORD +176 -128
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/entry_points.txt +1 -0
- flwr/proto/exec_pb2.py +0 -62
- flwr/proto/exec_pb2_grpc.pyi +0 -93
- flwr/superexec/app.py +0 -45
- flwr/superexec/deployment.py +0 -191
- flwr/superexec/executor.py +0 -100
- flwr/superexec/simulation.py +0 -129
- {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/WHEEL +0 -0
flwr/serverapp/strategy/fedxgb_bagging.py
@@ -0,0 +1,117 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Flower message-based FedXgbBagging strategy."""
+from collections.abc import Iterable
+from typing import Optional, cast
+
+import numpy as np
+
+from flwr.common import ArrayRecord, ConfigRecord, Message, MetricRecord
+from flwr.server import Grid
+
+from ..exception import InconsistentMessageReplies
+from .fedavg import FedAvg
+from .strategy_utils import aggregate_bagging
+
+
+# pylint: disable=line-too-long
+class FedXgbBagging(FedAvg):
+    """Configurable FedXgbBagging strategy implementation.
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_train_nodes : int (default: 2)
+        Minimum number of nodes used during training.
+    min_evaluate_nodes : int (default: 2)
+        Minimum number of nodes used during validation.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    """
+
+    current_bst: Optional[bytes] = None
+
+    def _ensure_single_array(self, arrays: ArrayRecord) -> None:
+        """Check that ensures there's only one Array in the ArrayRecord."""
+        n = len(arrays)
+        if n != 1:
+            raise InconsistentMessageReplies(
+                reason="Expected exactly one Array in ArrayRecord. "
+                "Skipping aggregation."
+            )
+
+    def configure_train(
+        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
+    ) -> Iterable[Message]:
+        """Configure the next round of federated training."""
+        self._ensure_single_array(arrays)
+        # Keep track of array record being communicated
+        self.current_bst = arrays["0"].numpy().tobytes()
+        return super().configure_train(server_round, arrays, config, grid)
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)
+
+        arrays, metrics = None, None
+        if valid_replies:
+            reply_contents = [msg.content for msg in valid_replies]
+            array_record_key = next(iter(reply_contents[0].array_records.keys()))
+
+            # Aggregate ArrayRecords
+            for content in reply_contents:
+                self._ensure_single_array(cast(ArrayRecord, content[array_record_key]))
+                bst = content[array_record_key]["0"].numpy().tobytes()  # type: ignore[union-attr]
+
+                if self.current_bst is not None:
+                    self.current_bst = aggregate_bagging(self.current_bst, bst)
+
+            if self.current_bst is not None:
+                arrays = ArrayRecord([np.frombuffer(self.current_bst, dtype=np.uint8)])
+
+            # Aggregate MetricRecords
+            metrics = self.train_metrics_aggr_fn(
+                reply_contents,
+                self.weighted_by_key,
+            )
+        return arrays, metrics
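
For orientation, a minimal usage sketch (not part of the diff): it assumes FedXgbBagging is re-exported from flwr.serverapp.strategy, as the new flwr/serverapp/strategy/__init__.py in this release suggests, and it only uses constructor parameters documented in the docstring above (inherited from FedAvg).

# Hedged sketch, not part of the diff: import path assumed from the new
# flwr/serverapp/strategy package layout; parameters follow the docstring above.
from flwr.serverapp.strategy import FedXgbBagging

strategy = FedXgbBagging(
    fraction_train=1.0,              # sample all connected nodes for training
    min_available_nodes=2,           # wait until at least two nodes are online
    weighted_by_key="num-examples",  # weight MetricRecord aggregation by this key
)
# Each round, configure_train() remembers the serialized booster it sends out and
# aggregate_train() folds every valid reply into it via aggregate_bagging().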
flwr/serverapp/strategy/fedxgb_cyclic.py
@@ -0,0 +1,220 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Flower message-based FedXgbCyclic strategy."""
+
+
+from collections.abc import Iterable
+from logging import INFO
+from typing import Callable, Optional, cast
+
+from flwr.common import (
+    ArrayRecord,
+    ConfigRecord,
+    Message,
+    MessageType,
+    MetricRecord,
+    RecordDict,
+    log,
+)
+from flwr.server import Grid
+
+from .fedavg import FedAvg
+from .strategy_utils import sample_nodes
+
+
+# pylint: disable=line-too-long
+class FedXgbCyclic(FedAvg):
+    """Configurable FedXgbCyclic strategy implementation.
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    """
+
+    # pylint: disable=too-many-arguments,too-many-positional-arguments
+    def __init__(
+        self,
+        fraction_train: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_available_nodes: int = 2,
+        weighted_by_key: str = "num-examples",
+        arrayrecord_key: str = "arrays",
+        configrecord_key: str = "config",
+        train_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        evaluate_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+    ) -> None:
+        super().__init__(
+            fraction_train=fraction_train,
+            fraction_evaluate=fraction_evaluate,
+            min_train_nodes=2,
+            min_evaluate_nodes=2,
+            min_available_nodes=min_available_nodes,
+            weighted_by_key=weighted_by_key,
+            arrayrecord_key=arrayrecord_key,
+            configrecord_key=configrecord_key,
+            train_metrics_aggr_fn=train_metrics_aggr_fn,
+            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
+        )
+
+        self.registered_nodes: dict[int, int] = {}
+
+        if fraction_train not in (0.0, 1.0):
+            raise ValueError(
+                "fraction_train can only be set to 1.0 or 0.0 for FedXgbCyclic."
+            )
+        if fraction_evaluate not in (0.0, 1.0):
+            raise ValueError(
+                "fraction_evaluate can only be set to 1.0 or 0.0 for FedXgbCyclic."
+            )
+
+    def _reorder_nodes(self, node_ids: list[int]) -> list[int]:
+        """Re-order node ids based on registered nodes.
+
+        Each node ID is assigned a persistent index in `self.registered_nodes`
+        the first time it appears. The input list is then reordered according
+        to these stored indices, and the result is compacted into ascending
+        order (1..N) for the current call.
+        """
+        # Assign new indices to unknown nodes
+        next_index = max(self.registered_nodes.values(), default=0) + 1
+        for nid in node_ids:
+            if nid not in self.registered_nodes:
+                self.registered_nodes[nid] = next_index
+                next_index += 1
+
+        # Sort node_ids by their stored indices
+        sorted_by_index = sorted(node_ids, key=lambda x: self.registered_nodes[x])
+
+        # Compact re-map of indices just for this output list
+        unique_indices = sorted(self.registered_nodes[nid] for nid in sorted_by_index)
+        remap = {old: new for new, old in enumerate(unique_indices, start=1)}
+
+        # Build the result list ordered by compact indices
+        result_list = [
+            nid
+            for _, nid in sorted(
+                (remap[self.registered_nodes[nid]], nid) for nid in sorted_by_index
+            )
+        ]
+        return result_list
+
+    def _make_sampling(
+        self, grid: Grid, server_round: int, configure_type: str
+    ) -> list[int]:
+        """Sample nodes using the Grid."""
+        # Sample nodes
+        num_nodes = int(len(list(grid.get_node_ids())) * self.fraction_train)
+        sample_size = max(num_nodes, self.min_train_nodes)
+        node_ids, _ = sample_nodes(grid, self.min_available_nodes, sample_size)
+
+        # Re-order node_ids
+        node_ids = self._reorder_nodes(node_ids)
+
+        # Sample the clients sequentially given server_round
+        sampled_idx = (server_round - 1) % len(node_ids)
+        sampled_node_id = [node_ids[sampled_idx]]
+
+        log(
+            INFO,
+            f"{configure_type}: Sampled %s nodes (out of %s)",
+            len(sampled_node_id),
+            len(node_ids),
+        )
+        return sampled_node_id
+
+    def configure_train(
+        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
+    ) -> Iterable[Message]:
+        """Configure the next round of federated training."""
+        # Sample one node
+        sampled_node_id = self._make_sampling(grid, server_round, "configure_train")
+
+        # Always inject current server round
+        config["server-round"] = server_round
+
+        # Construct messages
+        record = RecordDict(
+            {self.arrayrecord_key: arrays, self.configrecord_key: config}
+        )
+        return self._construct_messages(record, sampled_node_id, MessageType.TRAIN)
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)
+
+        arrays, metrics = None, None
+        if valid_replies:
+            reply_contents = [msg.content for msg in valid_replies]
+            array_record_key = next(iter(reply_contents[0].array_records.keys()))
+
+            # Fetch the client model from current round as global model
+            arrays = cast(ArrayRecord, reply_contents[0][array_record_key])
+
+            # Aggregate MetricRecords
+            metrics = self.train_metrics_aggr_fn(
+                reply_contents,
+                self.weighted_by_key,
+            )
+        return arrays, metrics
+
+    def configure_evaluate(
+        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
+    ) -> Iterable[Message]:
+        """Configure the next round of federated evaluation."""
+        # Sample one node
+        sampled_node_id = self._make_sampling(grid, server_round, "configure_evaluate")
+
+        # Always inject current server round
+        config["server-round"] = server_round
+
+        # Construct messages
+        record = RecordDict(
+            {self.arrayrecord_key: arrays, self.configrecord_key: config}
+        )
+        return self._construct_messages(record, sampled_node_id, MessageType.EVALUATE)
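
The sampling logic above reduces to picking exactly one node per round at index (server_round - 1) % len(node_ids), after the node list has been put into a stable registration order. A standalone sketch of that arithmetic (plain Python, hypothetical node IDs, no Flower dependency):

# Round-robin selection as used by FedXgbCyclic._make_sampling, illustrated
# with hypothetical node IDs already in registration order.
node_ids = [31, 7, 52]

for server_round in range(1, 7):
    sampled_idx = (server_round - 1) % len(node_ids)
    print(f"round {server_round}: train on node {node_ids[sampled_idx]}")
# round 1 -> 31, round 2 -> 7, round 3 -> 52, round 4 -> 31, and so on.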
flwr/serverapp/strategy/fedyogi.py
@@ -0,0 +1,170 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Adaptive Federated Optimization using Yogi (FedYogi) [Reddi et al., 2020] strategy.
+
+Paper: arxiv.org/abs/2003.00295
+"""
+
+
+from collections import OrderedDict
+from collections.abc import Iterable
+from typing import Callable, Optional
+
+import numpy as np
+
+from flwr.common import Array, ArrayRecord, Message, MetricRecord, RecordDict
+
+from ..exception import AggregationError
+from .fedopt import FedOpt
+
+
+# pylint: disable=line-too-long
+class FedYogi(FedOpt):
+    """FedYogi [Reddi et al., 2020] strategy.
+
+    Implementation based on https://arxiv.org/abs/2003.00295v5
+
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_train_nodes : int (default: 2)
+        Minimum number of nodes used during training.
+    min_evaluate_nodes : int (default: 2)
+        Minimum number of nodes used during validation.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for both ArrayRecords and MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    eta : float, optional
+        Server-side learning rate. Defaults to 1e-2.
+    eta_l : float, optional
+        Client-side learning rate. Defaults to 0.0316.
+    beta_1 : float, optional
+        Momentum parameter. Defaults to 0.9.
+    beta_2 : float, optional
+        Second moment parameter. Defaults to 0.99.
+    tau : float, optional
+        Controls the algorithm's degree of adaptability.
+        Defaults to 1e-3.
+    """
+
+    # pylint: disable=too-many-arguments, too-many-locals
+    def __init__(
+        self,
+        *,
+        fraction_train: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_train_nodes: int = 2,
+        min_evaluate_nodes: int = 2,
+        min_available_nodes: int = 2,
+        weighted_by_key: str = "num-examples",
+        arrayrecord_key: str = "arrays",
+        configrecord_key: str = "config",
+        train_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        evaluate_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        eta: float = 1e-2,
+        eta_l: float = 0.0316,
+        beta_1: float = 0.9,
+        beta_2: float = 0.99,
+        tau: float = 1e-3,
+    ) -> None:
+        super().__init__(
+            fraction_train=fraction_train,
+            fraction_evaluate=fraction_evaluate,
+            min_train_nodes=min_train_nodes,
+            min_evaluate_nodes=min_evaluate_nodes,
+            min_available_nodes=min_available_nodes,
+            weighted_by_key=weighted_by_key,
+            arrayrecord_key=arrayrecord_key,
+            configrecord_key=configrecord_key,
+            train_metrics_aggr_fn=train_metrics_aggr_fn,
+            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
+            eta=eta,
+            eta_l=eta_l,
+            beta_1=beta_1,
+            beta_2=beta_2,
+            tau=tau,
+        )
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        aggregated_arrayrecord, aggregated_metrics = super().aggregate_train(
+            server_round, replies
+        )
+
+        if aggregated_arrayrecord is None:
+            return aggregated_arrayrecord, aggregated_metrics
+
+        if self.current_arrays is None:
+            reason = (
+                "Current arrays not set. Ensure that `configure_train` has been "
+                "called before aggregation."
+            )
+            raise AggregationError(reason=reason)
+
+        # Compute intermediate variables
+        delta_t, m_t, aggregated_ndarrays = self._compute_deltat_and_mt(
+            aggregated_arrayrecord
+        )
+
+        # v_t
+        if not self.v_t:
+            self.v_t = {k: np.zeros_like(v) for k, v in aggregated_ndarrays.items()}
+        self.v_t = {
+            k: v
+            - (1.0 - self.beta_2) * (delta_t[k] ** 2) * np.sign(v - delta_t[k] ** 2)
+            for k, v in self.v_t.items()
+        }
+
+        new_arrays = {
+            k: x + self.eta * m_t[k] / (np.sqrt(self.v_t[k]) + self.tau)
+            for k, x in self.current_arrays.items()
+        }
+
+        return (
+            ArrayRecord(OrderedDict({k: Array(v) for k, v in new_arrays.items()})),
+            aggregated_metrics,
+        )
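
The server-side update in aggregate_train follows the Yogi rule from the paper: v_t is pushed towards delta_t**2 element-wise, and the parameters take a step of eta * m_t / (sqrt(v_t) + tau). Below is a small NumPy sketch of one such step for a single array. Note that delta_t and m_t come from FedOpt._compute_deltat_and_mt, which is not shown in this diff, so the momentum form used here (a beta_1-smoothed delta) is an assumption based on the FedOpt/FedYogi paper rather than the diffed code.

import numpy as np

# One FedYogi server step for a single parameter array (illustrative only).
eta, beta_1, beta_2, tau = 1e-2, 0.9, 0.99, 1e-3  # defaults from the docstring

x = np.zeros(3)                        # current global parameters
m_t = np.zeros_like(x)                 # first moment (from FedOpt, assumed form)
v_t = np.zeros_like(x)                 # second moment
delta_t = np.array([0.1, -0.2, 0.05])  # aggregated client update minus x

m_t = beta_1 * m_t + (1.0 - beta_1) * delta_t                        # assumed FedOpt momentum
v_t = v_t - (1.0 - beta_2) * delta_t**2 * np.sign(v_t - delta_t**2)  # Yogi rule, as in the diff
x = x + eta * m_t / (np.sqrt(v_t) + tau)                             # server step, as in the diff
print(x)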
flwr/serverapp/strategy/krum.py
@@ -0,0 +1,112 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Machine Learning with Adversaries: Byzantine Tolerant Gradient Descent.
+
+[Blanchard et al., 2017].
+
+Paper: proceedings.neurips.cc/paper/2017/file/f4b9ec30ad9f68f89b29639786cb62ef-Paper.pdf
+"""
+
+
+from logging import INFO
+from typing import Callable, Optional
+
+from flwr.common import MetricRecord, RecordDict, log
+
+from .multikrum import MultiKrum
+
+
+# pylint: disable=too-many-instance-attributes
+class Krum(MultiKrum):
+    """Krum [Blanchard et al., 2017] strategy.
+
+    Implementation based on https://arxiv.org/abs/1703.02757
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_train_nodes : int (default: 2)
+        Minimum number of nodes used during training.
+    min_evaluate_nodes : int (default: 2)
+        Minimum number of nodes used during validation.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    num_malicious_nodes : int (default: 0)
+        Number of malicious nodes in the system. Defaults to 0.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    """
+
+    # pylint: disable=too-many-arguments,too-many-positional-arguments
+    def __init__(
+        self,
+        fraction_train: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_train_nodes: int = 2,
+        min_evaluate_nodes: int = 2,
+        min_available_nodes: int = 2,
+        num_malicious_nodes: int = 0,
+        weighted_by_key: str = "num-examples",
+        arrayrecord_key: str = "arrays",
+        configrecord_key: str = "config",
+        train_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        evaluate_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+    ) -> None:
+        super().__init__(
+            fraction_train=fraction_train,
+            fraction_evaluate=fraction_evaluate,
+            min_train_nodes=min_train_nodes,
+            min_evaluate_nodes=min_evaluate_nodes,
+            min_available_nodes=min_available_nodes,
+            weighted_by_key=weighted_by_key,
+            num_malicious_nodes=num_malicious_nodes,
+            num_nodes_to_select=1,  # Krum selects 1 node
+            arrayrecord_key=arrayrecord_key,
+            configrecord_key=configrecord_key,
+            train_metrics_aggr_fn=train_metrics_aggr_fn,
+            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
+        )
+
+    def summary(self) -> None:
+        """Log summary configuration of the strategy."""
+        log(INFO, "\t├──> Krum settings:")
+        log(INFO, "\t│\t└── Number of malicious nodes: %d", self.num_malicious_nodes)
+        super().summary()
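
Krum is MultiKrum with num_nodes_to_select pinned to 1: each round it keeps the single reply whose nearest neighbours are closest, tolerating up to num_malicious_nodes Byzantine nodes. A minimal usage sketch (not part of the diff), with the import path assumed from the new flwr.serverapp.strategy package layout:

# Hedged sketch, not part of the diff: import path assumed from the new
# flwr/serverapp/strategy package layout; parameters follow the docstring above.
from flwr.serverapp.strategy import Krum

strategy = Krum(
    fraction_train=1.0,
    min_available_nodes=4,
    num_malicious_nodes=1,  # tolerate one Byzantine node
)
strategy.summary()  # logs the "Krum settings" lines defined in summary() above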