flwr 1.21.0__py3-none-any.whl → 1.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97) hide show
  1. flwr/cli/app.py +2 -0
  2. flwr/cli/new/new.py +9 -7
  3. flwr/cli/new/templates/app/README.flowertune.md.tpl +1 -1
  4. flwr/cli/new/templates/app/code/client.baseline.py.tpl +64 -47
  5. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +68 -30
  6. flwr/cli/new/templates/app/code/client.jax.py.tpl +63 -42
  7. flwr/cli/new/templates/app/code/client.mlx.py.tpl +80 -51
  8. flwr/cli/new/templates/app/code/client.numpy.py.tpl +36 -13
  9. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +71 -46
  10. flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +55 -0
  11. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +75 -30
  12. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +69 -44
  13. flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
  14. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +56 -90
  15. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +1 -23
  16. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +37 -58
  17. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +39 -44
  18. flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -14
  19. flwr/cli/new/templates/app/code/server.baseline.py.tpl +27 -29
  20. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +23 -19
  21. flwr/cli/new/templates/app/code/server.jax.py.tpl +27 -14
  22. flwr/cli/new/templates/app/code/server.mlx.py.tpl +29 -19
  23. flwr/cli/new/templates/app/code/server.numpy.py.tpl +30 -17
  24. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +36 -26
  25. flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +31 -0
  26. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +29 -21
  27. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +28 -19
  28. flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
  29. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +16 -20
  30. flwr/cli/new/templates/app/code/task.jax.py.tpl +1 -1
  31. flwr/cli/new/templates/app/code/task.numpy.py.tpl +1 -1
  32. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +14 -27
  33. flwr/cli/new/templates/app/code/{task.pytorch_msg_api.py.tpl → task.pytorch_legacy_api.py.tpl} +27 -14
  34. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +1 -2
  35. flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
  36. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +4 -4
  37. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +2 -2
  38. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +4 -4
  39. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +1 -1
  40. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +2 -2
  41. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +1 -1
  42. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +3 -3
  43. flwr/cli/new/templates/app/{pyproject.pytorch_msg_api.toml.tpl → pyproject.pytorch_legacy_api.toml.tpl} +3 -3
  44. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +1 -1
  45. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +1 -1
  46. flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
  47. flwr/cli/pull.py +100 -0
  48. flwr/cli/utils.py +17 -0
  49. flwr/clientapp/mod/__init__.py +4 -1
  50. flwr/clientapp/mod/centraldp_mods.py +156 -40
  51. flwr/clientapp/mod/localdp_mod.py +169 -0
  52. flwr/clientapp/typing.py +22 -0
  53. flwr/common/constant.py +3 -0
  54. flwr/common/exit/exit_code.py +4 -0
  55. flwr/common/record/typeddict.py +12 -0
  56. flwr/proto/control_pb2.py +7 -3
  57. flwr/proto/control_pb2.pyi +24 -0
  58. flwr/proto/control_pb2_grpc.py +34 -0
  59. flwr/proto/control_pb2_grpc.pyi +13 -0
  60. flwr/server/app.py +13 -0
  61. flwr/serverapp/strategy/__init__.py +26 -0
  62. flwr/serverapp/strategy/bulyan.py +238 -0
  63. flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
  64. flwr/serverapp/strategy/dp_fixed_clipping.py +71 -49
  65. flwr/serverapp/strategy/fedadagrad.py +0 -3
  66. flwr/serverapp/strategy/fedadam.py +0 -3
  67. flwr/serverapp/strategy/fedavg.py +89 -64
  68. flwr/serverapp/strategy/fedavgm.py +198 -0
  69. flwr/serverapp/strategy/fedmedian.py +105 -0
  70. flwr/serverapp/strategy/fedprox.py +174 -0
  71. flwr/serverapp/strategy/fedtrimmedavg.py +176 -0
  72. flwr/serverapp/strategy/fedxgb_bagging.py +117 -0
  73. flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
  74. flwr/serverapp/strategy/fedyogi.py +0 -3
  75. flwr/serverapp/strategy/krum.py +112 -0
  76. flwr/serverapp/strategy/multikrum.py +247 -0
  77. flwr/serverapp/strategy/qfedavg.py +252 -0
  78. flwr/serverapp/strategy/strategy_utils.py +48 -0
  79. flwr/simulation/app.py +1 -1
  80. flwr/simulation/run_simulation.py +25 -30
  81. flwr/supercore/cli/flower_superexec.py +26 -1
  82. flwr/supercore/constant.py +19 -0
  83. flwr/supercore/superexec/plugin/exec_plugin.py +11 -1
  84. flwr/supercore/superexec/run_superexec.py +16 -2
  85. flwr/superlink/artifact_provider/__init__.py +22 -0
  86. flwr/superlink/artifact_provider/artifact_provider.py +37 -0
  87. flwr/superlink/servicer/control/control_grpc.py +3 -0
  88. flwr/superlink/servicer/control/control_servicer.py +59 -2
  89. {flwr-1.21.0.dist-info → flwr-1.22.0.dist-info}/METADATA +6 -16
  90. {flwr-1.21.0.dist-info → flwr-1.22.0.dist-info}/RECORD +93 -74
  91. flwr/cli/new/templates/app/code/client.pytorch_msg_api.py.tpl +0 -80
  92. flwr/cli/new/templates/app/code/server.pytorch_msg_api.py.tpl +0 -41
  93. flwr/serverapp/dp_fixed_clipping.py +0 -352
  94. flwr/serverapp/strategy/strategy_utils_tests.py +0 -304
  95. /flwr/cli/new/templates/app/code/{__init__.pytorch_msg_api.py.tpl → __init__.pytorch_legacy_api.py.tpl} +0 -0
  96. {flwr-1.21.0.dist-info → flwr-1.22.0.dist-info}/WHEEL +0 -0
  97. {flwr-1.21.0.dist-info → flwr-1.22.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,220 @@
1
+ # Copyright 2025 Flower Labs GmbH. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Flower message-based FedXgbCyclic strategy."""
16
+
17
+
18
+ from collections.abc import Iterable
19
+ from logging import INFO
20
+ from typing import Callable, Optional, cast
21
+
22
+ from flwr.common import (
23
+ ArrayRecord,
24
+ ConfigRecord,
25
+ Message,
26
+ MessageType,
27
+ MetricRecord,
28
+ RecordDict,
29
+ log,
30
+ )
31
+ from flwr.server import Grid
32
+
33
+ from .fedavg import FedAvg
34
+ from .strategy_utils import sample_nodes
35
+
36
+
37
+ # pylint: disable=line-too-long
38
+ class FedXgbCyclic(FedAvg):
39
+ """Configurable FedXgbCyclic strategy implementation.
40
+
41
+ Parameters
42
+ ----------
43
+ fraction_train : float (default: 1.0)
44
+ Fraction of nodes used during training. In case `min_train_nodes`
45
+ is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
46
+ will still be sampled.
47
+ fraction_evaluate : float (default: 1.0)
48
+ Fraction of nodes used during validation. In case `min_evaluate_nodes`
49
+ is larger than `fraction_evaluate * total_connected_nodes`,
50
+ `min_evaluate_nodes` will still be sampled.
51
+ min_available_nodes : int (default: 2)
52
+ Minimum number of total nodes in the system.
53
+ weighted_by_key : str (default: "num-examples")
54
+ The key within each MetricRecord whose value is used as the weight when
55
+ computing weighted averages for MetricRecords.
56
+ arrayrecord_key : str (default: "arrays")
57
+ Key used to store the ArrayRecord when constructing Messages.
58
+ configrecord_key : str (default: "config")
59
+ Key used to store the ConfigRecord when constructing Messages.
60
+ train_metrics_aggr_fn : Optional[callable] (default: None)
61
+ Function with signature (list[RecordDict], str) -> MetricRecord,
62
+ used to aggregate MetricRecords from training round replies.
63
+ If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
64
+ average using the provided weight factor key.
65
+ evaluate_metrics_aggr_fn : Optional[callable] (default: None)
66
+ Function with signature (list[RecordDict], str) -> MetricRecord,
67
+ used to aggregate MetricRecords from training round replies.
68
+ If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
69
+ average using the provided weight factor key.
70
+ """
71
+
72
+ # pylint: disable=too-many-arguments,too-many-positional-arguments
73
+ def __init__(
74
+ self,
75
+ fraction_train: float = 1.0,
76
+ fraction_evaluate: float = 1.0,
77
+ min_available_nodes: int = 2,
78
+ weighted_by_key: str = "num-examples",
79
+ arrayrecord_key: str = "arrays",
80
+ configrecord_key: str = "config",
81
+ train_metrics_aggr_fn: Optional[
82
+ Callable[[list[RecordDict], str], MetricRecord]
83
+ ] = None,
84
+ evaluate_metrics_aggr_fn: Optional[
85
+ Callable[[list[RecordDict], str], MetricRecord]
86
+ ] = None,
87
+ ) -> None:
88
+ super().__init__(
89
+ fraction_train=fraction_train,
90
+ fraction_evaluate=fraction_evaluate,
91
+ min_train_nodes=2,
92
+ min_evaluate_nodes=2,
93
+ min_available_nodes=min_available_nodes,
94
+ weighted_by_key=weighted_by_key,
95
+ arrayrecord_key=arrayrecord_key,
96
+ configrecord_key=configrecord_key,
97
+ train_metrics_aggr_fn=train_metrics_aggr_fn,
98
+ evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
99
+ )
100
+
101
+ self.registered_nodes: dict[int, int] = {}
102
+
103
+ if fraction_train not in (0.0, 1.0):
104
+ raise ValueError(
105
+ "fraction_train can only be set to 1.0 or 0.0 for FedXgbCyclic."
106
+ )
107
+ if fraction_evaluate not in (0.0, 1.0):
108
+ raise ValueError(
109
+ "fraction_evaluate can only be set to 1.0 or 0.0 for FedXgbCyclic."
110
+ )
111
+
112
+ def _reorder_nodes(self, node_ids: list[int]) -> list[int]:
113
+ """Re-order node ids based on registered nodes.
114
+
115
+ Each node ID is assigned a persistent index in `self.registered_nodes`
116
+ the first time it appears. The input list is then reordered according
117
+ to these stored indices, and the result is compacted into ascending
118
+ order (1..N) for the current call.
119
+ """
120
+ # Assign new indices to unknown nodes
121
+ next_index = max(self.registered_nodes.values(), default=0) + 1
122
+ for nid in node_ids:
123
+ if nid not in self.registered_nodes:
124
+ self.registered_nodes[nid] = next_index
125
+ next_index += 1
126
+
127
+ # Sort node_ids by their stored indices
128
+ sorted_by_index = sorted(node_ids, key=lambda x: self.registered_nodes[x])
129
+
130
+ # Compact re-map of indices just for this output list
131
+ unique_indices = sorted(self.registered_nodes[nid] for nid in sorted_by_index)
132
+ remap = {old: new for new, old in enumerate(unique_indices, start=1)}
133
+
134
+ # Build the result list ordered by compact indices
135
+ result_list = [
136
+ nid
137
+ for _, nid in sorted(
138
+ (remap[self.registered_nodes[nid]], nid) for nid in sorted_by_index
139
+ )
140
+ ]
141
+ return result_list
142
+
143
+ def _make_sampling(
144
+ self, grid: Grid, server_round: int, configure_type: str
145
+ ) -> list[int]:
146
+ """Sample nodes using the Grid."""
147
+ # Sample nodes
148
+ num_nodes = int(len(list(grid.get_node_ids())) * self.fraction_train)
149
+ sample_size = max(num_nodes, self.min_train_nodes)
150
+ node_ids, _ = sample_nodes(grid, self.min_available_nodes, sample_size)
151
+
152
+ # Re-order node_ids
153
+ node_ids = self._reorder_nodes(node_ids)
154
+
155
+ # Sample the clients sequentially given server_round
156
+ sampled_idx = (server_round - 1) % len(node_ids)
157
+ sampled_node_id = [node_ids[sampled_idx]]
158
+
159
+ log(
160
+ INFO,
161
+ f"{configure_type}: Sampled %s nodes (out of %s)",
162
+ len(sampled_node_id),
163
+ len(node_ids),
164
+ )
165
+ return sampled_node_id
166
+
167
+ def configure_train(
168
+ self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
169
+ ) -> Iterable[Message]:
170
+ """Configure the next round of federated training."""
171
+ # Sample one node
172
+ sampled_node_id = self._make_sampling(grid, server_round, "configure_train")
173
+
174
+ # Always inject current server round
175
+ config["server-round"] = server_round
176
+
177
+ # Construct messages
178
+ record = RecordDict(
179
+ {self.arrayrecord_key: arrays, self.configrecord_key: config}
180
+ )
181
+ return self._construct_messages(record, sampled_node_id, MessageType.TRAIN)
182
+
183
+ def aggregate_train(
184
+ self,
185
+ server_round: int,
186
+ replies: Iterable[Message],
187
+ ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
188
+ """Aggregate ArrayRecords and MetricRecords in the received Messages."""
189
+ valid_replies, _ = self._check_and_log_replies(replies, is_train=True)
190
+
191
+ arrays, metrics = None, None
192
+ if valid_replies:
193
+ reply_contents = [msg.content for msg in valid_replies]
194
+ array_record_key = next(iter(reply_contents[0].array_records.keys()))
195
+
196
+ # Fetch the client model from current round as global model
197
+ arrays = cast(ArrayRecord, reply_contents[0][array_record_key])
198
+
199
+ # Aggregate MetricRecords
200
+ metrics = self.train_metrics_aggr_fn(
201
+ reply_contents,
202
+ self.weighted_by_key,
203
+ )
204
+ return arrays, metrics
205
+
206
+ def configure_evaluate(
207
+ self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
208
+ ) -> Iterable[Message]:
209
+ """Configure the next round of federated evaluation."""
210
+ # Sample one node
211
+ sampled_node_id = self._make_sampling(grid, server_round, "configure_evaluate")
212
+
213
+ # Always inject current server round
214
+ config["server-round"] = server_round
215
+
216
+ # Construct messages
217
+ record = RecordDict(
218
+ {self.arrayrecord_key: arrays, self.configrecord_key: config}
219
+ )
220
+ return self._construct_messages(record, sampled_node_id, MessageType.EVALUATE)
@@ -164,9 +164,6 @@ class FedYogi(FedOpt):
164
164
  for k, x in self.current_arrays.items()
165
165
  }
166
166
 
167
- # Update current arrays
168
- self.current_arrays = new_arrays
169
-
170
167
  return (
171
168
  ArrayRecord(OrderedDict({k: Array(v) for k, v in new_arrays.items()})),
172
169
  aggregated_metrics,
@@ -0,0 +1,112 @@
1
+ # Copyright 2025 Flower Labs GmbH. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Machine Learning with Adversaries: Byzantine Tolerant Gradient Descent.
16
+
17
+ [Blanchard et al., 2017].
18
+
19
+ Paper: proceedings.neurips.cc/paper/2017/file/f4b9ec30ad9f68f89b29639786cb62ef-Paper.pdf
20
+ """
21
+
22
+
23
+ from logging import INFO
24
+ from typing import Callable, Optional
25
+
26
+ from flwr.common import MetricRecord, RecordDict, log
27
+
28
+ from .multikrum import MultiKrum
29
+
30
+
31
# pylint: disable=too-many-instance-attributes
class Krum(MultiKrum):
    """Krum [Blanchard et al., 2017] strategy.

    Implementation based on https://arxiv.org/abs/1703.02757

    Krum is the special case of MultiKrum that selects exactly one client
    update per round (``num_nodes_to_select=1``).

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    num_malicious_nodes : int (default: 0)
        Number of malicious nodes in the system. Defaults to 0.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluation round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    """

    # pylint: disable=too-many-arguments,too-many-positional-arguments
    def __init__(
        self,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        num_malicious_nodes: int = 0,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            num_malicious_nodes=num_malicious_nodes,
            num_nodes_to_select=1,  # Krum selects 1 node
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
        )

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> Krum settings:")
        log(INFO, "\t│\t└── Number of malicious nodes: %d", self.num_malicious_nodes)
        super().summary()
@@ -0,0 +1,247 @@
1
+ # Copyright 2025 Flower Labs GmbH. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Machine Learning with Adversaries: Byzantine Tolerant Gradient Descent.
16
+
17
+ [Blanchard et al., 2017].
18
+
19
+ Paper: proceedings.neurips.cc/paper/2017/file/f4b9ec30ad9f68f89b29639786cb62ef-Paper.pdf
20
+ """
21
+
22
+
23
+ from collections.abc import Iterable
24
+ from logging import INFO
25
+ from typing import Callable, Optional, cast
26
+
27
+ import numpy as np
28
+
29
+ from flwr.common import ArrayRecord, Message, MetricRecord, NDArray, RecordDict, log
30
+
31
+ from .fedavg import FedAvg
32
+ from .strategy_utils import aggregate_arrayrecords
33
+
34
+
35
# pylint: disable=too-many-instance-attributes
class MultiKrum(FedAvg):
    """MultiKrum [Blanchard et al., 2017] strategy.

    Implementation based on https://arxiv.org/abs/1703.02757

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    num_malicious_nodes : int (default: 0)
        Number of malicious nodes in the system. Defaults to 0.
    num_nodes_to_select : int (default: 1)
        Number of nodes to select before averaging.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for both ArrayRecords and MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluation round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.

    Notes
    -----
    MultiKrum is a generalization of Krum. If `num_nodes_to_select` is set to 1,
    MultiKrum will reduce to classical Krum.
    """

    # pylint: disable=too-many-arguments,too-many-positional-arguments
    def __init__(
        self,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        num_malicious_nodes: int = 0,
        num_nodes_to_select: int = 1,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
        )
        # f in the Krum paper: assumed number of Byzantine nodes
        self.num_malicious_nodes = num_malicious_nodes
        # m in the Multi-Krum paper: number of updates kept for averaging
        self.num_nodes_to_select = num_nodes_to_select

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> MultiKrum settings:")
        log(INFO, "\t│\t├── Number of malicious nodes: %d", self.num_malicious_nodes)
        log(INFO, "\t│\t└── Number of nodes to select: %d", self.num_nodes_to_select)
        super().summary()

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)

        arrays, metrics = None, None
        if valid_replies:
            reply_contents = [msg.content for msg in valid_replies]

            # Krum or MultiKrum selection: keep only the
            # `num_nodes_to_select` lowest-scoring updates
            replies_to_aggregate = select_multikrum(
                reply_contents,
                num_malicious_nodes=self.num_malicious_nodes,
                num_nodes_to_select=self.num_nodes_to_select,
            )

            # Aggregate ArrayRecords
            arrays = aggregate_arrayrecords(
                replies_to_aggregate,
                self.weighted_by_key,
            )

            # Aggregate MetricRecords
            metrics = self.train_metrics_aggr_fn(
                replies_to_aggregate,
                self.weighted_by_key,
            )
        return arrays, metrics
158
+
159
+
160
def compute_distances(records: list[ArrayRecord]) -> NDArray:
    """Compute squared L2 distances between ArrayRecords.

    Parameters
    ----------
    records : list[ArrayRecord]
        A list of ArrayRecords (arrays received in replies)

    Returns
    -------
    NDArray
        A 2D array representing the distance matrix of squared L2 distances
        between input ArrayRecords
    """
    # Flatten every record into a single 1-D vector, then stack them into a
    # (n, d) matrix: n records, d model parameters each.
    stacked = np.stack(
        [
            np.concatenate(record.to_numpy_ndarrays(), axis=None).ravel()
            for record in records
        ],
        axis=0,
    )

    # Squared L2 norm of each row, shape (n,)
    sq_norms: NDArray = np.square(stacked).sum(axis=1)

    # Identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y, evaluated pairwise
    # via broadcasting against the Gram matrix.
    gram = stacked @ stacked.T
    distance_matrix: NDArray = sq_norms[:, None] + sq_norms[None, :] - 2 * gram
    return distance_matrix
187
+
188
+
189
def select_multikrum(
    contents: list[RecordDict],
    num_malicious_nodes: int,
    num_nodes_to_select: int,
) -> list[RecordDict]:
    """Select the set of RecordDicts to aggregate using the Krum or MultiKrum algorithm.

    For each node, computes the sum of squared L2 distances to its n-f-2 closest
    parameter vectors, where n is the number of nodes and f is the number of
    malicious nodes. The node(s) with the lowest score(s) are selected for
    aggregation.

    Parameters
    ----------
    contents : list[RecordDict]
        List of contents from reply messages, where each content is a RecordDict
        containing an ArrayRecord of model parameters from a node (client).
    num_malicious_nodes : int
        Number of malicious nodes in the system.
    num_nodes_to_select : int
        Number of client updates to select.
        - If 1, the algorithm reduces to classical Krum (selecting a single update).
        - If >1, Multi-Krum is applied (selecting multiple updates).

    Returns
    -------
    list[RecordDict]
        Selected contents following the Krum or Multi-Krum algorithm.

    Notes
    -----
    If `num_nodes_to_select` is set to 1, Multi-Krum reduces to classical Krum
    and only a single RecordDict is selected.
    """
    # Pull the single ArrayRecord out of each reply content
    # (aggregate_train has already ensured there is exactly one per reply)
    record_key = next(iter(contents[0].array_records.keys()))
    array_records = [cast(ArrayRecord, content[record_key]) for content in contents]
    distance_matrix = compute_distances(array_records)

    # Each node considers its n-f-2 nearest neighbours; the leading index of
    # every sorted row is the node's zero distance to itself, so skip it.
    num_closest = max(1, len(array_records) - num_malicious_nodes - 2)
    closest_indices = [
        np.argsort(row)[1 : num_closest + 1].tolist()  # noqa: E203
        for row in distance_matrix
    ]

    # Krum score per node: sum of distances to its n-f-2 nearest neighbours
    scores = [
        np.sum(distance_matrix[node_idx, closest_indices[node_idx]])
        for node_idx in range(len(distance_matrix))
    ]

    # Keep the num_nodes_to_select lowest-scoring updates (MultiKrum)
    selected = np.argsort(scores)[:num_nodes_to_select]
    return [contents[node_idx] for node_idx in selected]