flwr 1.21.0__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. flwr/cli/app.py +17 -1
  2. flwr/cli/auth_plugin/__init__.py +15 -6
  3. flwr/cli/auth_plugin/auth_plugin.py +95 -0
  4. flwr/cli/auth_plugin/noop_auth_plugin.py +58 -0
  5. flwr/cli/auth_plugin/oidc_cli_plugin.py +16 -25
  6. flwr/cli/build.py +118 -47
  7. flwr/cli/{cli_user_auth_interceptor.py → cli_account_auth_interceptor.py} +6 -5
  8. flwr/cli/log.py +2 -2
  9. flwr/cli/login/login.py +34 -23
  10. flwr/cli/ls.py +13 -9
  11. flwr/cli/new/new.py +196 -42
  12. flwr/cli/new/templates/app/README.flowertune.md.tpl +1 -1
  13. flwr/cli/new/templates/app/code/client.baseline.py.tpl +64 -47
  14. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +68 -30
  15. flwr/cli/new/templates/app/code/client.jax.py.tpl +63 -42
  16. flwr/cli/new/templates/app/code/client.mlx.py.tpl +80 -51
  17. flwr/cli/new/templates/app/code/client.numpy.py.tpl +36 -13
  18. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +71 -46
  19. flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +55 -0
  20. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +75 -30
  21. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +69 -44
  22. flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
  23. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +56 -90
  24. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +1 -23
  25. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +37 -58
  26. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +39 -44
  27. flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -14
  28. flwr/cli/new/templates/app/code/server.baseline.py.tpl +27 -29
  29. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +23 -19
  30. flwr/cli/new/templates/app/code/server.jax.py.tpl +27 -14
  31. flwr/cli/new/templates/app/code/server.mlx.py.tpl +29 -19
  32. flwr/cli/new/templates/app/code/server.numpy.py.tpl +30 -17
  33. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +36 -26
  34. flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +31 -0
  35. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +29 -21
  36. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +28 -19
  37. flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
  38. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +16 -20
  39. flwr/cli/new/templates/app/code/task.jax.py.tpl +1 -1
  40. flwr/cli/new/templates/app/code/task.numpy.py.tpl +1 -1
  41. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +14 -27
  42. flwr/cli/new/templates/app/code/{task.pytorch_msg_api.py.tpl → task.pytorch_legacy_api.py.tpl} +27 -14
  43. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +1 -2
  44. flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
  45. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +4 -4
  46. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +2 -2
  47. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +4 -4
  48. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +1 -1
  49. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +2 -2
  50. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +1 -1
  51. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +3 -3
  52. flwr/cli/new/templates/app/{pyproject.pytorch_msg_api.toml.tpl → pyproject.pytorch_legacy_api.toml.tpl} +3 -3
  53. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +1 -1
  54. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +1 -1
  55. flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
  56. flwr/cli/pull.py +100 -0
  57. flwr/cli/run/run.py +11 -7
  58. flwr/cli/stop.py +2 -2
  59. flwr/cli/supernode/__init__.py +25 -0
  60. flwr/cli/supernode/ls.py +260 -0
  61. flwr/cli/supernode/register.py +185 -0
  62. flwr/cli/supernode/unregister.py +138 -0
  63. flwr/cli/utils.py +109 -69
  64. flwr/client/__init__.py +2 -1
  65. flwr/client/grpc_adapter_client/connection.py +6 -8
  66. flwr/client/grpc_rere_client/connection.py +59 -31
  67. flwr/client/grpc_rere_client/grpc_adapter.py +28 -12
  68. flwr/client/grpc_rere_client/{client_interceptor.py → node_auth_client_interceptor.py} +3 -6
  69. flwr/client/mod/secure_aggregation/secaggplus_mod.py +7 -5
  70. flwr/client/rest_client/connection.py +82 -37
  71. flwr/clientapp/__init__.py +1 -2
  72. flwr/clientapp/mod/__init__.py +4 -1
  73. flwr/clientapp/mod/centraldp_mods.py +156 -40
  74. flwr/clientapp/mod/localdp_mod.py +169 -0
  75. flwr/clientapp/typing.py +22 -0
  76. flwr/{client/clientapp → clientapp}/utils.py +1 -1
  77. flwr/common/constant.py +56 -13
  78. flwr/common/exit/exit_code.py +24 -10
  79. flwr/common/inflatable_utils.py +10 -10
  80. flwr/common/record/array.py +3 -3
  81. flwr/common/record/arrayrecord.py +10 -1
  82. flwr/common/record/typeddict.py +12 -0
  83. flwr/common/secure_aggregation/crypto/symmetric_encryption.py +1 -89
  84. flwr/common/serde.py +4 -2
  85. flwr/common/typing.py +7 -6
  86. flwr/compat/client/app.py +1 -1
  87. flwr/compat/client/grpc_client/connection.py +2 -2
  88. flwr/proto/control_pb2.py +48 -31
  89. flwr/proto/control_pb2.pyi +95 -5
  90. flwr/proto/control_pb2_grpc.py +136 -0
  91. flwr/proto/control_pb2_grpc.pyi +52 -0
  92. flwr/proto/fab_pb2.py +11 -7
  93. flwr/proto/fab_pb2.pyi +21 -1
  94. flwr/proto/fleet_pb2.py +31 -23
  95. flwr/proto/fleet_pb2.pyi +63 -23
  96. flwr/proto/fleet_pb2_grpc.py +98 -28
  97. flwr/proto/fleet_pb2_grpc.pyi +45 -13
  98. flwr/proto/node_pb2.py +3 -1
  99. flwr/proto/node_pb2.pyi +48 -0
  100. flwr/server/app.py +152 -114
  101. flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +17 -7
  102. flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +132 -38
  103. flwr/server/superlink/fleet/grpc_rere/{server_interceptor.py → node_auth_server_interceptor.py} +27 -51
  104. flwr/server/superlink/fleet/message_handler/message_handler.py +67 -22
  105. flwr/server/superlink/fleet/rest_rere/rest_api.py +52 -31
  106. flwr/server/superlink/fleet/vce/backend/backend.py +1 -1
  107. flwr/server/superlink/fleet/vce/backend/raybackend.py +1 -1
  108. flwr/server/superlink/fleet/vce/vce_api.py +18 -5
  109. flwr/server/superlink/linkstate/in_memory_linkstate.py +167 -73
  110. flwr/server/superlink/linkstate/linkstate.py +107 -24
  111. flwr/server/superlink/linkstate/linkstate_factory.py +2 -1
  112. flwr/server/superlink/linkstate/sqlite_linkstate.py +306 -255
  113. flwr/server/superlink/linkstate/utils.py +3 -54
  114. flwr/server/superlink/serverappio/serverappio_servicer.py +2 -2
  115. flwr/server/superlink/simulation/simulationio_servicer.py +1 -1
  116. flwr/server/utils/validator.py +2 -3
  117. flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +4 -2
  118. flwr/serverapp/strategy/__init__.py +26 -0
  119. flwr/serverapp/strategy/bulyan.py +238 -0
  120. flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
  121. flwr/serverapp/strategy/dp_fixed_clipping.py +71 -49
  122. flwr/serverapp/strategy/fedadagrad.py +0 -3
  123. flwr/serverapp/strategy/fedadam.py +0 -3
  124. flwr/serverapp/strategy/fedavg.py +89 -64
  125. flwr/serverapp/strategy/fedavgm.py +198 -0
  126. flwr/serverapp/strategy/fedmedian.py +105 -0
  127. flwr/serverapp/strategy/fedprox.py +174 -0
  128. flwr/serverapp/strategy/fedtrimmedavg.py +176 -0
  129. flwr/serverapp/strategy/fedxgb_bagging.py +117 -0
  130. flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
  131. flwr/serverapp/strategy/fedyogi.py +0 -3
  132. flwr/serverapp/strategy/krum.py +112 -0
  133. flwr/serverapp/strategy/multikrum.py +247 -0
  134. flwr/serverapp/strategy/qfedavg.py +252 -0
  135. flwr/serverapp/strategy/strategy_utils.py +48 -0
  136. flwr/simulation/app.py +1 -1
  137. flwr/simulation/ray_transport/ray_actor.py +1 -1
  138. flwr/simulation/ray_transport/ray_client_proxy.py +1 -1
  139. flwr/simulation/run_simulation.py +28 -32
  140. flwr/supercore/cli/flower_superexec.py +26 -1
  141. flwr/supercore/constant.py +41 -0
  142. flwr/supercore/object_store/in_memory_object_store.py +0 -4
  143. flwr/supercore/object_store/object_store_factory.py +26 -6
  144. flwr/supercore/object_store/sqlite_object_store.py +252 -0
  145. flwr/{client/clientapp → supercore/primitives}/__init__.py +1 -1
  146. flwr/supercore/primitives/asymmetric.py +117 -0
  147. flwr/supercore/primitives/asymmetric_ed25519.py +165 -0
  148. flwr/supercore/sqlite_mixin.py +156 -0
  149. flwr/supercore/superexec/plugin/exec_plugin.py +11 -1
  150. flwr/supercore/superexec/run_superexec.py +16 -2
  151. flwr/supercore/utils.py +20 -0
  152. flwr/superlink/artifact_provider/__init__.py +22 -0
  153. flwr/superlink/artifact_provider/artifact_provider.py +37 -0
  154. flwr/{common → superlink}/auth_plugin/__init__.py +6 -6
  155. flwr/superlink/auth_plugin/auth_plugin.py +91 -0
  156. flwr/superlink/auth_plugin/noop_auth_plugin.py +87 -0
  157. flwr/superlink/servicer/control/{control_user_auth_interceptor.py → control_account_auth_interceptor.py} +19 -19
  158. flwr/superlink/servicer/control/control_event_log_interceptor.py +1 -1
  159. flwr/superlink/servicer/control/control_grpc.py +16 -11
  160. flwr/superlink/servicer/control/control_servicer.py +207 -58
  161. flwr/supernode/cli/flower_supernode.py +19 -26
  162. flwr/supernode/runtime/run_clientapp.py +2 -2
  163. flwr/supernode/servicer/clientappio/clientappio_servicer.py +1 -1
  164. flwr/supernode/start_client_internal.py +17 -9
  165. {flwr-1.21.0.dist-info → flwr-1.23.0.dist-info}/METADATA +6 -16
  166. {flwr-1.21.0.dist-info → flwr-1.23.0.dist-info}/RECORD +170 -140
  167. flwr/cli/new/templates/app/code/client.pytorch_msg_api.py.tpl +0 -80
  168. flwr/cli/new/templates/app/code/server.pytorch_msg_api.py.tpl +0 -41
  169. flwr/common/auth_plugin/auth_plugin.py +0 -149
  170. flwr/serverapp/dp_fixed_clipping.py +0 -352
  171. flwr/serverapp/strategy/strategy_utils_tests.py +0 -304
  172. /flwr/cli/new/templates/app/code/{__init__.pytorch_msg_api.py.tpl → __init__.pytorch_legacy_api.py.tpl} +0 -0
  173. /flwr/{client → clientapp}/client_app.py +0 -0
  174. {flwr-1.21.0.dist-info → flwr-1.23.0.dist-info}/WHEEL +0 -0
  175. {flwr-1.21.0.dist-info → flwr-1.23.0.dist-info}/entry_points.txt +0 -0

flwr/serverapp/strategy/fedavg.py
@@ -16,7 +16,7 @@
 
 
 from collections.abc import Iterable
-from logging import INFO
+from logging import INFO, WARNING
 from typing import Callable, Optional
 
 from flwr.common import (
@@ -67,7 +67,7 @@ class FedAvg(Strategy):
     arrayrecord_key : str (default: "arrays")
         Key used to store the ArrayRecord when constructing Messages.
     configrecord_key : str (default: "config")
-        Key used to store the ConfigRecord when constructing Messages.
+        Key used to store the ConfigRecord when constructing Messages.
     train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
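
The `train_metrics_aggr_fn` / `evaluate_metrics_aggr_fn` hooks documented here accept any callable with signature `(list[RecordDict], str) -> MetricRecord`. Below is a minimal sketch of a custom aggregator with that signature; the function name and the use of `RecordDict.metric_records` as the access path are illustrative assumptions, not part of this diff.

import numpy as np

from flwr.common import MetricRecord, RecordDict


def median_metrics_aggr(replies: list[RecordDict], weighted_by_key: str) -> MetricRecord:
    """Illustrative aggregator: coordinate-wise median instead of a weighted average."""
    # Take the first MetricRecord in each reply RecordDict (metrics assumed to be scalars)
    records = [next(iter(r.metric_records.values())) for r in replies]
    keys = list(records[0].keys())
    # weighted_by_key is ignored here; a weighted variant would read it from each record
    return MetricRecord(
        {key: float(np.median([float(rec[key]) for rec in records])) for key in keys}
    )

Such a callable could then be passed as `train_metrics_aggr_fn` or `evaluate_metrics_aggr_fn` when constructing `FedAvg` or any of the strategies added in this release.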
@@ -111,6 +111,20 @@ class FedAvg(Strategy):
             evaluate_metrics_aggr_fn or aggregate_metricrecords
         )
 
+        if self.fraction_evaluate == 0.0:
+            self.min_evaluate_nodes = 0
+            log(
+                WARNING,
+                "fraction_evaluate is set to 0.0. "
+                "Federated evaluation will be skipped.",
+            )
+        if self.fraction_train == 0.0:
+            self.min_train_nodes = 0
+            log(
+                WARNING,
+                "fraction_train is set to 0.0. Federated training will be skipped.",
+            )
+
     def summary(self) -> None:
         """Log summary configuration of the strategy."""
         log(INFO, "\t├──> Sampling:")
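
With the constructor change above, an entire phase can be switched off. A minimal usage sketch, assuming `FedAvg` is importable from `flwr.serverapp.strategy` (the package shown in the file list):

from flwr.serverapp.strategy import FedAvg

# fraction_evaluate=0.0 now forces min_evaluate_nodes to 0, logs a WARNING,
# and (per the configure_evaluate hunk further below) no evaluation Messages
# are constructed, so federated evaluation is skipped while training proceeds.
strategy = FedAvg(fraction_train=0.5, fraction_evaluate=0.0)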
@@ -150,6 +164,9 @@
         self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
     ) -> Iterable[Message]:
         """Configure the next round of federated training."""
+        # Do not configure federated train if fraction_train is 0.
+        if self.fraction_train == 0.0:
+            return []
         # Sample nodes
         num_nodes = int(len(list(grid.get_node_ids())) * self.fraction_train)
         sample_size = max(num_nodes, self.min_train_nodes)
@@ -169,56 +186,88 @@
         )
         return self._construct_messages(record, node_ids, MessageType.TRAIN)
 
-    def aggregate_train(
-        self,
-        server_round: int,
-        replies: Iterable[Message],
-    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
-        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+    def _check_and_log_replies(
+        self, replies: Iterable[Message], is_train: bool, validate: bool = True
+    ) -> tuple[list[Message], list[Message]]:
+        """Check replies for errors and log them.
+
+        Parameters
+        ----------
+        replies : Iterable[Message]
+            Iterable of reply Messages.
+        is_train : bool
+            Set to True if the replies are from a training round; False otherwise.
+            This impacts logging and validation behavior.
+        validate : bool (default: True)
+            Whether to validate the reply contents for consistency.
+
+        Returns
+        -------
+        tuple[list[Message], list[Message]]
+            A tuple containing two lists:
+            - Messages with valid contents.
+            - Messages with errors.
+        """
         if not replies:
-            return None, None
+            return [], []
 
-        # Log if any Messages carried errors
         # Filter messages that carry content
-        num_errors = 0
-        replies_with_content = []
+        valid_replies: list[Message] = []
+        error_replies: list[Message] = []
        for msg in replies:
            if msg.has_error():
-                log(
-                    INFO,
-                    "Received error in reply from node %d: %s",
-                    msg.metadata.src_node_id,
-                    msg.error,
-                )
-                num_errors += 1
+                error_replies.append(msg)
            else:
-                replies_with_content.append(msg.content)
+                valid_replies.append(msg)
 
         log(
             INFO,
-            "aggregate_train: Received %s results and %s failures",
-            len(replies_with_content),
-            num_errors,
+            "%s: Received %s results and %s failures",
+            "aggregate_train" if is_train else "aggregate_evaluate",
+            len(valid_replies),
+            len(error_replies),
         )
 
+        # Log errors
+        for msg in error_replies:
+            log(
+                INFO,
+                "\t> Received error in reply from node %d: %s",
+                msg.metadata.src_node_id,
+                msg.error.reason,
+            )
+
         # Ensure expected ArrayRecords and MetricRecords are received
-        validate_message_reply_consistency(
-            replies=replies_with_content,
-            weighted_by_key=self.weighted_by_key,
-            check_arrayrecord=True,
-        )
+        if validate and valid_replies:
+            validate_message_reply_consistency(
+                replies=[msg.content for msg in valid_replies],
+                weighted_by_key=self.weighted_by_key,
+                check_arrayrecord=is_train,
+            )
+
+        return valid_replies, error_replies
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)
 
         arrays, metrics = None, None
-        if replies_with_content:
+        if valid_replies:
+            reply_contents = [msg.content for msg in valid_replies]
+
             # Aggregate ArrayRecords
             arrays = aggregate_arrayrecords(
-                replies_with_content,
+                reply_contents,
                 self.weighted_by_key,
             )
 
             # Aggregate MetricRecords
             metrics = self.train_metrics_aggr_fn(
-                replies_with_content,
+                reply_contents,
                 self.weighted_by_key,
             )
         return arrays, metrics
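
The per-method filtering removed above now lives in the shared `_check_and_log_replies` helper, which subclasses can reuse (FedMedian, added further below, does exactly that). A rough sketch of such reuse follows; the subclass name `LoggingFedAvg`, its failure-handling policy, and the `flwr.serverapp.strategy` import path are assumptions for illustration only.

from collections.abc import Iterable
from typing import Optional

from flwr.common import ArrayRecord, Message, MetricRecord
from flwr.serverapp.strategy import FedAvg


class LoggingFedAvg(FedAvg):
    """Illustrative subclass reusing the shared reply-checking helper."""

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        # Split replies into valid and error Messages; logging and consistency
        # validation are handled inside the helper introduced in this release.
        valid_replies, error_replies = self._check_and_log_replies(
            replies, is_train=True
        )
        if error_replies:
            # Example policy: discard the whole round if any node reported an error
            return None, None
        return super().aggregate_train(server_round, valid_replies)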
@@ -227,6 +276,10 @@
         self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
     ) -> Iterable[Message]:
         """Configure the next round of federated evaluation."""
+        # Do not configure federated evaluation if fraction_evaluate is 0.
+        if self.fraction_evaluate == 0.0:
+            return []
+
         # Sample nodes
         num_nodes = int(len(list(grid.get_node_ids())) * self.fraction_evaluate)
         sample_size = max(num_nodes, self.min_evaluate_nodes)
@@ -253,43 +306,15 @@
         replies: Iterable[Message],
     ) -> Optional[MetricRecord]:
         """Aggregate MetricRecords in the received Messages."""
-        if not replies:
-            return None
+        valid_replies, _ = self._check_and_log_replies(replies, is_train=False)
 
-        # Log if any Messages carried errors
-        # Filter messages that carry content
-        num_errors = 0
-        replies_with_content = []
-        for msg in replies:
-            if msg.has_error():
-                log(
-                    INFO,
-                    "Received error in reply from node %d: %s",
-                    msg.metadata.src_node_id,
-                    msg.error,
-                )
-                num_errors += 1
-            else:
-                replies_with_content.append(msg.content)
-
-        log(
-            INFO,
-            "aggregate_evaluate: Received %s results and %s failures",
-            len(replies_with_content),
-            num_errors,
-        )
-
-        # Ensure expected ArrayRecords and MetricRecords are received
-        validate_message_reply_consistency(
-            replies=replies_with_content,
-            weighted_by_key=self.weighted_by_key,
-            check_arrayrecord=False,
-        )
         metrics = None
-        if replies_with_content:
+        if valid_replies:
+            reply_contents = [msg.content for msg in valid_replies]
+
             # Aggregate MetricRecords
             metrics = self.evaluate_metrics_aggr_fn(
-                replies_with_content,
+                reply_contents,
                 self.weighted_by_key,
             )
         return metrics

flwr/serverapp/strategy/fedavgm.py (new file)
@@ -0,0 +1,198 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Federated Averaging with Momentum (FedAvgM) [Hsu et al., 2019] strategy.
+
+Paper: arxiv.org/pdf/1909.06335.pdf
+"""
+
+
+from collections import OrderedDict
+from collections.abc import Iterable
+from logging import INFO
+from typing import Callable, Optional
+
+from flwr.common import (
+    Array,
+    ArrayRecord,
+    ConfigRecord,
+    Message,
+    MetricRecord,
+    NDArrays,
+    RecordDict,
+    log,
+)
+from flwr.server import Grid
+
+from ..exception import AggregationError
+from .fedavg import FedAvg
+
+
+class FedAvgM(FedAvg):
+    """Federated Averaging with Momentum strategy.
+
+    Implementation based on https://arxiv.org/abs/1909.06335
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_train_nodes : int (default: 2)
+        Minimum number of nodes used during training.
+    min_evaluate_nodes : int (default: 2)
+        Minimum number of nodes used during validation.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for both ArrayRecords and MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    server_learning_rate: float (default: 1.0)
+        Server-side learning rate used in server-side optimization.
+    server_momentum: float (default: 0.0)
+        Server-side momentum factor used for FedAvgM.
+    """
+
+    def __init__(  # pylint: disable=R0913, R0917
+        self,
+        fraction_train: float = 1.0,
+        fraction_evaluate: float = 1.0,
+        min_train_nodes: int = 2,
+        min_evaluate_nodes: int = 2,
+        min_available_nodes: int = 2,
+        weighted_by_key: str = "num-examples",
+        arrayrecord_key: str = "arrays",
+        configrecord_key: str = "config",
+        train_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        evaluate_metrics_aggr_fn: Optional[
+            Callable[[list[RecordDict], str], MetricRecord]
+        ] = None,
+        server_learning_rate: float = 1.0,
+        server_momentum: float = 0.0,
+    ) -> None:
+        super().__init__(
+            fraction_train=fraction_train,
+            fraction_evaluate=fraction_evaluate,
+            min_train_nodes=min_train_nodes,
+            min_evaluate_nodes=min_evaluate_nodes,
+            min_available_nodes=min_available_nodes,
+            weighted_by_key=weighted_by_key,
+            arrayrecord_key=arrayrecord_key,
+            configrecord_key=configrecord_key,
+            train_metrics_aggr_fn=train_metrics_aggr_fn,
+            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
+        )
+        self.server_learning_rate = server_learning_rate
+        self.server_momentum = server_momentum
+        self.server_opt: bool = (self.server_momentum != 0.0) or (
+            self.server_learning_rate != 1.0
+        )
+        self.current_arrays: Optional[ArrayRecord] = None
+        self.momentum_vector: Optional[NDArrays] = None
+
+    def summary(self) -> None:
+        """Log summary configuration of the strategy."""
+        opt_status = "ON" if self.server_opt else "OFF"
+        log(INFO, "\t├──> FedAvgM settings:")
+        log(INFO, "\t│\t├── Server optimization: %s", opt_status)
+        log(INFO, "\t│\t├── Server learning rate: %s", self.server_learning_rate)
+        log(INFO, "\t│\t└── Server Momentum: %s", self.server_momentum)
+        super().summary()
+
+    def configure_train(
+        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
+    ) -> Iterable[Message]:
+        """Configure the next round of federated training."""
+        if self.current_arrays is None:
+            self.current_arrays = arrays
+        return super().configure_train(server_round, arrays, config, grid)
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        # Call FedAvg aggregate_train to perform validation and aggregation
+        aggregated_arrays, aggregated_metrics = super().aggregate_train(
+            server_round, replies
+        )
+
+        # following convention described in
+        # https://pytorch.org/docs/stable/generated/torch.optim.SGD.html
+        if self.server_opt and aggregated_arrays is not None:
+            # The initial parameters should be set in `start()` method already
+            if self.current_arrays is None:
+                raise AggregationError(
+                    "No initial parameters set for FedAvgM. "
+                    "Ensure that `configure_train` has been called before aggregation."
+                )
+            ndarrays = self.current_arrays.to_numpy_ndarrays()
+            aggregated_ndarrays = aggregated_arrays.to_numpy_ndarrays()
+
+            # Preserve keys for arrays in ArrayRecord
+            array_keys = list(aggregated_arrays.keys())
+            aggregated_arrays.clear()
+
+            # Remember that updates are the opposite of gradients
+            pseudo_gradient = [
+                old - new for new, old in zip(aggregated_ndarrays, ndarrays)
+            ]
+            if self.server_momentum > 0.0:
+                if self.momentum_vector is None:
+                    # Initialize momentum vector in the first round
+                    self.momentum_vector = pseudo_gradient
+                else:
+                    self.momentum_vector = [
+                        self.server_momentum * mv + pg
+                        for mv, pg in zip(self.momentum_vector, pseudo_gradient)
+                    ]
+
+                # No nesterov for now
+                pseudo_gradient = self.momentum_vector
+
+            # SGD and convert back to ArrayRecord
+            updated_array_list = [
+                Array(old - self.server_learning_rate * pg)
+                for old, pg in zip(ndarrays, pseudo_gradient)
+            ]
+            aggregated_arrays = ArrayRecord(
+                OrderedDict(zip(array_keys, updated_array_list))
+            )
+
+            # Update current weights
+            self.current_arrays = aggregated_arrays
+
+        return aggregated_arrays, aggregated_metrics
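
Read as server-side SGD, the update above works as follows: the FedAvg result defines a pseudo-gradient (old minus new), an optional momentum buffer accumulates it, and the global arrays move by `server_learning_rate` along that buffer. A small NumPy sketch of one round with made-up numbers (values are for illustration only):

import numpy as np

server_lr, server_momentum = 1.0, 0.9
current = [np.array([1.0, 2.0])]      # global weights before the round
aggregated = [np.array([0.6, 1.8])]   # plain FedAvg result for the round

# Pseudo-gradient: updates are the opposite of gradients (old - new)
pseudo_grad = [old - new for new, old in zip(aggregated, current)]
momentum_vec = pseudo_grad            # first round: buffer is initialized
# later rounds would instead use:
# momentum_vec = [server_momentum * mv + pg for mv, pg in zip(momentum_vec, pseudo_grad)]
updated = [old - server_lr * pg for old, pg in zip(current, momentum_vec)]
print(updated)  # [array([0.6, 1.8])]: with lr=1.0 and a fresh buffer this equals plain FedAvg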

flwr/serverapp/strategy/fedmedian.py (new file)
@@ -0,0 +1,105 @@
+# Copyright 2025 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Federated Median (FedMedian) [Yin et al., 2018] strategy.
+
+Paper: arxiv.org/pdf/1803.01498v1.pdf
+"""
+
+
+from collections.abc import Iterable
+from typing import Optional, cast
+
+import numpy as np
+
+from flwr.common import Array, ArrayRecord, Message, MetricRecord
+
+from .fedavg import FedAvg
+
+
+class FedMedian(FedAvg):
+    """Federated Median (FedMedian) strategy.
+
+    Implementation based on https://arxiv.org/pdf/1803.01498v1
+
+    Parameters
+    ----------
+    fraction_train : float (default: 1.0)
+        Fraction of nodes used during training. In case `min_train_nodes`
+        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
+        will still be sampled.
+    fraction_evaluate : float (default: 1.0)
+        Fraction of nodes used during validation. In case `min_evaluate_nodes`
+        is larger than `fraction_evaluate * total_connected_nodes`,
+        `min_evaluate_nodes` will still be sampled.
+    min_train_nodes : int (default: 2)
+        Minimum number of nodes used during training.
+    min_evaluate_nodes : int (default: 2)
+        Minimum number of nodes used during validation.
+    min_available_nodes : int (default: 2)
+        Minimum number of total nodes in the system.
+    weighted_by_key : str (default: "num-examples")
+        The key within each MetricRecord whose value is used as the weight when
+        computing weighted averages for MetricRecords.
+    arrayrecord_key : str (default: "arrays")
+        Key used to store the ArrayRecord when constructing Messages.
+    configrecord_key : str (default: "config")
+        Key used to store the ConfigRecord when constructing Messages.
+    train_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
+        Function with signature (list[RecordDict], str) -> MetricRecord,
+        used to aggregate MetricRecords from training round replies.
+        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
+        average using the provided weight factor key.
+    """
+
+    def aggregate_train(
+        self,
+        server_round: int,
+        replies: Iterable[Message],
+    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
+        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
+        # Call FedAvg aggregate_train to perform validation and aggregation
+        valid_replies, _ = self._check_and_log_replies(replies, is_train=True)
+
+        if not valid_replies:
+            return None, None
+
+        # Aggregate ArrayRecords using median
+        # Get the key for the only ArrayRecord from the first Message
+        record_key = list(valid_replies[0].content.array_records.keys())[0]
+        # Preserve keys for arrays in ArrayRecord
+        array_keys = list(valid_replies[0].content[record_key].keys())
+
+        # Compute median for each layer and construct ArrayRecord
+        arrays = ArrayRecord()
+        for array_key in array_keys:
+            # Get the corresponding layer from each client
+            layers = [
+                cast(ArrayRecord, msg.content[record_key]).pop(array_key).numpy()
+                for msg in valid_replies
+            ]
+            # Compute median and save as Array in ArrayRecord
+            arrays[array_key] = Array(np.median(np.stack(layers), axis=0))
+
+        # Aggregate MetricRecords
+        metrics = self.train_metrics_aggr_fn(
+            [msg.content for msg in valid_replies],
+            self.weighted_by_key,
+        )
+        return arrays, metrics
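
The robustness of FedMedian comes from the `np.median(np.stack(layers), axis=0)` step: each coordinate of a layer takes the median across clients, so a single outlier update cannot drag the aggregate. A toy NumPy illustration with invented values:

import numpy as np

# Three clients' versions of the same layer; one coordinate of client 2 is an outlier
layers = [
    np.array([1.0, 10.0, 3.0]),
    np.array([2.0, 11.0, 100.0]),
    np.array([3.0, 12.0, 4.0]),
]
median_layer = np.median(np.stack(layers), axis=0)
print(median_layer)  # [ 2. 11.  4.]: the 100.0 outlier does not shift the result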