flwr 1.20.0__py3-none-any.whl → 1.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. flwr/__init__.py +4 -1
  2. flwr/app/__init__.py +28 -0
  3. flwr/app/exception.py +31 -0
  4. flwr/cli/app.py +2 -0
  5. flwr/cli/auth_plugin/oidc_cli_plugin.py +4 -4
  6. flwr/cli/cli_user_auth_interceptor.py +1 -1
  7. flwr/cli/config_utils.py +3 -3
  8. flwr/cli/constant.py +25 -8
  9. flwr/cli/log.py +9 -9
  10. flwr/cli/login/login.py +3 -3
  11. flwr/cli/ls.py +5 -5
  12. flwr/cli/new/new.py +15 -2
  13. flwr/cli/new/templates/app/README.flowertune.md.tpl +1 -1
  14. flwr/cli/new/templates/app/code/__init__.pytorch_legacy_api.py.tpl +1 -0
  15. flwr/cli/new/templates/app/code/client.baseline.py.tpl +64 -47
  16. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +68 -30
  17. flwr/cli/new/templates/app/code/client.jax.py.tpl +63 -42
  18. flwr/cli/new/templates/app/code/client.mlx.py.tpl +80 -51
  19. flwr/cli/new/templates/app/code/client.numpy.py.tpl +36 -13
  20. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +71 -46
  21. flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +55 -0
  22. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +75 -30
  23. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +69 -44
  24. flwr/cli/new/templates/app/code/client.xgboost.py.tpl +110 -0
  25. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +56 -90
  26. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +1 -23
  27. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +37 -58
  28. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +39 -44
  29. flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -14
  30. flwr/cli/new/templates/app/code/server.baseline.py.tpl +27 -29
  31. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +23 -19
  32. flwr/cli/new/templates/app/code/server.jax.py.tpl +27 -14
  33. flwr/cli/new/templates/app/code/server.mlx.py.tpl +29 -19
  34. flwr/cli/new/templates/app/code/server.numpy.py.tpl +30 -17
  35. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +36 -26
  36. flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +31 -0
  37. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +29 -21
  38. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +28 -19
  39. flwr/cli/new/templates/app/code/server.xgboost.py.tpl +56 -0
  40. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +16 -20
  41. flwr/cli/new/templates/app/code/task.jax.py.tpl +1 -1
  42. flwr/cli/new/templates/app/code/task.numpy.py.tpl +1 -1
  43. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +14 -27
  44. flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl +111 -0
  45. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +1 -2
  46. flwr/cli/new/templates/app/code/task.xgboost.py.tpl +67 -0
  47. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +4 -4
  48. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +2 -2
  49. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +4 -4
  50. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +1 -1
  51. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +2 -2
  52. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +1 -1
  53. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +3 -3
  54. flwr/cli/new/templates/app/pyproject.pytorch_legacy_api.toml.tpl +53 -0
  55. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +1 -1
  56. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +1 -1
  57. flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +61 -0
  58. flwr/cli/pull.py +100 -0
  59. flwr/cli/run/run.py +9 -13
  60. flwr/cli/stop.py +7 -4
  61. flwr/cli/utils.py +36 -8
  62. flwr/client/grpc_rere_client/connection.py +1 -12
  63. flwr/client/rest_client/connection.py +3 -0
  64. flwr/clientapp/__init__.py +10 -0
  65. flwr/clientapp/mod/__init__.py +29 -0
  66. flwr/clientapp/mod/centraldp_mods.py +248 -0
  67. flwr/clientapp/mod/localdp_mod.py +169 -0
  68. flwr/clientapp/typing.py +22 -0
  69. flwr/common/args.py +20 -6
  70. flwr/common/auth_plugin/__init__.py +4 -4
  71. flwr/common/auth_plugin/auth_plugin.py +7 -7
  72. flwr/common/constant.py +26 -4
  73. flwr/common/event_log_plugin/event_log_plugin.py +1 -1
  74. flwr/common/exit/__init__.py +4 -0
  75. flwr/common/exit/exit.py +8 -1
  76. flwr/common/exit/exit_code.py +30 -7
  77. flwr/common/exit/exit_handler.py +62 -0
  78. flwr/common/{exit_handlers.py → exit/signal_handler.py} +20 -37
  79. flwr/common/grpc.py +0 -11
  80. flwr/common/inflatable_utils.py +1 -1
  81. flwr/common/logger.py +1 -1
  82. flwr/common/record/typeddict.py +12 -0
  83. flwr/common/retry_invoker.py +30 -11
  84. flwr/common/telemetry.py +4 -0
  85. flwr/compat/server/app.py +2 -2
  86. flwr/proto/appio_pb2.py +25 -17
  87. flwr/proto/appio_pb2.pyi +46 -2
  88. flwr/proto/clientappio_pb2.py +3 -11
  89. flwr/proto/clientappio_pb2.pyi +0 -47
  90. flwr/proto/clientappio_pb2_grpc.py +19 -20
  91. flwr/proto/clientappio_pb2_grpc.pyi +10 -11
  92. flwr/proto/control_pb2.py +66 -0
  93. flwr/proto/{exec_pb2.pyi → control_pb2.pyi} +24 -0
  94. flwr/proto/{exec_pb2_grpc.py → control_pb2_grpc.py} +88 -54
  95. flwr/proto/control_pb2_grpc.pyi +106 -0
  96. flwr/proto/serverappio_pb2.py +2 -2
  97. flwr/proto/serverappio_pb2_grpc.py +68 -0
  98. flwr/proto/serverappio_pb2_grpc.pyi +26 -0
  99. flwr/proto/simulationio_pb2.py +4 -11
  100. flwr/proto/simulationio_pb2.pyi +0 -58
  101. flwr/proto/simulationio_pb2_grpc.py +129 -27
  102. flwr/proto/simulationio_pb2_grpc.pyi +52 -13
  103. flwr/server/app.py +142 -152
  104. flwr/server/grid/grpc_grid.py +3 -0
  105. flwr/server/grid/inmemory_grid.py +1 -0
  106. flwr/server/serverapp/app.py +157 -146
  107. flwr/server/superlink/fleet/vce/backend/raybackend.py +3 -1
  108. flwr/server/superlink/fleet/vce/vce_api.py +6 -6
  109. flwr/server/superlink/linkstate/in_memory_linkstate.py +34 -0
  110. flwr/server/superlink/linkstate/linkstate.py +2 -1
  111. flwr/server/superlink/linkstate/sqlite_linkstate.py +45 -0
  112. flwr/server/superlink/serverappio/serverappio_grpc.py +1 -1
  113. flwr/server/superlink/serverappio/serverappio_servicer.py +61 -6
  114. flwr/server/superlink/simulation/simulationio_servicer.py +97 -21
  115. flwr/serverapp/__init__.py +12 -0
  116. flwr/serverapp/exception.py +38 -0
  117. flwr/serverapp/strategy/__init__.py +64 -0
  118. flwr/serverapp/strategy/bulyan.py +238 -0
  119. flwr/serverapp/strategy/dp_adaptive_clipping.py +335 -0
  120. flwr/serverapp/strategy/dp_fixed_clipping.py +374 -0
  121. flwr/serverapp/strategy/fedadagrad.py +159 -0
  122. flwr/serverapp/strategy/fedadam.py +178 -0
  123. flwr/serverapp/strategy/fedavg.py +320 -0
  124. flwr/serverapp/strategy/fedavgm.py +198 -0
  125. flwr/serverapp/strategy/fedmedian.py +105 -0
  126. flwr/serverapp/strategy/fedopt.py +218 -0
  127. flwr/serverapp/strategy/fedprox.py +174 -0
  128. flwr/serverapp/strategy/fedtrimmedavg.py +176 -0
  129. flwr/serverapp/strategy/fedxgb_bagging.py +117 -0
  130. flwr/serverapp/strategy/fedxgb_cyclic.py +220 -0
  131. flwr/serverapp/strategy/fedyogi.py +170 -0
  132. flwr/serverapp/strategy/krum.py +112 -0
  133. flwr/serverapp/strategy/multikrum.py +247 -0
  134. flwr/serverapp/strategy/qfedavg.py +252 -0
  135. flwr/serverapp/strategy/result.py +105 -0
  136. flwr/serverapp/strategy/strategy.py +285 -0
  137. flwr/serverapp/strategy/strategy_utils.py +299 -0
  138. flwr/simulation/app.py +161 -164
  139. flwr/simulation/run_simulation.py +25 -30
  140. flwr/supercore/app_utils.py +58 -0
  141. flwr/{supernode/scheduler → supercore/cli}/__init__.py +3 -3
  142. flwr/supercore/cli/flower_superexec.py +166 -0
  143. flwr/supercore/constant.py +19 -0
  144. flwr/supercore/{scheduler → corestate}/__init__.py +3 -3
  145. flwr/supercore/corestate/corestate.py +81 -0
  146. flwr/supercore/grpc_health/__init__.py +3 -0
  147. flwr/supercore/grpc_health/health_server.py +53 -0
  148. flwr/supercore/grpc_health/simple_health_servicer.py +2 -2
  149. flwr/{superexec → supercore/superexec}/__init__.py +1 -1
  150. flwr/supercore/superexec/plugin/__init__.py +28 -0
  151. flwr/{supernode/scheduler/simple_clientapp_scheduler_plugin.py → supercore/superexec/plugin/base_exec_plugin.py} +10 -6
  152. flwr/supercore/superexec/plugin/clientapp_exec_plugin.py +28 -0
  153. flwr/supercore/{scheduler/plugin.py → superexec/plugin/exec_plugin.py} +15 -5
  154. flwr/supercore/superexec/plugin/serverapp_exec_plugin.py +28 -0
  155. flwr/supercore/superexec/plugin/simulation_exec_plugin.py +28 -0
  156. flwr/supercore/superexec/run_superexec.py +199 -0
  157. flwr/superlink/artifact_provider/__init__.py +22 -0
  158. flwr/superlink/artifact_provider/artifact_provider.py +37 -0
  159. flwr/superlink/servicer/__init__.py +15 -0
  160. flwr/superlink/servicer/control/__init__.py +22 -0
  161. flwr/{superexec/exec_event_log_interceptor.py → superlink/servicer/control/control_event_log_interceptor.py} +7 -7
  162. flwr/{superexec/exec_grpc.py → superlink/servicer/control/control_grpc.py} +27 -29
  163. flwr/{superexec/exec_license_interceptor.py → superlink/servicer/control/control_license_interceptor.py} +6 -6
  164. flwr/{superexec/exec_servicer.py → superlink/servicer/control/control_servicer.py} +127 -31
  165. flwr/{superexec/exec_user_auth_interceptor.py → superlink/servicer/control/control_user_auth_interceptor.py} +10 -10
  166. flwr/supernode/cli/flower_supernode.py +3 -0
  167. flwr/supernode/cli/flwr_clientapp.py +18 -21
  168. flwr/supernode/nodestate/in_memory_nodestate.py +2 -2
  169. flwr/supernode/nodestate/nodestate.py +3 -59
  170. flwr/supernode/runtime/run_clientapp.py +39 -102
  171. flwr/supernode/servicer/clientappio/clientappio_servicer.py +10 -17
  172. flwr/supernode/start_client_internal.py +35 -76
  173. {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/METADATA +9 -18
  174. {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/RECORD +176 -128
  175. {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/entry_points.txt +1 -0
  176. flwr/proto/exec_pb2.py +0 -62
  177. flwr/proto/exec_pb2_grpc.pyi +0 -93
  178. flwr/superexec/app.py +0 -45
  179. flwr/superexec/deployment.py +0 -191
  180. flwr/superexec/executor.py +0 -100
  181. flwr/superexec/simulation.py +0 -129
  182. {flwr-1.20.0.dist-info → flwr-1.22.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,374 @@
1
+ # Copyright 2025 Flower Labs GmbH. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Message-based Central differential privacy with fixed clipping.
16
+
17
+ Papers: https://arxiv.org/abs/1712.07557, https://arxiv.org/abs/1710.06963
18
+ """
19
+
20
+ from abc import ABC
21
+ from collections import OrderedDict
22
+ from collections.abc import Iterable
23
+ from logging import INFO, WARNING
24
+ from typing import Optional
25
+
26
+ from flwr.common import Array, ArrayRecord, ConfigRecord, Message, MetricRecord, log
27
+ from flwr.common.differential_privacy import (
28
+ add_gaussian_noise_inplace,
29
+ compute_clip_model_update,
30
+ compute_stdv,
31
+ )
32
+ from flwr.common.differential_privacy_constants import (
33
+ CLIENTS_DISCREPANCY_WARNING,
34
+ KEY_CLIPPING_NORM,
35
+ )
36
+ from flwr.server import Grid
37
+
38
+ from .strategy import Strategy
39
+
40
+
41
class DifferentialPrivacyFixedClippingBase(Strategy, ABC):
    """Shared logic for central-DP strategy wrappers with a fixed clipping norm.

    The server-side and client-side fixed-clipping wrappers both delegate to a
    wrapped inner strategy and add Gaussian noise to the aggregated arrays;
    that common behavior is implemented here.

    Parameters
    ----------
    strategy : Strategy
        The strategy to which DP functionalities will be added by this wrapper.
    noise_multiplier : float
        The noise multiplier for the Gaussian mechanism for model updates.
        A value of 1.0 or higher is recommended for strong privacy.
    clipping_norm : float
        The value of the clipping norm.
    num_sampled_clients : int
        The number of clients that are sampled on each round.
    """

    # pylint: disable=too-many-arguments,too-many-instance-attributes
    def __init__(
        self,
        strategy: Strategy,
        noise_multiplier: float,
        clipping_norm: float,
        num_sampled_clients: int,
    ) -> None:
        super().__init__()

        self.strategy = strategy

        # Reject invalid DP hyperparameters up front.
        if noise_multiplier < 0:
            raise ValueError("The noise multiplier should be a non-negative value.")
        if clipping_norm <= 0:
            raise ValueError("The clipping norm should be a positive value.")
        if num_sampled_clients <= 0:
            raise ValueError(
                "The number of sampled clients should be a positive value."
            )

        self.noise_multiplier = noise_multiplier
        self.clipping_norm = clipping_norm
        self.num_sampled_clients = num_sampled_clients

    def _add_noise_to_aggregated_arrays(
        self, aggregated_arrays: ArrayRecord
    ) -> ArrayRecord:
        """Return the aggregated arrays with Gaussian noise added.

        Parameters
        ----------
        aggregated_arrays : ArrayRecord
            The aggregated arrays to add noise to.

        Returns
        -------
        ArrayRecord
            The aggregated arrays with noise added.
        """
        as_ndarrays = aggregated_arrays.to_numpy_ndarrays()
        stdv = compute_stdv(
            self.noise_multiplier, self.clipping_norm, self.num_sampled_clients
        )
        # Noise is added in place to the ndarray copies, not the input record.
        add_gaussian_noise_inplace(as_ndarrays, stdv)

        log(
            INFO,
            "aggregate_fit: central DP noise with %.4f stdev added",
            stdv,
        )

        # Rebuild an ArrayRecord, preserving the original key order.
        noised = OrderedDict(
            (key, Array(ndarray))
            for key, ndarray in zip(aggregated_arrays.keys(), as_ndarrays)
        )
        return ArrayRecord(noised)

    def configure_evaluate(
        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
    ) -> Iterable[Message]:
        """Configure the next round of federated evaluation."""
        # Evaluation is unaffected by DP; forward to the wrapped strategy.
        return self.strategy.configure_evaluate(server_round, arrays, config, grid)

    def aggregate_evaluate(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> Optional[MetricRecord]:
        """Aggregate MetricRecords in the received Messages."""
        return self.strategy.aggregate_evaluate(server_round, replies)

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        self.strategy.summary()
140
+
141
+
142
class DifferentialPrivacyServerSideFixedClipping(DifferentialPrivacyFixedClippingBase):
    """Strategy wrapper for central DP with server-side fixed clipping.

    Parameters
    ----------
    strategy : Strategy
        The strategy to which DP functionalities will be added by this wrapper.
    noise_multiplier : float
        The noise multiplier for the Gaussian mechanism for model updates.
        A value of 1.0 or higher is recommended for strong privacy.
    clipping_norm : float
        The value of the clipping norm.
    num_sampled_clients : int
        The number of clients that are sampled on each round.

    Examples
    --------
    Create a strategy::

        strategy = fl.serverapp.FedAvg( ... )

    Wrap the strategy with the `DifferentialPrivacyServerSideFixedClipping` wrapper::

        dp_strategy = DifferentialPrivacyServerSideFixedClipping(
            strategy, cfg.noise_multiplier, cfg.clipping_norm, cfg.num_sampled_clients
        )
    """

    def __init__(
        self,
        strategy: Strategy,
        noise_multiplier: float,
        clipping_norm: float,
        num_sampled_clients: int,
    ) -> None:
        super().__init__(strategy, noise_multiplier, clipping_norm, num_sampled_clients)
        # Global arrays sent out in `configure_train`; used in `aggregate_train`
        # as the reference point when clipping each client's model update.
        self.current_arrays: ArrayRecord = ArrayRecord()

    def __repr__(self) -> str:
        """Compute a string representation of the strategy."""
        return "Differential Privacy Strategy Wrapper (Server-Side Fixed Clipping)"

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> DP settings:")
        log(INFO, "\t│\t├── Noise multiplier: %s", self.noise_multiplier)
        log(INFO, "\t│\t└── Clipping norm: %s", self.clipping_norm)
        super().summary()

    def configure_train(
        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
    ) -> Iterable[Message]:
        """Configure the next round of training."""
        # Keep a reference to the outgoing global arrays: client updates are
        # clipped relative to them in `aggregate_train`.
        self.current_arrays = arrays
        return self.strategy.configure_train(server_round, arrays, config, grid)

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
        # Materialize the iterable once: it is traversed for validation, again
        # for clipping, and a third time by the wrapped strategy's aggregation.
        # A one-shot generator would otherwise appear empty after validation.
        replies = list(replies)
        if not validate_replies(replies, self.num_sampled_clients):
            return None, None

        # Clip arrays in replies
        current_ndarrays = self.current_arrays.to_numpy_ndarrays()
        for reply in replies:
            for arr_name, record in reply.content.array_records.items():
                # Clip the client update relative to the current global arrays
                reply_ndarrays = record.to_numpy_ndarrays()
                compute_clip_model_update(
                    param1=reply_ndarrays,
                    param2=current_ndarrays,
                    clipping_norm=self.clipping_norm,
                )
                # Replace content while preserving keys
                reply.content[arr_name] = ArrayRecord(
                    OrderedDict(
                        {k: Array(v) for k, v in zip(record.keys(), reply_ndarrays)}
                    )
                )
        log(
            INFO,
            "aggregate_fit: parameters are clipped by value: %.4f.",
            self.clipping_norm,
        )

        # Pass the new parameters for aggregation
        aggregated_arrays, aggregated_metrics = self.strategy.aggregate_train(
            server_round, replies
        )

        # Add Gaussian noise to the aggregated arrays
        if aggregated_arrays:
            aggregated_arrays = self._add_noise_to_aggregated_arrays(aggregated_arrays)

        return aggregated_arrays, aggregated_metrics
240
+
241
+
242
class DifferentialPrivacyClientSideFixedClipping(DifferentialPrivacyFixedClippingBase):
    """Strategy wrapper for central DP with client-side fixed clipping.

    Use `fixedclipping_mod` modifier at the client side.

    In comparison to `DifferentialPrivacyServerSideFixedClipping`,
    which performs clipping on the server-side,
    `DifferentialPrivacyClientSideFixedClipping` expects clipping to happen
    on the client-side, usually by using the built-in `fixedclipping_mod`.

    Parameters
    ----------
    strategy : Strategy
        The strategy to which DP functionalities will be added by this wrapper.
    noise_multiplier : float
        The noise multiplier for the Gaussian mechanism for model updates.
        A value of 1.0 or higher is recommended for strong privacy.
    clipping_norm : float
        The value of the clipping norm.
    num_sampled_clients : int
        The number of clients that are sampled on each round.

    Examples
    --------
    Create a strategy::

        strategy = fl.serverapp.FedAvg(...)

    Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` wrapper::

        dp_strategy = DifferentialPrivacyClientSideFixedClipping(
            strategy, cfg.noise_multiplier, cfg.clipping_norm, cfg.num_sampled_clients
        )

    On the client, add the `fixedclipping_mod` to the client-side mods::

        app = fl.client.ClientApp(mods=[fixedclipping_mod])
    """

    def __repr__(self) -> str:
        """Compute a string representation of the strategy."""
        return "Differential Privacy Strategy Wrapper (Client-Side Fixed Clipping)"

    def summary(self) -> None:
        """Log summary configuration of the strategy."""
        log(INFO, "\t├──> DP settings:")
        log(INFO, "\t│\t├── Noise multiplier: %s", self.noise_multiplier)
        log(INFO, "\t│\t└── Clipping norm: %s", self.clipping_norm)
        super().summary()

    def configure_train(
        self, server_round: int, arrays: ArrayRecord, config: ConfigRecord, grid: Grid
    ) -> Iterable[Message]:
        """Configure the next round of training."""
        # Inject clipping norm in config so `fixedclipping_mod` can read it
        # on the client side.
        config[KEY_CLIPPING_NORM] = self.clipping_norm
        # Delegate message construction to the wrapped strategy
        return self.strategy.configure_train(server_round, arrays, config, grid)

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
        # Materialize the iterable once: `validate_replies` consumes it before
        # it is handed to the wrapped strategy; a one-shot generator would
        # otherwise arrive at the inner `aggregate_train` already exhausted.
        replies = list(replies)
        if not validate_replies(replies, self.num_sampled_clients):
            return None, None

        # Aggregate
        aggregated_arrays, aggregated_metrics = self.strategy.aggregate_train(
            server_round, replies
        )

        # Add Gaussian noise to the aggregated arrays
        if aggregated_arrays:
            aggregated_arrays = self._add_noise_to_aggregated_arrays(aggregated_arrays)

        return aggregated_arrays, aggregated_metrics
320
+
321
+
322
def validate_replies(replies: Iterable[Message], num_sampled_clients: int) -> bool:
    """Validate replies and log errors/warnings.

    Counts error replies and content replies; any error reply aborts
    aggregation, and a mismatch with the expected client count only warns.

    Parameters
    ----------
    replies : Iterable[Message]
        The replies to validate.
    num_sampled_clients : int
        The expected number of sampled clients.

    Returns
    -------
    bool
        True if replies are valid for aggregation, False otherwise.
    """
    num_errors = 0
    num_replies_with_content = 0
    for msg in replies:
        if msg.has_error():
            # Log each failed node individually for easier debugging
            log(
                INFO,
                "Received error in reply from node %d: %s",
                msg.metadata.src_node_id,
                msg.error,
            )
            num_errors += 1
        else:
            num_replies_with_content += 1

    # Errors are not allowed: a single error reply skips the whole round
    if num_errors:
        log(
            INFO,
            "aggregate_train: Some clients reported errors. Skipping aggregation.",
        )
        return False

    log(
        INFO,
        "aggregate_train: Received %s results and %s failures",
        num_replies_with_content,
        num_errors,
    )

    # Fewer/more replies than sampled clients weakens the DP noise calibration,
    # so warn (but do not abort) on a discrepancy.
    if num_replies_with_content != num_sampled_clients:
        log(
            WARNING,
            CLIENTS_DISCREPANCY_WARNING,
            num_replies_with_content,
            num_sampled_clients,
        )

    return True
@@ -0,0 +1,159 @@
1
+ # Copyright 2025 Flower Labs GmbH. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """FedAdagrad [Reddi et al., 2020] strategy.
16
+
17
+ Adaptive Federated Optimization using Adagrad.
18
+
19
+ Paper: arxiv.org/abs/2003.00295
20
+ """
21
+
22
+ from collections import OrderedDict
23
+ from collections.abc import Iterable
24
+ from typing import Callable, Optional
25
+
26
+ import numpy as np
27
+
28
+ from flwr.common import Array, ArrayRecord, Message, MetricRecord, RecordDict
29
+
30
+ from ..exception import AggregationError
31
+ from .fedopt import FedOpt
32
+
33
+
34
# pylint: disable=line-too-long
class FedAdagrad(FedOpt):
    """FedAdagrad strategy - Adaptive Federated Optimization using Adagrad.

    Implementation based on https://arxiv.org/abs/2003.00295v5

    Parameters
    ----------
    fraction_train : float (default: 1.0)
        Fraction of nodes used during training. In case `min_train_nodes`
        is larger than `fraction_train * total_connected_nodes`, `min_train_nodes`
        will still be sampled.
    fraction_evaluate : float (default: 1.0)
        Fraction of nodes used during validation. In case `min_evaluate_nodes`
        is larger than `fraction_evaluate * total_connected_nodes`,
        `min_evaluate_nodes` will still be sampled.
    min_train_nodes : int (default: 2)
        Minimum number of nodes used during training.
    min_evaluate_nodes : int (default: 2)
        Minimum number of nodes used during validation.
    min_available_nodes : int (default: 2)
        Minimum number of total nodes in the system.
    weighted_by_key : str (default: "num-examples")
        The key within each MetricRecord whose value is used as the weight when
        computing weighted averages for both ArrayRecords and MetricRecords.
    arrayrecord_key : str (default: "arrays")
        Key used to store the ArrayRecord when constructing Messages.
    configrecord_key : str (default: "config")
        Key used to store the ConfigRecord when constructing Messages.
    train_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from training round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    evaluate_metrics_aggr_fn : Optional[callable] (default: None)
        Function with signature (list[RecordDict], str) -> MetricRecord,
        used to aggregate MetricRecords from evaluation round replies.
        If `None`, defaults to `aggregate_metricrecords`, which performs a weighted
        average using the provided weight factor key.
    eta : float, optional
        Server-side learning rate. Defaults to 1e-1.
    eta_l : float, optional
        Client-side learning rate. Defaults to 1e-1.
    tau : float, optional
        Controls the algorithm's degree of adaptability. Defaults to 1e-3.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        *,
        fraction_train: float = 1.0,
        fraction_evaluate: float = 1.0,
        min_train_nodes: int = 2,
        min_evaluate_nodes: int = 2,
        min_available_nodes: int = 2,
        weighted_by_key: str = "num-examples",
        arrayrecord_key: str = "arrays",
        configrecord_key: str = "config",
        train_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        evaluate_metrics_aggr_fn: Optional[
            Callable[[list[RecordDict], str], MetricRecord]
        ] = None,
        eta: float = 1e-1,
        eta_l: float = 1e-1,
        tau: float = 1e-3,
    ) -> None:
        super().__init__(
            fraction_train=fraction_train,
            fraction_evaluate=fraction_evaluate,
            min_train_nodes=min_train_nodes,
            min_evaluate_nodes=min_evaluate_nodes,
            min_available_nodes=min_available_nodes,
            weighted_by_key=weighted_by_key,
            arrayrecord_key=arrayrecord_key,
            configrecord_key=configrecord_key,
            train_metrics_aggr_fn=train_metrics_aggr_fn,
            evaluate_metrics_aggr_fn=evaluate_metrics_aggr_fn,
            eta=eta,
            eta_l=eta_l,
            # Adagrad is FedOpt with both moment-decay factors disabled
            beta_1=0.0,
            beta_2=0.0,
            tau=tau,
        )

    def aggregate_train(
        self,
        server_round: int,
        replies: Iterable[Message],
    ) -> tuple[Optional[ArrayRecord], Optional[MetricRecord]]:
        """Aggregate ArrayRecords and MetricRecords in the received Messages."""
        aggregated_arrayrecord, aggregated_metrics = super().aggregate_train(
            server_round, replies
        )

        # Nothing to adapt if the base aggregation produced no arrays
        if aggregated_arrayrecord is None:
            return aggregated_arrayrecord, aggregated_metrics

        if self.current_arrays is None:
            reason = (
                "Current arrays not set. Ensure that `configure_train` has been "
                "called before aggregation."
            )
            raise AggregationError(reason=reason)

        # Compute intermediate variables
        delta_t, m_t, aggregated_ndarrays = self._compute_deltat_and_mt(
            aggregated_arrayrecord
        )

        # v_t: Adagrad accumulator of squared deltas (lazily initialized to
        # zeros on the first round, then grows monotonically)
        if not self.v_t:
            self.v_t = {k: np.zeros_like(v) for k, v in aggregated_ndarrays.items()}
        self.v_t = {k: v + (delta_t[k] ** 2) for k, v in self.v_t.items()}

        # Adaptive server update: x_{t+1} = x_t + eta * m_t / (sqrt(v_t) + tau)
        new_arrays = {
            k: x + self.eta * m_t[k] / (np.sqrt(self.v_t[k]) + self.tau)
            for k, x in self.current_arrays.items()
        }

        return (
            ArrayRecord(OrderedDict({k: Array(v) for k, v in new_arrays.items()})),
            aggregated_metrics,
        )