flwr-nightly 1.8.0.dev20240315-py3-none-any.whl → 1.15.0.dev20250115-py3-none-any.whl

Files changed (312)
  1. flwr/cli/app.py +16 -2
  2. flwr/cli/build.py +181 -0
  3. flwr/cli/cli_user_auth_interceptor.py +90 -0
  4. flwr/cli/config_utils.py +343 -0
  5. flwr/cli/example.py +4 -1
  6. flwr/cli/install.py +253 -0
  7. flwr/cli/log.py +182 -0
  8. flwr/{server/superlink/state → cli/login}/__init__.py +4 -10
  9. flwr/cli/login/login.py +88 -0
  10. flwr/cli/ls.py +327 -0
  11. flwr/cli/new/__init__.py +1 -0
  12. flwr/cli/new/new.py +210 -66
  13. flwr/cli/new/templates/app/.gitignore.tpl +163 -0
  14. flwr/cli/new/templates/app/LICENSE.tpl +202 -0
  15. flwr/cli/new/templates/app/README.baseline.md.tpl +127 -0
  16. flwr/cli/new/templates/app/README.flowertune.md.tpl +66 -0
  17. flwr/cli/new/templates/app/README.md.tpl +16 -32
  18. flwr/cli/new/templates/app/code/__init__.baseline.py.tpl +1 -0
  19. flwr/cli/new/templates/app/code/__init__.py.tpl +1 -1
  20. flwr/cli/new/templates/app/code/client.baseline.py.tpl +58 -0
  21. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +55 -0
  22. flwr/cli/new/templates/app/code/client.jax.py.tpl +50 -0
  23. flwr/cli/new/templates/app/code/client.mlx.py.tpl +73 -0
  24. flwr/cli/new/templates/app/code/client.numpy.py.tpl +7 -7
  25. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +30 -21
  26. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +63 -0
  27. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +57 -1
  28. flwr/cli/new/templates/app/code/dataset.baseline.py.tpl +36 -0
  29. flwr/cli/new/templates/app/code/flwr_tune/__init__.py +15 -0
  30. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +126 -0
  31. flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +87 -0
  32. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +78 -0
  33. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +94 -0
  34. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +83 -0
  35. flwr/cli/new/templates/app/code/model.baseline.py.tpl +80 -0
  36. flwr/cli/new/templates/app/code/server.baseline.py.tpl +46 -0
  37. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +38 -0
  38. flwr/cli/new/templates/app/code/server.jax.py.tpl +26 -0
  39. flwr/cli/new/templates/app/code/server.mlx.py.tpl +31 -0
  40. flwr/cli/new/templates/app/code/server.numpy.py.tpl +22 -9
  41. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +21 -18
  42. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +36 -0
  43. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +29 -1
  44. flwr/cli/new/templates/app/code/strategy.baseline.py.tpl +1 -0
  45. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +102 -0
  46. flwr/cli/new/templates/app/code/task.jax.py.tpl +57 -0
  47. flwr/cli/new/templates/app/code/task.mlx.py.tpl +102 -0
  48. flwr/cli/new/templates/app/code/task.numpy.py.tpl +7 -0
  49. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +29 -24
  50. flwr/cli/new/templates/app/code/task.sklearn.py.tpl +67 -0
  51. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +53 -0
  52. flwr/cli/new/templates/app/code/utils.baseline.py.tpl +1 -0
  53. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +138 -0
  54. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +68 -0
  55. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +46 -0
  56. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +35 -0
  57. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +39 -0
  58. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +25 -12
  59. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +29 -14
  60. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +35 -0
  61. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +29 -14
  62. flwr/cli/run/__init__.py +1 -0
  63. flwr/cli/run/run.py +212 -34
  64. flwr/cli/stop.py +130 -0
  65. flwr/cli/utils.py +240 -5
  66. flwr/client/__init__.py +3 -2
  67. flwr/client/app.py +432 -255
  68. flwr/client/client.py +1 -11
  69. flwr/client/client_app.py +74 -13
  70. flwr/client/clientapp/__init__.py +22 -0
  71. flwr/client/clientapp/app.py +259 -0
  72. flwr/client/clientapp/clientappio_servicer.py +244 -0
  73. flwr/client/clientapp/utils.py +115 -0
  74. flwr/client/dpfedavg_numpy_client.py +7 -8
  75. flwr/client/grpc_adapter_client/__init__.py +15 -0
  76. flwr/client/grpc_adapter_client/connection.py +98 -0
  77. flwr/client/grpc_client/connection.py +21 -7
  78. flwr/client/grpc_rere_client/__init__.py +1 -1
  79. flwr/client/grpc_rere_client/client_interceptor.py +176 -0
  80. flwr/client/grpc_rere_client/connection.py +163 -56
  81. flwr/client/grpc_rere_client/grpc_adapter.py +167 -0
  82. flwr/client/heartbeat.py +74 -0
  83. flwr/client/message_handler/__init__.py +1 -1
  84. flwr/client/message_handler/message_handler.py +10 -11
  85. flwr/client/mod/__init__.py +5 -5
  86. flwr/client/mod/centraldp_mods.py +4 -2
  87. flwr/client/mod/comms_mods.py +5 -4
  88. flwr/client/mod/localdp_mod.py +10 -5
  89. flwr/client/mod/secure_aggregation/__init__.py +1 -1
  90. flwr/client/mod/secure_aggregation/secaggplus_mod.py +26 -26
  91. flwr/client/mod/utils.py +2 -4
  92. flwr/client/nodestate/__init__.py +26 -0
  93. flwr/client/nodestate/in_memory_nodestate.py +38 -0
  94. flwr/client/nodestate/nodestate.py +31 -0
  95. flwr/client/nodestate/nodestate_factory.py +38 -0
  96. flwr/client/numpy_client.py +8 -31
  97. flwr/client/rest_client/__init__.py +1 -1
  98. flwr/client/rest_client/connection.py +199 -176
  99. flwr/client/run_info_store.py +112 -0
  100. flwr/client/supernode/__init__.py +24 -0
  101. flwr/client/supernode/app.py +321 -0
  102. flwr/client/typing.py +1 -0
  103. flwr/common/__init__.py +17 -11
  104. flwr/common/address.py +47 -3
  105. flwr/common/args.py +153 -0
  106. flwr/common/auth_plugin/__init__.py +24 -0
  107. flwr/common/auth_plugin/auth_plugin.py +121 -0
  108. flwr/common/config.py +243 -0
  109. flwr/common/constant.py +135 -1
  110. flwr/common/context.py +32 -2
  111. flwr/common/date.py +22 -4
  112. flwr/common/differential_privacy.py +2 -2
  113. flwr/common/dp.py +2 -4
  114. flwr/common/exit_handlers.py +3 -3
  115. flwr/common/grpc.py +164 -5
  116. flwr/common/logger.py +230 -12
  117. flwr/common/message.py +191 -106
  118. flwr/common/object_ref.py +179 -44
  119. flwr/common/pyproject.py +1 -0
  120. flwr/common/record/__init__.py +2 -1
  121. flwr/common/record/configsrecord.py +58 -18
  122. flwr/common/record/metricsrecord.py +57 -17
  123. flwr/common/record/parametersrecord.py +88 -20
  124. flwr/common/record/recordset.py +153 -30
  125. flwr/common/record/typeddict.py +30 -55
  126. flwr/common/recordset_compat.py +31 -12
  127. flwr/common/retry_invoker.py +123 -30
  128. flwr/common/secure_aggregation/__init__.py +1 -1
  129. flwr/common/secure_aggregation/crypto/__init__.py +1 -1
  130. flwr/common/secure_aggregation/crypto/shamir.py +11 -11
  131. flwr/common/secure_aggregation/crypto/symmetric_encryption.py +68 -4
  132. flwr/common/secure_aggregation/ndarrays_arithmetic.py +17 -17
  133. flwr/common/secure_aggregation/quantization.py +8 -8
  134. flwr/common/secure_aggregation/secaggplus_constants.py +1 -1
  135. flwr/common/secure_aggregation/secaggplus_utils.py +10 -12
  136. flwr/common/serde.py +304 -23
  137. flwr/common/telemetry.py +65 -29
  138. flwr/common/typing.py +120 -19
  139. flwr/common/version.py +17 -3
  140. flwr/proto/clientappio_pb2.py +45 -0
  141. flwr/proto/clientappio_pb2.pyi +132 -0
  142. flwr/proto/clientappio_pb2_grpc.py +135 -0
  143. flwr/proto/clientappio_pb2_grpc.pyi +53 -0
  144. flwr/proto/exec_pb2.py +62 -0
  145. flwr/proto/exec_pb2.pyi +212 -0
  146. flwr/proto/exec_pb2_grpc.py +237 -0
  147. flwr/proto/exec_pb2_grpc.pyi +93 -0
  148. flwr/proto/fab_pb2.py +31 -0
  149. flwr/proto/fab_pb2.pyi +65 -0
  150. flwr/proto/fab_pb2_grpc.py +4 -0
  151. flwr/proto/fab_pb2_grpc.pyi +4 -0
  152. flwr/proto/fleet_pb2.py +42 -23
  153. flwr/proto/fleet_pb2.pyi +123 -1
  154. flwr/proto/fleet_pb2_grpc.py +170 -0
  155. flwr/proto/fleet_pb2_grpc.pyi +61 -0
  156. flwr/proto/grpcadapter_pb2.py +32 -0
  157. flwr/proto/grpcadapter_pb2.pyi +43 -0
  158. flwr/proto/grpcadapter_pb2_grpc.py +66 -0
  159. flwr/proto/grpcadapter_pb2_grpc.pyi +24 -0
  160. flwr/proto/log_pb2.py +29 -0
  161. flwr/proto/log_pb2.pyi +39 -0
  162. flwr/proto/log_pb2_grpc.py +4 -0
  163. flwr/proto/log_pb2_grpc.pyi +4 -0
  164. flwr/proto/message_pb2.py +41 -0
  165. flwr/proto/message_pb2.pyi +128 -0
  166. flwr/proto/message_pb2_grpc.py +4 -0
  167. flwr/proto/message_pb2_grpc.pyi +4 -0
  168. flwr/proto/node_pb2.py +2 -2
  169. flwr/proto/node_pb2.pyi +1 -4
  170. flwr/proto/recordset_pb2.py +35 -33
  171. flwr/proto/recordset_pb2.pyi +40 -14
  172. flwr/proto/run_pb2.py +64 -0
  173. flwr/proto/run_pb2.pyi +268 -0
  174. flwr/proto/run_pb2_grpc.py +4 -0
  175. flwr/proto/run_pb2_grpc.pyi +4 -0
  176. flwr/proto/serverappio_pb2.py +52 -0
  177. flwr/proto/{driver_pb2.pyi → serverappio_pb2.pyi} +62 -20
  178. flwr/proto/serverappio_pb2_grpc.py +410 -0
  179. flwr/proto/serverappio_pb2_grpc.pyi +160 -0
  180. flwr/proto/simulationio_pb2.py +38 -0
  181. flwr/proto/simulationio_pb2.pyi +65 -0
  182. flwr/proto/simulationio_pb2_grpc.py +239 -0
  183. flwr/proto/simulationio_pb2_grpc.pyi +94 -0
  184. flwr/proto/task_pb2.py +7 -8
  185. flwr/proto/task_pb2.pyi +8 -5
  186. flwr/proto/transport_pb2.py +8 -8
  187. flwr/proto/transport_pb2.pyi +9 -6
  188. flwr/server/__init__.py +2 -10
  189. flwr/server/app.py +579 -402
  190. flwr/server/client_manager.py +8 -6
  191. flwr/server/compat/app.py +6 -62
  192. flwr/server/compat/app_utils.py +14 -9
  193. flwr/server/compat/driver_client_proxy.py +25 -59
  194. flwr/server/compat/legacy_context.py +5 -4
  195. flwr/server/driver/__init__.py +2 -0
  196. flwr/server/driver/driver.py +36 -131
  197. flwr/server/driver/grpc_driver.py +220 -81
  198. flwr/server/driver/inmemory_driver.py +183 -0
  199. flwr/server/history.py +28 -29
  200. flwr/server/run_serverapp.py +15 -126
  201. flwr/server/server.py +50 -44
  202. flwr/server/server_app.py +59 -10
  203. flwr/server/serverapp/__init__.py +22 -0
  204. flwr/server/serverapp/app.py +256 -0
  205. flwr/server/serverapp_components.py +52 -0
  206. flwr/server/strategy/__init__.py +2 -2
  207. flwr/server/strategy/aggregate.py +37 -23
  208. flwr/server/strategy/bulyan.py +9 -9
  209. flwr/server/strategy/dp_adaptive_clipping.py +25 -25
  210. flwr/server/strategy/dp_fixed_clipping.py +23 -22
  211. flwr/server/strategy/dpfedavg_adaptive.py +8 -8
  212. flwr/server/strategy/dpfedavg_fixed.py +13 -12
  213. flwr/server/strategy/fault_tolerant_fedavg.py +11 -11
  214. flwr/server/strategy/fedadagrad.py +9 -9
  215. flwr/server/strategy/fedadam.py +20 -10
  216. flwr/server/strategy/fedavg.py +16 -16
  217. flwr/server/strategy/fedavg_android.py +17 -17
  218. flwr/server/strategy/fedavgm.py +9 -9
  219. flwr/server/strategy/fedmedian.py +5 -5
  220. flwr/server/strategy/fedopt.py +6 -6
  221. flwr/server/strategy/fedprox.py +7 -7
  222. flwr/server/strategy/fedtrimmedavg.py +8 -8
  223. flwr/server/strategy/fedxgb_bagging.py +12 -12
  224. flwr/server/strategy/fedxgb_cyclic.py +10 -10
  225. flwr/server/strategy/fedxgb_nn_avg.py +6 -6
  226. flwr/server/strategy/fedyogi.py +9 -9
  227. flwr/server/strategy/krum.py +9 -9
  228. flwr/server/strategy/qfedavg.py +16 -16
  229. flwr/server/strategy/strategy.py +10 -10
  230. flwr/server/superlink/driver/__init__.py +2 -2
  231. flwr/server/superlink/driver/serverappio_grpc.py +61 -0
  232. flwr/server/superlink/driver/serverappio_servicer.py +361 -0
  233. flwr/server/superlink/ffs/__init__.py +24 -0
  234. flwr/server/superlink/ffs/disk_ffs.py +108 -0
  235. flwr/server/superlink/ffs/ffs.py +79 -0
  236. flwr/server/superlink/ffs/ffs_factory.py +47 -0
  237. flwr/server/superlink/fleet/__init__.py +1 -1
  238. flwr/server/superlink/fleet/grpc_adapter/__init__.py +15 -0
  239. flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +162 -0
  240. flwr/server/superlink/fleet/grpc_bidi/__init__.py +1 -1
  241. flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py +4 -2
  242. flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py +3 -2
  243. flwr/server/superlink/fleet/grpc_bidi/grpc_client_proxy.py +1 -1
  244. flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +5 -154
  245. flwr/server/superlink/fleet/grpc_rere/__init__.py +1 -1
  246. flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +120 -13
  247. flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +228 -0
  248. flwr/server/superlink/fleet/message_handler/__init__.py +1 -1
  249. flwr/server/superlink/fleet/message_handler/message_handler.py +156 -13
  250. flwr/server/superlink/fleet/rest_rere/__init__.py +1 -1
  251. flwr/server/superlink/fleet/rest_rere/rest_api.py +119 -81
  252. flwr/server/superlink/fleet/vce/__init__.py +1 -0
  253. flwr/server/superlink/fleet/vce/backend/__init__.py +4 -4
  254. flwr/server/superlink/fleet/vce/backend/backend.py +8 -9
  255. flwr/server/superlink/fleet/vce/backend/raybackend.py +87 -68
  256. flwr/server/superlink/fleet/vce/vce_api.py +208 -146
  257. flwr/server/superlink/linkstate/__init__.py +28 -0
  258. flwr/server/superlink/linkstate/in_memory_linkstate.py +569 -0
  259. flwr/server/superlink/linkstate/linkstate.py +376 -0
  260. flwr/server/superlink/{state/state_factory.py → linkstate/linkstate_factory.py} +19 -10
  261. flwr/server/superlink/linkstate/sqlite_linkstate.py +1196 -0
  262. flwr/server/superlink/linkstate/utils.py +399 -0
  263. flwr/server/superlink/simulation/__init__.py +15 -0
  264. flwr/server/superlink/simulation/simulationio_grpc.py +65 -0
  265. flwr/server/superlink/simulation/simulationio_servicer.py +186 -0
  266. flwr/server/superlink/utils.py +65 -0
  267. flwr/server/typing.py +2 -0
  268. flwr/server/utils/__init__.py +1 -1
  269. flwr/server/utils/tensorboard.py +5 -5
  270. flwr/server/utils/validator.py +40 -45
  271. flwr/server/workflow/default_workflows.py +70 -26
  272. flwr/server/workflow/secure_aggregation/secagg_workflow.py +1 -0
  273. flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +40 -27
  274. flwr/simulation/__init__.py +12 -5
  275. flwr/simulation/app.py +247 -315
  276. flwr/simulation/legacy_app.py +404 -0
  277. flwr/simulation/ray_transport/__init__.py +1 -1
  278. flwr/simulation/ray_transport/ray_actor.py +42 -67
  279. flwr/simulation/ray_transport/ray_client_proxy.py +37 -17
  280. flwr/simulation/ray_transport/utils.py +1 -0
  281. flwr/simulation/run_simulation.py +306 -163
  282. flwr/simulation/simulationio_connection.py +89 -0
  283. flwr/superexec/__init__.py +15 -0
  284. flwr/superexec/app.py +59 -0
  285. flwr/superexec/deployment.py +188 -0
  286. flwr/superexec/exec_grpc.py +80 -0
  287. flwr/superexec/exec_servicer.py +231 -0
  288. flwr/superexec/exec_user_auth_interceptor.py +101 -0
  289. flwr/superexec/executor.py +96 -0
  290. flwr/superexec/simulation.py +124 -0
  291. {flwr_nightly-1.8.0.dev20240315.dist-info → flwr_nightly-1.15.0.dev20250115.dist-info}/METADATA +33 -26
  292. flwr_nightly-1.15.0.dev20250115.dist-info/RECORD +328 -0
  293. flwr_nightly-1.15.0.dev20250115.dist-info/entry_points.txt +12 -0
  294. flwr/cli/flower_toml.py +0 -140
  295. flwr/cli/new/templates/app/flower.toml.tpl +0 -13
  296. flwr/cli/new/templates/app/requirements.numpy.txt.tpl +0 -2
  297. flwr/cli/new/templates/app/requirements.pytorch.txt.tpl +0 -4
  298. flwr/cli/new/templates/app/requirements.tensorflow.txt.tpl +0 -4
  299. flwr/client/node_state.py +0 -48
  300. flwr/client/node_state_tests.py +0 -65
  301. flwr/proto/driver_pb2.py +0 -44
  302. flwr/proto/driver_pb2_grpc.py +0 -169
  303. flwr/proto/driver_pb2_grpc.pyi +0 -66
  304. flwr/server/superlink/driver/driver_grpc.py +0 -54
  305. flwr/server/superlink/driver/driver_servicer.py +0 -129
  306. flwr/server/superlink/state/in_memory_state.py +0 -230
  307. flwr/server/superlink/state/sqlite_state.py +0 -630
  308. flwr/server/superlink/state/state.py +0 -154
  309. flwr_nightly-1.8.0.dev20240315.dist-info/RECORD +0 -211
  310. flwr_nightly-1.8.0.dev20240315.dist-info/entry_points.txt +0 -9
  311. {flwr_nightly-1.8.0.dev20240315.dist-info → flwr_nightly-1.15.0.dev20250115.dist-info}/LICENSE +0 -0
  312. {flwr_nightly-1.8.0.dev20240315.dist-info → flwr_nightly-1.15.0.dev20250115.dist-info}/WHEEL +0 -0
flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl
@@ -0,0 +1,87 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+from flwr_datasets import FederatedDataset
+from flwr_datasets.partitioner import IidPartitioner
+from transformers import AutoTokenizer
+from trl import DataCollatorForCompletionOnlyLM
+
+FDS = None  # Cache FederatedDataset
+
+
+def formatting_prompts_func(example):
+    """Construct prompts."""
+    output_texts = []
+    # Constructing a standard Alpaca
+    # (https://github.com/tatsu-lab/stanford_alpaca#data-release) prompt
+    mssg = (
+        "Below is an instruction that describes a task. "
+        "Write a response that appropriately completes the request."
+    )
+    for i in range(len(example["instruction"])):
+        text = (
+            f"{mssg}\n### Instruction:\n{example['instruction'][i]}\n"
+            f"### Response: {example['response'][i]}"
+        )
+        output_texts.append(text)
+    return output_texts
+
+
+def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str):
+    """Get tokenizer, data_collator and prompt formatting."""
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_name, use_fast=True, padding_side="right"
+    )
+    tokenizer.pad_token = tokenizer.eos_token
+    response_template_with_context = "\n### Response:"  # alpaca response tag
+    response_template_ids = tokenizer.encode(
+        response_template_with_context, add_special_tokens=False
+    )[2:]
+    data_collator = DataCollatorForCompletionOnlyLM(
+        response_template_ids, tokenizer=tokenizer
+    )
+
+    return tokenizer, data_collator, formatting_prompts_func
+
+
+def formatting(dataset):
+    """Format dataset."""
+    dataset["instruction"] = dataset["instruction"] + " " + dataset["input"]
+    return dataset
+
+
+def reformat(dataset, llm_task):
+    """Reformat datasets."""
+    dataset = dataset.rename_column("output", "response")
+    if llm_task in ["finance", "code"]:
+        dataset = dataset.map(formatting, remove_columns=["input"])
+    if llm_task == "medical":
+        dataset = dataset.remove_columns(["instruction"])
+        dataset = dataset.rename_column("input", "instruction")
+    return dataset
+
+
+def load_data(partition_id: int, num_partitions: int, dataset_name: str):
+    """Load partition data."""
+    # Only initialize `FederatedDataset` once
+    global FDS
+    if FDS is None:
+        partitioner = IidPartitioner(num_partitions=num_partitions)
+        FDS = FederatedDataset(
+            dataset=dataset_name,
+            partitioners={"train": partitioner},
+        )
+    client_trainset = FDS.load_partition(partition_id, "train")
+    client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str")
+    return client_trainset
+
+
+def replace_keys(input_dict, match="-", target="_"):
+    """Recursively replace match string with target string in dictionary keys."""
+    new_dict = {}
+    for key, value in input_dict.items():
+        new_key = key.replace(match, target)
+        if isinstance(value, dict):
+            new_dict[new_key] = replace_keys(value, match, target)
+        else:
+            new_dict[new_key] = value
+    return new_dict
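The `replace_keys` helper above is what lets hyphenated keys from `context.run_config` (see the FlowerTune `server_app.py.tpl` hunk further down) be accessed as attribute-style names via OmegaConf. A minimal, self-contained sketch of its behaviour, using a made-up config dict rather than anything from this release:

# Copy of the template's helper, shown here only to illustrate its output.
def replace_keys(input_dict, match="-", target="_"):
    """Recursively replace `match` with `target` in dictionary keys."""
    new_dict = {}
    for key, value in input_dict.items():
        new_key = key.replace(match, target)
        new_dict[new_key] = (
            replace_keys(value, match, target) if isinstance(value, dict) else value
        )
    return new_dict

# Hypothetical (already unflattened) run config, not taken from the diff
run_config = {"num-server-rounds": 3, "model": {"lora": {"peft-lora-r": 8}}}
print(replace_keys(run_config))
# {'num_server_rounds': 3, 'model': {'lora': {'peft_lora_r': 8}}}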
flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl
@@ -0,0 +1,78 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+import math
+
+import torch
+from omegaconf import DictConfig
+from collections import OrderedDict
+from peft import (
+    LoraConfig,
+    get_peft_model,
+    get_peft_model_state_dict,
+    set_peft_model_state_dict,
+)
+from peft.utils import prepare_model_for_kbit_training
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+from flwr.common.typing import NDArrays
+
+
+def cosine_annealing(
+    current_round: int,
+    total_round: int,
+    lrate_max: float = 0.001,
+    lrate_min: float = 0.0,
+) -> float:
+    """Implement cosine annealing learning rate schedule."""
+    cos_inner = math.pi * current_round / total_round
+    return lrate_min + 0.5 * (lrate_max - lrate_min) * (1 + math.cos(cos_inner))
+
+
+def get_model(model_cfg: DictConfig):
+    """Load model with appropriate quantization config and other optimizations.
+    """
+    if model_cfg.quantization == 4:
+        quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+    elif model_cfg.quantization == 8:
+        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+    else:
+        raise ValueError(
+            f"Use 4-bit or 8-bit quantization. You passed: {model_cfg.quantization}/"
+        )
+
+    model = AutoModelForCausalLM.from_pretrained(
+        model_cfg.name,
+        quantization_config=quantization_config,
+        torch_dtype=torch.bfloat16,
+        low_cpu_mem_usage=True,
+    )
+
+    model = prepare_model_for_kbit_training(
+        model, use_gradient_checkpointing=model_cfg.gradient_checkpointing
+    )
+
+    peft_config = LoraConfig(
+        r=model_cfg.lora.peft_lora_r,
+        lora_alpha=model_cfg.lora.peft_lora_alpha,
+        lora_dropout=0.075,
+        task_type="CAUSAL_LM",
+    )
+
+    if model_cfg.gradient_checkpointing:
+        model.config.use_cache = False
+
+    return get_peft_model(model, peft_config)
+
+
+def set_parameters(model, parameters: NDArrays) -> None:
+    """Change the parameters of the model using the given ones."""
+    peft_state_dict_keys = get_peft_model_state_dict(model).keys()
+    params_dict = zip(peft_state_dict_keys, parameters)
+    state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
+    set_peft_model_state_dict(model, state_dict)
+
+
+def get_parameters(model) -> NDArrays:
+    """Return the parameters of the current net."""
+    state_dict = get_peft_model_state_dict(model)
+    return [val.cpu().numpy() for _, val in state_dict.items()]
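The `cosine_annealing` schedule above is easy to sanity-check in isolation: with the default `lrate_max=0.001` and `lrate_min=0.0`, the per-round learning rate decays from the maximum at round 0 to zero at the final round. A small standalone check (values rounded):

import math

def cosine_annealing(current_round, total_round, lrate_max=0.001, lrate_min=0.0):
    # Same formula as the template above
    cos_inner = math.pi * current_round / total_round
    return lrate_min + 0.5 * (lrate_max - lrate_min) * (1 + math.cos(cos_inner))

for rnd in (0, 25, 50, 75, 100):
    print(rnd, round(cosine_annealing(rnd, total_round=100), 6))
# 0 0.001 | 25 0.000854 | 50 0.0005 | 75 0.000146 | 100 0.0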
flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl
@@ -0,0 +1,94 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+import os
+from datetime import datetime
+
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.common.config import unflatten_dict
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from omegaconf import DictConfig
+
+from $import_name.models import get_model, get_parameters, set_parameters
+from $import_name.dataset import replace_keys
+from $import_name.strategy import FlowerTuneLlm
+
+
+# Get function that will be executed by the strategy's evaluate() method
+# Here we use it to save global model checkpoints
+def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path):
+    """Return an evaluation function for saving global model."""
+
+    def evaluate(server_round: int, parameters, config):
+        # Save model
+        if server_round != 0 and (
+            server_round == total_round or server_round % save_every_round == 0
+        ):
+            # Init model
+            model = get_model(model_cfg)
+            set_parameters(model, parameters)
+
+            model.save_pretrained(f"{save_path}/peft_{server_round}")
+
+        return 0.0, {}
+
+    return evaluate
+
+
+def get_on_fit_config(save_path):
+    """Return a function that will be used to construct the config that the
+    client's fit() method will receive."""
+
+    def fit_config_fn(server_round: int):
+        fit_config = {}
+        fit_config["current_round"] = server_round
+        fit_config["save_path"] = save_path
+        return fit_config
+
+    return fit_config_fn
+
+
+def fit_weighted_average(metrics):
+    """Aggregate (federated) evaluation metrics."""
+    # Multiply accuracy of each client by number of examples used
+    losses = [num_examples * m["train_loss"] for num_examples, m in metrics]
+    examples = [num_examples for num_examples, _ in metrics]
+
+    # Aggregate and return custom metric (weighted average)
+    return {"train_loss": sum(losses) / sum(examples)}
+
+
+def server_fn(context: Context):
+    """Construct components that set the ServerApp behaviour."""
+    # Create output directory given current timestamp
+    current_time = datetime.now()
+    folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S")
+    save_path = os.path.join(os.getcwd(), f"results/{folder_name}")
+    os.makedirs(save_path, exist_ok=True)
+
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+    cfg = DictConfig(replace_keys(unflatten_dict(context.run_config)))
+
+    # Get initial model weights
+    init_model = get_model(cfg.model)
+    init_model_parameters = get_parameters(init_model)
+    init_model_parameters = ndarrays_to_parameters(init_model_parameters)
+
+    # Define strategy
+    strategy = FlowerTuneLlm(
+        fraction_fit=cfg.strategy.fraction_fit,
+        fraction_evaluate=cfg.strategy.fraction_evaluate,
+        on_fit_config_fn=get_on_fit_config(save_path),
+        fit_metrics_aggregation_fn=fit_weighted_average,
+        initial_parameters=init_model_parameters,
+        evaluate_fn=get_evaluate_fn(
+            cfg.model, cfg.train.save_every_round, num_rounds, save_path
+        ),
+    )
+    config = ServerConfig(num_rounds=num_rounds)
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Flower ServerApp
+app = ServerApp(server_fn=server_fn)
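`fit_weighted_average` above weights each client's reported `train_loss` by its number of training examples. A quick standalone check with made-up client metrics (not values from this release):

def fit_weighted_average(metrics):
    # Same aggregation as the template: example-weighted mean of train_loss
    losses = [num_examples * m["train_loss"] for num_examples, m in metrics]
    examples = [num_examples for num_examples, _ in metrics]
    return {"train_loss": sum(losses) / sum(examples)}

sample_metrics = [(100, {"train_loss": 0.50}), (300, {"train_loss": 0.30})]
print(fit_weighted_average(sample_metrics))  # {'train_loss': 0.35}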
flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl
@@ -0,0 +1,83 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+from io import BytesIO
+from logging import INFO, WARN
+from typing import List, Tuple, Union
+
+from flwr.common import FitIns, FitRes, Parameters, log, parameters_to_ndarrays
+from flwr.server.client_manager import ClientManager
+from flwr.server.client_proxy import ClientProxy
+from flwr.server.strategy import FedAvg
+
+
+class FlowerTuneLlm(FedAvg):
+    """Customised FedAvg strategy implementation.
+
+    This class behaves just like FedAvg but also tracks the communication
+    costs associated with `fit` over FL rounds.
+    """
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.comm_tracker = CommunicationTracker()
+
+    def configure_fit(
+        self, server_round: int, parameters: Parameters, client_manager: ClientManager
+    ):
+        """Configure the next round of training."""
+        return_clients = super().configure_fit(server_round, parameters, client_manager)
+
+        # Test communication costs
+        fit_ins_list = [fit_ins for _, fit_ins in return_clients]
+        self.comm_tracker.track(fit_ins_list)
+
+        return return_clients
+
+    def aggregate_fit(
+        self,
+        server_round: int,
+        results: List[Tuple[ClientProxy, FitRes]],
+        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
+    ):
+        """Aggregate fit results using weighted average."""
+        # Test communication costs
+        fit_res_list = [fit_res for _, fit_res in results]
+        self.comm_tracker.track(fit_res_list)
+
+        parameters_aggregated, metrics_aggregated = super().aggregate_fit(
+            server_round, results, failures
+        )
+
+        return parameters_aggregated, metrics_aggregated
+
+
+class CommunicationTracker:
+    """Communication costs tracker over FL rounds."""
+    def __init__(self):
+        self.curr_comm_cost = 0.0
+
+    @staticmethod
+    def _compute_bytes(parameters):
+        return sum([BytesIO(t).getbuffer().nbytes for t in parameters.tensors])
+
+    def track(self, fit_list: List[Union[FitIns, FitRes]]):
+        size_bytes_list = [
+            self._compute_bytes(fit_ele.parameters)
+            for fit_ele in fit_list
+        ]
+        comm_cost = sum(size_bytes_list) / 1024**2
+
+        self.curr_comm_cost += comm_cost
+        log(
+            INFO,
+            "Communication budget: used %.2f MB (+%.2f MB this round) / 200,000 MB",
+            self.curr_comm_cost,
+            comm_cost,
+        )
+
+        if self.curr_comm_cost > 2e5:
+            log(
+                WARN,
+                "The accumulated communication cost has exceeded 200,000 MB. "
+                "Please consider reducing it if you plan to participate "
+                "FlowerTune LLM Leaderboard.",
+            )
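The `CommunicationTracker` above sums the size of every serialized tensor carried in `FitIns`/`FitRes` messages and accumulates it in MB against the 200,000 MB leaderboard budget. A stripped-down sketch of the byte accounting, using placeholder byte strings instead of real `Parameters.tensors`:

from io import BytesIO

def compute_bytes(tensors):
    # Mirrors CommunicationTracker._compute_bytes
    return sum(BytesIO(t).getbuffer().nbytes for t in tensors)

# Two fake serialized tensors of 2 MB and 3 MB (placeholders, not real payloads)
fake_tensors = [b"\x00" * (2 * 1024**2), b"\x00" * (3 * 1024**2)]
round_cost_mb = compute_bytes(fake_tensors) / 1024**2
print(round_cost_mb, round_cost_mb > 2e5)  # 5.0 False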
flwr/cli/new/templates/app/code/model.baseline.py.tpl
@@ -0,0 +1,80 @@
+"""$project_name: A Flower Baseline."""
+
+from collections import OrderedDict
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class Net(nn.Module):
+    """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')."""
+
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(3, 6, 5)
+        self.pool = nn.MaxPool2d(2, 2)
+        self.conv2 = nn.Conv2d(6, 16, 5)
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
+
+    def forward(self, x):
+        """Do forward."""
+        x = self.pool(F.relu(self.conv1(x)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = x.view(-1, 16 * 5 * 5)
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        return self.fc3(x)
+
+
+def train(net, trainloader, epochs, device):
+    """Train the model on the training set."""
+    net.to(device)  # move model to GPU if available
+    criterion = torch.nn.CrossEntropyLoss()
+    criterion.to(device)
+    optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
+    net.train()
+    running_loss = 0.0
+    for _ in range(epochs):
+        for batch in trainloader:
+            images = batch["img"]
+            labels = batch["label"]
+            optimizer.zero_grad()
+            loss = criterion(net(images.to(device)), labels.to(device))
+            loss.backward()
+            optimizer.step()
+            running_loss += loss.item()
+
+    avg_trainloss = running_loss / len(trainloader)
+    return avg_trainloss
+
+
+def test(net, testloader, device):
+    """Validate the model on the test set."""
+    net.to(device)
+    criterion = torch.nn.CrossEntropyLoss()
+    correct, loss = 0, 0.0
+    with torch.no_grad():
+        for batch in testloader:
+            images = batch["img"].to(device)
+            labels = batch["label"].to(device)
+            outputs = net(images)
+            loss += criterion(outputs, labels).item()
+            correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
+    accuracy = correct / len(testloader.dataset)
+    loss = loss / len(testloader)
+    return loss, accuracy
+
+
+def get_weights(net):
+    """Extract model parameters as numpy arrays from state_dict."""
+    return [val.cpu().numpy() for _, val in net.state_dict().items()]
+
+
+def set_weights(net, parameters):
+    """Apply parameters to an existing model."""
+    params_dict = zip(net.state_dict().keys(), parameters)
+    state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
+    net.load_state_dict(state_dict, strict=True)
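`get_weights`/`set_weights` above are the glue between PyTorch's `state_dict` and the list of NumPy ndarrays that Flower exchanges. The round trip can be checked with any module; the sketch below uses a tiny `nn.Linear` stand-in rather than the template's `Net`:

from collections import OrderedDict

import torch
from torch import nn

net = nn.Linear(4, 2)  # placeholder model, not the template's Net

# get_weights: state_dict values -> list of ndarrays
weights = [val.cpu().numpy() for _, val in net.state_dict().items()]

# set_weights: list of ndarrays -> state_dict -> model
params_dict = zip(net.state_dict().keys(), weights)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
net.load_state_dict(state_dict, strict=True)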
flwr/cli/new/templates/app/code/server.baseline.py.tpl
@@ -0,0 +1,46 @@
+"""$project_name: A Flower Baseline."""
+
+from typing import List, Tuple
+
+from flwr.common import Context, Metrics, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.model import Net, get_weights
+
+
+# Define metric aggregation function
+def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
+    """Do weighted average of accuracy metric."""
+    # Multiply accuracy of each client by number of examples used
+    accuracies = [num_examples * float(m["accuracy"]) for num_examples, m in metrics]
+    examples = [num_examples for num_examples, _ in metrics]
+
+    # Aggregate and return custom metric (weighted average)
+    return {"accuracy": sum(accuracies) / sum(examples)}
+
+
+def server_fn(context: Context):
+    """Construct components that set the ServerApp behaviour."""
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+    fraction_fit = context.run_config["fraction-fit"]
+
+    # Initialize model parameters
+    ndarrays = get_weights(Net())
+    parameters = ndarrays_to_parameters(ndarrays)
+
+    # Define strategy
+    strategy = FedAvg(
+        fraction_fit=float(fraction_fit),
+        fraction_evaluate=1.0,
+        min_available_clients=2,
+        initial_parameters=parameters,
+        evaluate_metrics_aggregation_fn=weighted_average,
+    )
+    config = ServerConfig(num_rounds=int(num_rounds))
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Create ServerApp
+app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.huggingface.py.tpl
@@ -0,0 +1,38 @@
+"""$project_name: A Flower / $framework_str app."""
+
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from transformers import AutoModelForSequenceClassification
+
+from $import_name.task import get_weights
+
+
+def server_fn(context: Context):
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+    fraction_fit = context.run_config["fraction-fit"]
+
+    # Initialize global model
+    model_name = context.run_config["model-name"]
+    num_labels = context.run_config["num-labels"]
+    net = AutoModelForSequenceClassification.from_pretrained(
+        model_name, num_labels=num_labels
+    )
+
+    weights = get_weights(net)
+    initial_parameters = ndarrays_to_parameters(weights)
+
+    # Define strategy
+    strategy = FedAvg(
+        fraction_fit=fraction_fit,
+        fraction_evaluate=1.0,
+        initial_parameters=initial_parameters,
+    )
+    config = ServerConfig(num_rounds=num_rounds)
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Create ServerApp
+app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.jax.py.tpl
@@ -0,0 +1,26 @@
+"""$project_name: A Flower / $framework_str app."""
+
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.task import get_params, load_model
+
+
+def server_fn(context: Context):
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+    input_dim = context.run_config["input-dim"]
+
+    # Initialize global model
+    params = get_params(load_model((input_dim,)))
+    initial_parameters = ndarrays_to_parameters(params)
+
+    # Define strategy
+    strategy = FedAvg(initial_parameters=initial_parameters)
+    config = ServerConfig(num_rounds=num_rounds)
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Create ServerApp
+app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.mlx.py.tpl
@@ -0,0 +1,31 @@
+"""$project_name: A Flower / $framework_str app."""
+
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.task import MLP, get_params
+
+
+def server_fn(context: Context):
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+
+    num_classes = 10
+    num_layers = context.run_config["num-layers"]
+    input_dim = context.run_config["input-dim"]
+    hidden_dim = context.run_config["hidden-dim"]
+
+    # Initialize global model
+    model = MLP(num_layers, input_dim, hidden_dim, num_classes)
+    params = get_params(model)
+    initial_parameters = ndarrays_to_parameters(params)
+
+    # Define strategy
+    strategy = FedAvg(initial_parameters=initial_parameters)
+    config = ServerConfig(num_rounds=num_rounds)
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Create ServerApp
+app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.numpy.py.tpl
@@ -1,12 +1,25 @@
-"""$project_name: A Flower / NumPy app."""
+"""$project_name: A Flower / $framework_str app."""
 
-import flwr as fl
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.task import get_dummy_model
 
-# Configure the strategy
-strategy = fl.server.strategy.FedAvg()
 
-# Flower ServerApp
-app = fl.server.ServerApp(
-    config=fl.server.ServerConfig(num_rounds=1),
-    strategy=strategy,
-)
+def server_fn(context: Context):
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+
+    # Initial model
+    model = get_dummy_model()
+    dummy_parameters = ndarrays_to_parameters([model])
+
+    # Define strategy
+    strategy = FedAvg(initial_parameters=dummy_parameters)
+    config = ServerConfig(num_rounds=num_rounds)
+
+    return ServerAppComponents(strategy=strategy, config=config)
+
+
+# Create ServerApp
+app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.pytorch.py.tpl
@@ -1,28 +1,31 @@
-"""$project_name: A Flower / PyTorch app."""
+"""$project_name: A Flower / $framework_str app."""
 
-from flwr.common import ndarrays_to_parameters
-from flwr.server import ServerApp, ServerConfig
+from flwr.common import Context, ndarrays_to_parameters
+from flwr.server import ServerApp, ServerAppComponents, ServerConfig
 from flwr.server.strategy import FedAvg
+from $import_name.task import Net, get_weights
 
-from $project_name.task import Net, get_weights
 
+def server_fn(context: Context):
+    # Read from config
+    num_rounds = context.run_config["num-server-rounds"]
+    fraction_fit = context.run_config["fraction-fit"]
 
-# Initialize model parameters
-ndarrays = get_weights(Net())
-parameters = ndarrays_to_parameters(ndarrays)
+    # Initialize model parameters
+    ndarrays = get_weights(Net())
+    parameters = ndarrays_to_parameters(ndarrays)
 
+    # Define strategy
+    strategy = FedAvg(
+        fraction_fit=fraction_fit,
+        fraction_evaluate=1.0,
+        min_available_clients=2,
+        initial_parameters=parameters,
+    )
+    config = ServerConfig(num_rounds=num_rounds)
 
-# Define strategy
-strategy = FedAvg(
-    fraction_fit=1.0,
-    fraction_evaluate=1.0,
-    min_available_clients=2,
-    initial_parameters=parameters,
-)
+    return ServerAppComponents(strategy=strategy, config=config)
 
 
 # Create ServerApp
-app = ServerApp(
-    config=ServerConfig(num_rounds=3),
-    strategy=strategy,
-)
+app = ServerApp(server_fn=server_fn)
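Across these server templates the pattern is the same: the module-level `ServerApp(config=..., strategy=...)` is replaced by a `server_fn` that builds `ServerAppComponents` from `context.run_config` at run time. The sketch below only illustrates what `server_fn` consumes; the stub object is not a Flower `Context`, and in a real run the values come from the app's run config rather than a hand-built dict:

from types import SimpleNamespace

def server_fn(context):
    # Reads the same key as the templates above
    num_rounds = context.run_config["num-server-rounds"]
    return {"num_rounds": num_rounds}  # stands in for ServerAppComponents

stub_context = SimpleNamespace(run_config={"num-server-rounds": 3})
print(server_fn(stub_context))  # {'num_rounds': 3}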