flwr 1.24.0__py3-none-any.whl → 1.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. flwr/__init__.py +1 -1
  2. flwr/app/__init__.py +4 -1
  3. flwr/app/message_type.py +29 -0
  4. flwr/app/metadata.py +5 -2
  5. flwr/app/user_config.py +19 -0
  6. flwr/cli/app.py +37 -19
  7. flwr/cli/app_cmd/publish.py +25 -75
  8. flwr/cli/app_cmd/review.py +25 -66
  9. flwr/cli/auth_plugin/auth_plugin.py +5 -10
  10. flwr/cli/auth_plugin/noop_auth_plugin.py +1 -2
  11. flwr/cli/auth_plugin/oidc_cli_plugin.py +38 -38
  12. flwr/cli/build.py +15 -28
  13. flwr/cli/config/__init__.py +21 -0
  14. flwr/cli/config/ls.py +71 -0
  15. flwr/cli/config_migration.py +297 -0
  16. flwr/cli/config_utils.py +63 -156
  17. flwr/cli/constant.py +71 -0
  18. flwr/cli/federation/__init__.py +0 -2
  19. flwr/cli/federation/ls.py +256 -64
  20. flwr/cli/flower_config.py +429 -0
  21. flwr/cli/install.py +23 -62
  22. flwr/cli/log.py +23 -37
  23. flwr/cli/login/login.py +29 -63
  24. flwr/cli/ls.py +72 -61
  25. flwr/cli/new/new.py +98 -309
  26. flwr/cli/pull.py +19 -37
  27. flwr/cli/run/run.py +87 -100
  28. flwr/cli/run_utils.py +23 -5
  29. flwr/cli/stop.py +33 -74
  30. flwr/cli/supernode/ls.py +35 -62
  31. flwr/cli/supernode/register.py +31 -80
  32. flwr/cli/supernode/unregister.py +24 -70
  33. flwr/cli/typing.py +200 -0
  34. flwr/cli/utils.py +160 -412
  35. flwr/client/grpc_adapter_client/connection.py +2 -2
  36. flwr/client/grpc_rere_client/connection.py +9 -6
  37. flwr/client/grpc_rere_client/grpc_adapter.py +1 -1
  38. flwr/client/message_handler/message_handler.py +2 -1
  39. flwr/client/mod/centraldp_mods.py +1 -1
  40. flwr/client/mod/localdp_mod.py +1 -1
  41. flwr/client/mod/secure_aggregation/secaggplus_mod.py +1 -1
  42. flwr/client/rest_client/connection.py +6 -4
  43. flwr/client/run_info_store.py +2 -1
  44. flwr/clientapp/client_app.py +2 -1
  45. flwr/common/__init__.py +3 -2
  46. flwr/common/args.py +5 -5
  47. flwr/common/config.py +12 -17
  48. flwr/common/constant.py +3 -16
  49. flwr/common/context.py +2 -1
  50. flwr/common/exit/exit.py +4 -4
  51. flwr/common/exit/exit_code.py +6 -0
  52. flwr/common/grpc.py +2 -1
  53. flwr/common/logger.py +1 -1
  54. flwr/common/message.py +1 -1
  55. flwr/common/retry_invoker.py +13 -5
  56. flwr/common/secure_aggregation/ndarrays_arithmetic.py +5 -2
  57. flwr/common/serde.py +13 -5
  58. flwr/common/telemetry.py +1 -1
  59. flwr/common/typing.py +10 -3
  60. flwr/compat/client/app.py +6 -9
  61. flwr/compat/client/grpc_client/connection.py +2 -1
  62. flwr/compat/common/constant.py +29 -0
  63. flwr/compat/server/app.py +1 -1
  64. flwr/proto/clientappio_pb2.py +2 -2
  65. flwr/proto/clientappio_pb2_grpc.py +104 -88
  66. flwr/proto/clientappio_pb2_grpc.pyi +140 -80
  67. flwr/proto/federation_pb2.py +5 -3
  68. flwr/proto/federation_pb2.pyi +32 -2
  69. flwr/proto/fleet_pb2.py +10 -10
  70. flwr/proto/fleet_pb2.pyi +5 -1
  71. flwr/proto/run_pb2.py +18 -26
  72. flwr/proto/run_pb2.pyi +10 -58
  73. flwr/proto/serverappio_pb2.py +2 -2
  74. flwr/proto/serverappio_pb2_grpc.py +138 -207
  75. flwr/proto/serverappio_pb2_grpc.pyi +189 -155
  76. flwr/proto/simulationio_pb2.py +2 -2
  77. flwr/proto/simulationio_pb2_grpc.py +62 -90
  78. flwr/proto/simulationio_pb2_grpc.pyi +95 -55
  79. flwr/server/app.py +7 -13
  80. flwr/server/compat/grid_client_proxy.py +2 -1
  81. flwr/server/grid/grpc_grid.py +5 -5
  82. flwr/server/serverapp/app.py +11 -4
  83. flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +1 -1
  84. flwr/server/superlink/fleet/grpc_rere/node_auth_server_interceptor.py +13 -12
  85. flwr/server/superlink/fleet/message_handler/message_handler.py +42 -2
  86. flwr/server/superlink/linkstate/__init__.py +2 -2
  87. flwr/server/superlink/linkstate/in_memory_linkstate.py +36 -10
  88. flwr/server/superlink/linkstate/linkstate.py +34 -21
  89. flwr/server/superlink/linkstate/linkstate_factory.py +16 -8
  90. flwr/server/superlink/linkstate/{sqlite_linkstate.py → sql_linkstate.py} +471 -516
  91. flwr/server/superlink/linkstate/utils.py +49 -2
  92. flwr/server/superlink/serverappio/serverappio_servicer.py +1 -33
  93. flwr/server/superlink/simulation/simulationio_servicer.py +0 -19
  94. flwr/server/utils/validator.py +1 -1
  95. flwr/server/workflow/default_workflows.py +2 -1
  96. flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +1 -1
  97. flwr/serverapp/strategy/bulyan.py +7 -1
  98. flwr/serverapp/strategy/dp_fixed_clipping.py +9 -1
  99. flwr/serverapp/strategy/fedavg.py +1 -1
  100. flwr/serverapp/strategy/fedxgb_cyclic.py +1 -1
  101. flwr/simulation/ray_transport/ray_client_proxy.py +2 -6
  102. flwr/simulation/run_simulation.py +3 -12
  103. flwr/simulation/simulationio_connection.py +3 -3
  104. flwr/{common → supercore}/address.py +7 -33
  105. flwr/supercore/app_utils.py +2 -1
  106. flwr/supercore/constant.py +27 -2
  107. flwr/supercore/corestate/{sqlite_corestate.py → sql_corestate.py} +19 -23
  108. flwr/supercore/credential_store/__init__.py +33 -0
  109. flwr/supercore/credential_store/credential_store.py +34 -0
  110. flwr/supercore/credential_store/file_credential_store.py +76 -0
  111. flwr/{common → supercore}/date.py +0 -11
  112. flwr/supercore/ffs/disk_ffs.py +1 -1
  113. flwr/supercore/object_store/object_store_factory.py +14 -6
  114. flwr/supercore/object_store/{sqlite_object_store.py → sql_object_store.py} +115 -117
  115. flwr/supercore/sql_mixin.py +315 -0
  116. flwr/{cli/new/templates → supercore/state}/__init__.py +2 -2
  117. flwr/{cli/new/templates/app/code/flwr_tune → supercore/state/alembic}/__init__.py +2 -2
  118. flwr/supercore/state/alembic/env.py +103 -0
  119. flwr/supercore/state/alembic/script.py.mako +43 -0
  120. flwr/supercore/state/alembic/utils.py +239 -0
  121. flwr/{cli/new/templates/app → supercore/state/alembic/versions}/__init__.py +2 -2
  122. flwr/supercore/state/alembic/versions/rev_2026_01_28_initialize_migration_of_state_tables.py +200 -0
  123. flwr/supercore/state/schema/README.md +121 -0
  124. flwr/{cli/new/templates/app/code → supercore/state/schema}/__init__.py +2 -2
  125. flwr/supercore/state/schema/corestate_tables.py +36 -0
  126. flwr/supercore/state/schema/linkstate_tables.py +152 -0
  127. flwr/supercore/state/schema/objectstore_tables.py +90 -0
  128. flwr/supercore/superexec/run_superexec.py +2 -2
  129. flwr/supercore/utils.py +225 -0
  130. flwr/superlink/federation/federation_manager.py +2 -2
  131. flwr/superlink/federation/noop_federation_manager.py +8 -6
  132. flwr/superlink/servicer/control/control_grpc.py +2 -0
  133. flwr/superlink/servicer/control/control_servicer.py +106 -21
  134. flwr/supernode/cli/flower_supernode.py +2 -1
  135. flwr/supernode/nodestate/in_memory_nodestate.py +62 -1
  136. flwr/supernode/nodestate/nodestate.py +45 -0
  137. flwr/supernode/runtime/run_clientapp.py +14 -14
  138. flwr/supernode/servicer/clientappio/clientappio_servicer.py +13 -5
  139. flwr/supernode/start_client_internal.py +17 -10
  140. {flwr-1.24.0.dist-info → flwr-1.26.0.dist-info}/METADATA +8 -8
  141. {flwr-1.24.0.dist-info → flwr-1.26.0.dist-info}/RECORD +144 -184
  142. flwr/cli/federation/show.py +0 -317
  143. flwr/cli/new/templates/app/.gitignore.tpl +0 -163
  144. flwr/cli/new/templates/app/LICENSE.tpl +0 -202
  145. flwr/cli/new/templates/app/README.baseline.md.tpl +0 -127
  146. flwr/cli/new/templates/app/README.flowertune.md.tpl +0 -68
  147. flwr/cli/new/templates/app/README.md.tpl +0 -37
  148. flwr/cli/new/templates/app/code/__init__.baseline.py.tpl +0 -1
  149. flwr/cli/new/templates/app/code/__init__.py.tpl +0 -1
  150. flwr/cli/new/templates/app/code/__init__.pytorch_legacy_api.py.tpl +0 -1
  151. flwr/cli/new/templates/app/code/client.baseline.py.tpl +0 -75
  152. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +0 -93
  153. flwr/cli/new/templates/app/code/client.jax.py.tpl +0 -71
  154. flwr/cli/new/templates/app/code/client.mlx.py.tpl +0 -102
  155. flwr/cli/new/templates/app/code/client.numpy.py.tpl +0 -46
  156. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +0 -80
  157. flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +0 -55
  158. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +0 -108
  159. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +0 -82
  160. flwr/cli/new/templates/app/code/client.xgboost.py.tpl +0 -110
  161. flwr/cli/new/templates/app/code/dataset.baseline.py.tpl +0 -36
  162. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +0 -92
  163. flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +0 -87
  164. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +0 -56
  165. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +0 -73
  166. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +0 -78
  167. flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -66
  168. flwr/cli/new/templates/app/code/server.baseline.py.tpl +0 -43
  169. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +0 -42
  170. flwr/cli/new/templates/app/code/server.jax.py.tpl +0 -39
  171. flwr/cli/new/templates/app/code/server.mlx.py.tpl +0 -41
  172. flwr/cli/new/templates/app/code/server.numpy.py.tpl +0 -38
  173. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +0 -41
  174. flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +0 -31
  175. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +0 -44
  176. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +0 -38
  177. flwr/cli/new/templates/app/code/server.xgboost.py.tpl +0 -56
  178. flwr/cli/new/templates/app/code/strategy.baseline.py.tpl +0 -1
  179. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +0 -98
  180. flwr/cli/new/templates/app/code/task.jax.py.tpl +0 -57
  181. flwr/cli/new/templates/app/code/task.mlx.py.tpl +0 -102
  182. flwr/cli/new/templates/app/code/task.numpy.py.tpl +0 -7
  183. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +0 -99
  184. flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl +0 -111
  185. flwr/cli/new/templates/app/code/task.sklearn.py.tpl +0 -67
  186. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +0 -52
  187. flwr/cli/new/templates/app/code/task.xgboost.py.tpl +0 -67
  188. flwr/cli/new/templates/app/code/utils.baseline.py.tpl +0 -1
  189. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +0 -146
  190. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +0 -80
  191. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +0 -65
  192. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +0 -52
  193. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +0 -56
  194. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +0 -49
  195. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +0 -53
  196. flwr/cli/new/templates/app/pyproject.pytorch_legacy_api.toml.tpl +0 -53
  197. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +0 -52
  198. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +0 -53
  199. flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +0 -61
  200. flwr/common/pyproject.py +0 -42
  201. flwr/supercore/sqlite_mixin.py +0 -159
  202. flwr/{common → supercore}/version.py +0 -0
  203. {flwr-1.24.0.dist-info → flwr-1.26.0.dist-info}/WHEEL +0 -0
  204. {flwr-1.24.0.dist-info → flwr-1.26.0.dist-info}/entry_points.txt +0 -0
flwr/cli/new/templates/app/code/client.pytorch.py.tpl
@@ -1,80 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import torch
- from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
- from flwr.clientapp import ClientApp
-
- from $import_name.task import Net, load_data
- from $import_name.task import test as test_fn
- from $import_name.task import train as train_fn
-
- # Flower ClientApp
- app = ClientApp()
-
-
- @app.train()
- def train(msg: Message, context: Context):
-     """Train the model on local data."""
-
-     # Load the model and initialize it with the received weights
-     model = Net()
-     model.load_state_dict(msg.content["arrays"].to_torch_state_dict())
-     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-     model.to(device)
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     trainloader, _ = load_data(partition_id, num_partitions)
-
-     # Call the training function
-     train_loss = train_fn(
-         model,
-         trainloader,
-         context.run_config["local-epochs"],
-         msg.content["config"]["lr"],
-         device,
-     )
-
-     # Construct and return reply Message
-     model_record = ArrayRecord(model.state_dict())
-     metrics = {
-         "train_loss": train_loss,
-         "num-examples": len(trainloader.dataset),
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"arrays": model_record, "metrics": metric_record})
-     return Message(content=content, reply_to=msg)
-
-
- @app.evaluate()
- def evaluate(msg: Message, context: Context):
-     """Evaluate the model on local data."""
-
-     # Load the model and initialize it with the received weights
-     model = Net()
-     model.load_state_dict(msg.content["arrays"].to_torch_state_dict())
-     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-     model.to(device)
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     _, valloader = load_data(partition_id, num_partitions)
-
-     # Call the evaluation function
-     eval_loss, eval_acc = test_fn(
-         model,
-         valloader,
-         device,
-     )
-
-     # Construct and return reply Message
-     metrics = {
-         "eval_loss": eval_loss,
-         "eval_acc": eval_acc,
-         "num-examples": len(valloader.dataset),
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"metrics": metric_record})
-     return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl
@@ -1,55 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import torch
-
- from flwr.client import ClientApp, NumPyClient
- from flwr.common import Context
- from $import_name.task import Net, get_weights, load_data, set_weights, test, train
-
-
- # Define Flower Client and client_fn
- class FlowerClient(NumPyClient):
-     def __init__(self, net, trainloader, valloader, local_epochs):
-         self.net = net
-         self.trainloader = trainloader
-         self.valloader = valloader
-         self.local_epochs = local_epochs
-         self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-         self.net.to(self.device)
-
-     def fit(self, parameters, config):
-         set_weights(self.net, parameters)
-         train_loss = train(
-             self.net,
-             self.trainloader,
-             self.local_epochs,
-             self.device,
-         )
-         return (
-             get_weights(self.net),
-             len(self.trainloader.dataset),
-             {"train_loss": train_loss},
-         )
-
-     def evaluate(self, parameters, config):
-         set_weights(self.net, parameters)
-         loss, accuracy = test(self.net, self.valloader, self.device)
-         return loss, len(self.valloader.dataset), {"accuracy": accuracy}
-
-
- def client_fn(context: Context):
-     # Load model and data
-     net = Net()
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     trainloader, valloader = load_data(partition_id, num_partitions)
-     local_epochs = context.run_config["local-epochs"]
-
-     # Return Client instance
-     return FlowerClient(net, trainloader, valloader, local_epochs).to_client()
-
-
- # Flower ClientApp
- app = ClientApp(
-     client_fn,
- )
flwr/cli/new/templates/app/code/client.sklearn.py.tpl
@@ -1,108 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import warnings
-
- from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
- from flwr.clientapp import ClientApp
- from sklearn.metrics import (
-     accuracy_score,
-     f1_score,
-     log_loss,
-     precision_score,
-     recall_score,
- )
-
- from $import_name.task import (
-     get_model,
-     get_model_params,
-     load_data,
-     set_initial_params,
-     set_model_params,
- )
-
- # Flower ClientApp
- app = ClientApp()
-
-
- @app.train()
- def train(msg: Message, context: Context):
-     """Train the model on local data."""
-
-     # Create LogisticRegression Model
-     penalty = context.run_config["penalty"]
-     local_epochs = context.run_config["local-epochs"]
-     model = get_model(penalty, local_epochs)
-     # Setting initial parameters, akin to model.compile for keras models
-     set_initial_params(model)
-
-     # Apply received parameters
-     ndarrays = msg.content["arrays"].to_numpy_ndarrays()
-     set_model_params(model, ndarrays)
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     X_train, _, y_train, _ = load_data(partition_id, num_partitions)
-
-     # Ignore convergence failure due to low local epochs
-     with warnings.catch_warnings():
-         warnings.simplefilter("ignore")
-         # Train the model on local data
-         model.fit(X_train, y_train)
-
-     # Let's compute train loss
-     y_train_pred_proba = model.predict_proba(X_train)
-     train_logloss = log_loss(y_train, y_train_pred_proba)
-
-     # Construct and return reply Message
-     ndarrays = get_model_params(model)
-     model_record = ArrayRecord(ndarrays)
-     metrics = {"num-examples": len(X_train), "train_logloss": train_logloss}
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"arrays": model_record, "metrics": metric_record})
-     return Message(content=content, reply_to=msg)
-
-
- @app.evaluate()
- def evaluate(msg: Message, context: Context):
-     """Evaluate the model on test data."""
-
-     # Create LogisticRegression Model
-     penalty = context.run_config["penalty"]
-     local_epochs = context.run_config["local-epochs"]
-     model = get_model(penalty, local_epochs)
-
-     # Setting initial parameters, akin to model.compile for keras models
-     set_initial_params(model)
-
-     # Apply received parameters
-     ndarrays = msg.content["arrays"].to_numpy_ndarrays()
-     set_model_params(model, ndarrays)
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     _, X_test, _, y_test = load_data(partition_id, num_partitions)
-
-     # Evaluate the model on local data
-     y_train_pred = model.predict(X_test)
-     y_train_pred_proba = model.predict_proba(X_test)
-
-     accuracy = accuracy_score(y_test, y_train_pred)
-     loss = log_loss(y_test, y_train_pred_proba)
-     precision = precision_score(y_test, y_train_pred, average="macro", zero_division=0)
-     recall = recall_score(y_test, y_train_pred, average="macro", zero_division=0)
-     f1 = f1_score(y_test, y_train_pred, average="macro", zero_division=0)
-
-     # Construct and return reply Message
-     metrics = {
-         "num-examples": len(X_test),
-         "test_logloss": loss,
-         "accuracy": accuracy,
-         "precision": precision,
-         "recall": recall,
-         "f1": f1,
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"metrics": metric_record})
-     return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/client.tensorflow.py.tpl
@@ -1,82 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
- from flwr.clientapp import ClientApp
-
- from $import_name.task import load_data, load_model
-
- # Flower ClientApp
- app = ClientApp()
-
-
- @app.train()
- def train(msg: Message, context: Context):
-     """Train the model on local data."""
-
-     # Load the model and initialize it with the received weights
-     model = load_model()
-     ndarrays = msg.content["arrays"].to_numpy_ndarrays()
-     model.set_weights(ndarrays)
-
-     # Read from config
-     epochs = context.run_config["local-epochs"]
-     batch_size = context.run_config["batch-size"]
-     verbose = context.run_config.get("verbose")
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     x_train, y_train, _, _ = load_data(partition_id, num_partitions)
-
-     # Train the model on local data
-     history = model.fit(
-         x_train,
-         y_train,
-         epochs=epochs,
-         batch_size=batch_size,
-         verbose=verbose,
-     )
-
-     # Get final training loss and accuracy
-     train_loss = history.history["loss"][-1] if "loss" in history.history else None
-     train_acc = history.history.get("accuracy")
-     train_acc = train_acc[-1] if train_acc is not None else None
-
-     # Construct and return reply Message
-     model_record = ArrayRecord(model.get_weights())
-     metrics = {"num-examples": len(x_train)}
-     if train_loss is not None:
-         metrics["train_loss"] = train_loss
-     if train_acc is not None:
-         metrics["train_acc"] = train_acc
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"arrays": model_record, "metrics": metric_record})
-     return Message(content=content, reply_to=msg)
-
-
- @app.evaluate()
- def evaluate(msg: Message, context: Context):
-     """Evaluate the model on local data."""
-
-     # Load the model and initialize it with the received weights
-     model = load_model()
-     ndarrays = msg.content["arrays"].to_numpy_ndarrays()
-     model.set_weights(ndarrays)
-
-     # Load the data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     _, _, x_test, y_test = load_data(partition_id, num_partitions)
-
-     # Evaluate the model on local data
-     loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
-
-     # Construct and return reply Message
-     metrics = {
-         "eval_loss": loss,
-         "eval_acc": accuracy,
-         "num-examples": len(x_test),
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"metrics": metric_record})
-     return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/client.xgboost.py.tpl
@@ -1,110 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import warnings
-
- import numpy as np
- import xgboost as xgb
- from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
- from flwr.clientapp import ClientApp
- from flwr.common.config import unflatten_dict
-
- from $import_name.task import load_data, replace_keys
-
- warnings.filterwarnings("ignore", category=UserWarning)
-
-
- # Flower ClientApp
- app = ClientApp()
-
-
- def _local_boost(bst_input, num_local_round, train_dmatrix):
-     # Update trees based on local training data.
-     for i in range(num_local_round):
-         bst_input.update(train_dmatrix, bst_input.num_boosted_rounds())
-
-     # Bagging: extract the last N=num_local_round trees for server aggregation
-     bst = bst_input[
-         bst_input.num_boosted_rounds()
-         - num_local_round : bst_input.num_boosted_rounds()
-     ]
-     return bst
-
-
- @app.train()
- def train(msg: Message, context: Context) -> Message:
-     # Load model and data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     train_dmatrix, _, num_train, _ = load_data(partition_id, num_partitions)
-
-     # Read from run config
-     num_local_round = context.run_config["local-epochs"]
-     # Unflatten config dict and replace "-" with "_"
-     cfg = replace_keys(unflatten_dict(context.run_config))
-     params = cfg["params"]
-
-     global_round = msg.content["config"]["server-round"]
-     if global_round == 1:
-         # First round local training
-         bst = xgb.train(
-             params,
-             train_dmatrix,
-             num_boost_round=num_local_round,
-         )
-     else:
-         bst = xgb.Booster(params=params)
-         global_model = bytearray(msg.content["arrays"]["0"].numpy().tobytes())
-
-         # Load global model into booster
-         bst.load_model(global_model)
-
-         # Local training
-         bst = _local_boost(bst, num_local_round, train_dmatrix)
-
-     # Save model
-     local_model = bst.save_raw("json")
-     model_np = np.frombuffer(local_model, dtype=np.uint8)
-
-     # Construct reply message
-     # Note: we store the model as the first item in a list into ArrayRecord,
-     # which can be accessed using index ["0"].
-     model_record = ArrayRecord([model_np])
-     metrics = {
-         "num-examples": num_train,
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"arrays": model_record, "metrics": metric_record})
-     return Message(content=content, reply_to=msg)
-
-
- @app.evaluate()
- def evaluate(msg: Message, context: Context) -> Message:
-     # Load model and data
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     _, valid_dmatrix, _, num_val = load_data(partition_id, num_partitions)
-
-     # Load config
-     cfg = replace_keys(unflatten_dict(context.run_config))
-     params = cfg["params"]
-
-     # Load global model
-     bst = xgb.Booster(params=params)
-     global_model = bytearray(msg.content["arrays"]["0"].numpy().tobytes())
-     bst.load_model(global_model)
-
-     # Run evaluation
-     eval_results = bst.eval_set(
-         evals=[(valid_dmatrix, "valid")],
-         iteration=bst.num_boosted_rounds() - 1,
-     )
-     auc = float(eval_results.split("\t")[1].split(":")[1])
-
-     # Construct and return reply Message
-     metrics = {
-         "auc": auc,
-         "num-examples": num_val,
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"metrics": metric_record})
-     return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/dataset.baseline.py.tpl
@@ -1,36 +0,0 @@
- """$project_name: A Flower Baseline."""
-
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from torch.utils.data import DataLoader
- from torchvision.transforms import Compose, Normalize, ToTensor
-
- FDS = None  # Cache FederatedDataset
-
-
- def load_data(partition_id: int, num_partitions: int):
-     """Load partition CIFAR10 data."""
-     # Only initialize `FederatedDataset` once
-     global FDS  # pylint: disable=global-statement
-     if FDS is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         FDS = FederatedDataset(
-             dataset="uoft-cs/cifar10",
-             partitioners={"train": partitioner},
-         )
-     partition = FDS.load_partition(partition_id)
-     # Divide data on each node: 80% train, 20% test
-     partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
-     pytorch_transforms = Compose(
-         [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
-     )
-
-     def apply_transforms(batch):
-         """Apply transforms to the partition from FederatedDataset."""
-         batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
-         return batch
-
-     partition_train_test = partition_train_test.with_transform(apply_transforms)
-     trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True)
-     testloader = DataLoader(partition_train_test["test"], batch_size=32)
-     return trainloader, testloader
flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl
@@ -1,92 +0,0 @@
- """$project_name: A Flower / FlowerTune app."""
-
- import os
- import warnings
-
- from flwr.app import ArrayRecord, Context, Message, MetricRecord, RecordDict
- from flwr.clientapp import ClientApp
- from flwr.common.config import unflatten_dict
- from omegaconf import DictConfig
- from peft import get_peft_model_state_dict, set_peft_model_state_dict
- from transformers import TrainingArguments
- from trl import SFTTrainer
-
- from $import_name.dataset import (
-     get_tokenizer_and_data_collator_and_propt_formatting,
-     load_data,
-     replace_keys,
- )
- from $import_name.models import cosine_annealing, get_model
-
- # Avoid warnings
- os.environ["TOKENIZERS_PARALLELISM"] = "true"
- os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1"
- warnings.filterwarnings("ignore", category=UserWarning)
-
-
- # Avoid warnings
- os.environ["TOKENIZERS_PARALLELISM"] = "true"
- os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1"
- warnings.filterwarnings("ignore", category=UserWarning)
-
-
- # Flower ClientApp
- app = ClientApp()
-
-
- @app.train()
- def train(msg: Message, context: Context):
-     """Train the model on local data."""
-     # Parse config
-     partition_id = context.node_config["partition-id"]
-     num_partitions = context.node_config["num-partitions"]
-     num_rounds = context.run_config["num-server-rounds"]
-     cfg = DictConfig(replace_keys(unflatten_dict(context.run_config)))
-     training_arguments = TrainingArguments(**cfg.train.training_arguments)
-
-     # Let's get the client partition
-     trainset = load_data(partition_id, num_partitions, cfg.static.dataset.name)
-     (
-         tokenizer,
-         data_collator,
-         formatting_prompts_func,
-     ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name)
-
-     # Load the model and initialize it with the received weights
-     model = get_model(cfg.model)
-     set_peft_model_state_dict(model, msg.content["arrays"].to_torch_state_dict())
-
-     # Set learning rate for current round
-     new_lr = cosine_annealing(
-         msg.content["config"]["server-round"],
-         num_rounds,
-         cfg.train.learning_rate_max,
-         cfg.train.learning_rate_min,
-     )
-
-     training_arguments.learning_rate = new_lr
-     training_arguments.output_dir = msg.content["config"]["save_path"]
-
-     # Construct trainer
-     trainer = SFTTrainer(
-         model=model,
-         tokenizer=tokenizer,
-         args=training_arguments,
-         max_seq_length=cfg.train.seq_length,
-         train_dataset=trainset,
-         formatting_func=formatting_prompts_func,
-         data_collator=data_collator,
-     )
-
-     # Do local training
-     results = trainer.train()
-
-     # Construct and return reply Message
-     model_record = ArrayRecord(get_peft_model_state_dict(model))
-     metrics = {
-         "train_loss": results.training_loss,
-         "num-examples": len(trainset),
-     }
-     metric_record = MetricRecord(metrics)
-     content = RecordDict({"arrays": model_record, "metrics": metric_record})
-     return Message(content=content, reply_to=msg)
flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl
@@ -1,87 +0,0 @@
- """$project_name: A Flower / FlowerTune app."""
-
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from transformers import AutoTokenizer
- from trl import DataCollatorForCompletionOnlyLM
-
- FDS = None  # Cache FederatedDataset
-
-
- def formatting_prompts_func(example):
-     """Construct prompts."""
-     output_texts = []
-     # Constructing a standard Alpaca
-     # (https://github.com/tatsu-lab/stanford_alpaca#data-release) prompt
-     mssg = (
-         "Below is an instruction that describes a task. "
-         "Write a response that appropriately completes the request."
-     )
-     for i in range(len(example["instruction"])):
-         text = (
-             f"{mssg}\n### Instruction:\n{example['instruction'][i]}\n"
-             f"### Response: {example['response'][i]}"
-         )
-         output_texts.append(text)
-     return output_texts
-
-
- def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str):
-     """Get tokenizer, data_collator and prompt formatting."""
-     tokenizer = AutoTokenizer.from_pretrained(
-         model_name, use_fast=True, padding_side="right"
-     )
-     tokenizer.pad_token = tokenizer.eos_token
-     response_template_with_context = "\n### Response:"  # alpaca response tag
-     response_template_ids = tokenizer.encode(
-         response_template_with_context, add_special_tokens=False
-     )[2:]
-     data_collator = DataCollatorForCompletionOnlyLM(
-         response_template_ids, tokenizer=tokenizer
-     )
-
-     return tokenizer, data_collator, formatting_prompts_func
-
-
- def formatting(dataset):
-     """Format dataset."""
-     dataset["instruction"] = dataset["instruction"] + " " + dataset["input"]
-     return dataset
-
-
- def reformat(dataset, llm_task):
-     """Reformat datasets."""
-     dataset = dataset.rename_column("output", "response")
-     if llm_task in ["finance", "code"]:
-         dataset = dataset.map(formatting, remove_columns=["input"])
-     if llm_task == "medical":
-         dataset = dataset.remove_columns(["instruction"])
-         dataset = dataset.rename_column("input", "instruction")
-     return dataset
-
-
- def load_data(partition_id: int, num_partitions: int, dataset_name: str):
-     """Load partition data."""
-     # Only initialize `FederatedDataset` once
-     global FDS
-     if FDS is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         FDS = FederatedDataset(
-             dataset=dataset_name,
-             partitioners={"train": partitioner},
-         )
-     client_trainset = FDS.load_partition(partition_id, "train")
-     client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str")
-     return client_trainset
-
-
- def replace_keys(input_dict, match="-", target="_"):
-     """Recursively replace match string with target string in dictionary keys."""
-     new_dict = {}
-     for key, value in input_dict.items():
-         new_key = key.replace(match, target)
-         if isinstance(value, dict):
-             new_dict[new_key] = replace_keys(value, match, target)
-         else:
-             new_dict[new_key] = value
-     return new_dict