flwr-nightly 1.21.0.dev20250828__py3-none-any.whl → 1.21.0.dev20250830__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
flwr/__init__.py CHANGED
@@ -17,12 +17,15 @@
  
  from flwr.common.version import package_version as _package_version
  
- from . import client, common, server, simulation
+ from . import app, client, clientapp, common, server, serverapp, simulation
  
  __all__ = [
+     "app",
      "client",
+     "clientapp",
      "common",
      "server",
+     "serverapp",
      "simulation",
  ]
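The hunk above promotes app, clientapp, and serverapp to top-level exports alongside the existing subpackages. A minimal sketch of what that enables, using only imports that already appear in the new Message API templates further down in this diff:

    # Sketch only: the subpackages are now reachable straight from `flwr`.
    import flwr

    print(flwr.__all__)
    # ['app', 'client', 'clientapp', 'common', 'server', 'serverapp', 'simulation']

    # Imports used by the new pytorch_msg_api templates added in this release:
    from flwr.client import ClientApp        # decorator-based client entry points
    from flwr.server import Grid, ServerApp  # server entry point and node grid
    from flwr.serverapp import FedAvg        # Message API FedAvg strategy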
flwr/cli/new/new.py CHANGED
@@ -35,6 +35,7 @@ class MlFramework(str, Enum):
      """Available frameworks."""
  
      PYTORCH = "PyTorch"
+     PYTORCH_MSG_API = "PyTorch (Message API)"
      TENSORFLOW = "TensorFlow"
      SKLEARN = "sklearn"
      HUGGINGFACE = "HuggingFace"
@@ -154,6 +155,9 @@ def new(
      if framework_str == MlFramework.BASELINE:
          framework_str = "baseline"
  
+     if framework_str == MlFramework.PYTORCH_MSG_API:
+         framework_str = "pytorch_msg_api"
+
      print(
          typer.style(
              f"\n🔨 Creating Flower App {app_name}...",
@@ -243,12 +247,19 @@ def new(
          MlFramework.TENSORFLOW.value,
          MlFramework.SKLEARN.value,
          MlFramework.NUMPY.value,
+         "pytorch_msg_api",
      ]
      if framework_str in frameworks_with_tasks:
          files[f"{import_name}/task.py"] = {
              "template": f"app/code/task.{template_name}.py.tpl"
          }
  
+     if framework_str == "pytorch_msg_api":
+         # Use custom __init__ that better captures name of framework
+         files[f"{import_name}/__init__.py"] = {
+             "template": f"app/code/__init__.{framework_str}.py.tpl"
+         }
+
      if framework_str == "baseline":
          # Include additional files for baseline template
          for file_name in ["model", "dataset", "strategy", "utils", "__init__"]:
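The new framework value maps to a full set of template files added further down. A hedged sketch of where each generated file comes from, with template paths taken from the RECORD entries at the end of this diff (the app name my_app is hypothetical, and only part of the files mapping in new.py is visible here):

    # Illustrative only: template sources behind the "pytorch_msg_api" choice.
    generated_files = {
        "pyproject.toml": "app/pyproject.pytorch_msg_api.toml.tpl",
        "my_app/__init__.py": "app/code/__init__.pytorch_msg_api.py.tpl",
        "my_app/client_app.py": "app/code/client.pytorch_msg_api.py.tpl",
        "my_app/server_app.py": "app/code/server.pytorch_msg_api.py.tpl",
        "my_app/task.py": "app/code/task.pytorch_msg_api.py.tpl",
    }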
flwr/cli/new/templates/app/code/__init__.pytorch_msg_api.py.tpl ADDED
@@ -0,0 +1 @@
+ """$project_name: A Flower / PyTorch app."""
flwr/cli/new/templates/app/code/client.pytorch_msg_api.py.tpl ADDED
@@ -0,0 +1,80 @@
+ """$project_name: A Flower / $framework_str app."""
+
+ import torch
+ from flwr.client import ClientApp
+ from flwr.common import ArrayRecord, Context, Message, MetricRecord, RecordDict
+
+ from $import_name.task import Net, load_data
+ from $import_name.task import test as test_fn
+ from $import_name.task import train as train_fn
+
+ # Flower ClientApp
+ app = ClientApp()
+
+
+ @app.train()
+ def train(msg: Message, context: Context):
+     """Train the model on local data."""
+
+     # Load the model and initialize it with the received weights
+     model = Net()
+     model.load_state_dict(msg.content["arrays"].to_torch_state_dict())
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+
+     # Load the data
+     partition_id = context.node_config["partition-id"]
+     num_partitions = context.node_config["num-partitions"]
+     trainloader, _ = load_data(partition_id, num_partitions)
+
+     # Call the training function
+     train_loss = train_fn(
+         model,
+         trainloader,
+         context.run_config["local-epochs"],
+         msg.content["config"]["lr"],
+         device,
+     )
+
+     # Construct and return reply Message
+     model_record = ArrayRecord(model.state_dict())
+     metrics = {
+         "train_loss": train_loss,
+         "num-examples": len(trainloader.dataset),
+     }
+     metric_record = MetricRecord(metrics)
+     content = RecordDict({"arrays": model_record, "metrics": metric_record})
+     return Message(content=content, reply_to=msg)
+
+
+ @app.evaluate()
+ def evaluate(msg: Message, context: Context):
+     """Evaluate the model on local data."""
+
+     # Load the model and initialize it with the received weights
+     model = Net()
+     model.load_state_dict(msg.content["arrays"].to_torch_state_dict())
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+
+     # Load the data
+     partition_id = context.node_config["partition-id"]
+     num_partitions = context.node_config["num-partitions"]
+     _, valloader = load_data(partition_id, num_partitions)
+
+     # Call the evaluation function
+     eval_loss, eval_acc = test_fn(
+         model,
+         valloader,
+         device,
+     )
+
+     # Construct and return reply Message
+     metrics = {
+         "eval_loss": eval_loss,
+         "eval_acc": eval_acc,
+         "num-examples": len(valloader.dataset),
+     }
+     metric_record = MetricRecord(metrics)
+     content = RecordDict({"metrics": metric_record})
+     return Message(content=content, reply_to=msg)
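Note the "num-examples" entry placed in both reply MetricRecords above: the FedAvg hunks further down reference a configurable weighted_by_key, and "num-examples" is presumably the key the strategy expects these templates to supply for weighted averaging of arrays and metrics.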
flwr/cli/new/templates/app/code/server.pytorch_msg_api.py.tpl ADDED
@@ -0,0 +1,49 @@
+ """$project_name: A Flower / $framework_str app."""
+
+ from pprint import pprint
+
+ import torch
+ from flwr.common import ArrayRecord, ConfigRecord, Context
+ from flwr.server import Grid, ServerApp
+ from flwr.serverapp import FedAvg
+
+ from $import_name.task import Net
+
+ # Create ServerApp
+ app = ServerApp()
+
+
+ @app.main()
+ def main(grid: Grid, context: Context) -> None:
+     """Main entry point for the ServerApp."""
+
+     # Read run config
+     fraction_train: float = context.run_config["fraction-train"]
+     num_rounds: int = context.run_config["num-server-rounds"]
+     lr: float = context.run_config["lr"]
+
+     # Load global model
+     global_model = Net()
+     arrays = ArrayRecord(global_model.state_dict())
+
+     # Initialize FedAvg strategy
+     strategy = FedAvg(fraction_train=fraction_train)
+
+     # Start strategy, run FedAvg for `num_rounds`
+     result = strategy.start(
+         grid=grid,
+         initial_arrays=arrays,
+         train_config=ConfigRecord({"lr": lr}),
+         num_rounds=num_rounds,
+     )
+
+     # Log resulting metrics
+     print("\nDistributed train metrics:")
+     pprint(result.train_metrics_clientapp)
+     print("\nDistributed evaluate metrics:")
+     pprint(result.evaluate_metrics_clientapp)
+
+     # Save final model to disk
+     print("\nSaving final model to disk...")
+     state_dict = result.arrays.to_torch_state_dict()
+     torch.save(state_dict, "final_model.pt")
flwr/cli/new/templates/app/code/task.pytorch_msg_api.py.tpl ADDED
@@ -0,0 +1,98 @@
+ """$project_name: A Flower / $framework_str app."""
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from flwr_datasets import FederatedDataset
+ from flwr_datasets.partitioner import IidPartitioner
+ from torch.utils.data import DataLoader
+ from torchvision.transforms import Compose, Normalize, ToTensor
+
+
+ class Net(nn.Module):
+     """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')"""
+
+     def __init__(self):
+         super(Net, self).__init__()
+         self.conv1 = nn.Conv2d(3, 6, 5)
+         self.pool = nn.MaxPool2d(2, 2)
+         self.conv2 = nn.Conv2d(6, 16, 5)
+         self.fc1 = nn.Linear(16 * 5 * 5, 120)
+         self.fc2 = nn.Linear(120, 84)
+         self.fc3 = nn.Linear(84, 10)
+
+     def forward(self, x):
+         x = self.pool(F.relu(self.conv1(x)))
+         x = self.pool(F.relu(self.conv2(x)))
+         x = x.view(-1, 16 * 5 * 5)
+         x = F.relu(self.fc1(x))
+         x = F.relu(self.fc2(x))
+         return self.fc3(x)
+
+
+ fds = None  # Cache FederatedDataset
+
+ pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+
+
+ def apply_transforms(batch):
+     """Apply transforms to the partition from FederatedDataset."""
+     batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
+     return batch
+
+
+ def load_data(partition_id: int, num_partitions: int):
+     """Load partition CIFAR10 data."""
+     # Only initialize `FederatedDataset` once
+     global fds
+     if fds is None:
+         partitioner = IidPartitioner(num_partitions=num_partitions)
+         fds = FederatedDataset(
+             dataset="uoft-cs/cifar10",
+             partitioners={"train": partitioner},
+         )
+     partition = fds.load_partition(partition_id)
+     # Divide data on each node: 80% train, 20% test
+     partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
+     # Construct dataloaders
+     partition_train_test = partition_train_test.with_transform(apply_transforms)
+     trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True)
+     testloader = DataLoader(partition_train_test["test"], batch_size=32)
+     return trainloader, testloader
+
+
+ def train(net, trainloader, epochs, lr, device):
+     """Train the model on the training set."""
+     net.to(device)  # move model to GPU if available
+     criterion = torch.nn.CrossEntropyLoss().to(device)
+     optimizer = torch.optim.Adam(net.parameters(), lr=lr)
+     net.train()
+     running_loss = 0.0
+     for _ in range(epochs):
+         for batch in trainloader:
+             images = batch["img"].to(device)
+             labels = batch["label"].to(device)
+             optimizer.zero_grad()
+             loss = criterion(net(images), labels)
+             loss.backward()
+             optimizer.step()
+             running_loss += loss.item()
+     avg_trainloss = running_loss / len(trainloader)
+     return avg_trainloss
+
+
+ def test(net, testloader, device):
+     """Validate the model on the test set."""
+     net.to(device)
+     criterion = torch.nn.CrossEntropyLoss()
+     correct, loss = 0, 0.0
+     with torch.no_grad():
+         for batch in testloader:
+             images = batch["img"].to(device)
+             labels = batch["label"].to(device)
+             outputs = net(images)
+             loss += criterion(outputs, labels).item()
+             correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
+     accuracy = correct / len(testloader.dataset)
+     loss = loss / len(testloader)
+     return loss, accuracy
flwr/cli/new/templates/app/pyproject.pytorch_msg_api.toml.tpl ADDED
@@ -0,0 +1,53 @@
+ # =====================================================================
+ # For a full TOML configuration guide, check the Flower docs:
+ # https://flower.ai/docs/framework/how-to-configure-pyproject-toml.html
+ # =====================================================================
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "$package_name"
+ version = "1.0.0"
+ description = ""
+ license = "Apache-2.0"
+ # Dependencies for your Flower App
+ dependencies = [
+     "flwr[simulation]>=1.21.0",
+     "flwr-datasets[vision]>=0.5.0",
+     "torch==2.7.1",
+     "torchvision==0.22.1",
+ ]
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["."]
+
+ [tool.flwr.app]
+ publisher = "$username"
+
+ # Point to your ServerApp and ClientApp objects
+ [tool.flwr.app.components]
+ serverapp = "$import_name.server_app:app"
+ clientapp = "$import_name.client_app:app"
+
+ # Custom config values accessible via `context.run_config`
+ [tool.flwr.app.config]
+ num-server-rounds = 3
+ fraction-train = 0.5
+ local-epochs = 1
+ lr = 0.01
+
+ # Default federation to use when running the app
+ [tool.flwr.federations]
+ default = "local-simulation"
+
+ # Local simulation federation with 10 virtual SuperNodes
+ [tool.flwr.federations.local-simulation]
+ options.num-supernodes = 10
+
+ # Remote federation example for use with SuperLink
+ [tool.flwr.federations.remote-federation]
+ address = "<SUPERLINK-ADDRESS>:<PORT>"
+ insecure = true # Remove this line to enable TLS
+ # root-certificates = "<PATH/TO/ca.crt>" # For TLS setup
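The keys under [tool.flwr.app.config] line up with the reads in the templates above: num-server-rounds, fraction-train, and lr are consumed in server_app.py via context.run_config, local-epochs is read in client_app.py, and lr additionally reaches each client inside the ConfigRecord passed as train_config.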
flwr/serverapp/fedavg.py CHANGED
@@ -197,7 +197,7 @@ class FedAvg(Strategy):
          log(
              INFO,
              "aggregate_train: Received %s results and %s failures",
-             len(replies_with_content) - num_errors,
+             len(replies_with_content),
              num_errors,
          )
  
@@ -208,17 +208,19 @@
              check_arrayrecord=True,
          )
  
-         # Aggregate ArrayRecords
-         arrays = aggregate_arrayrecords(
-             replies_with_content,
-             self.weighted_by_key,
-         )
+         arrays, metrics = None, None
+         if replies_with_content:
+             # Aggregate ArrayRecords
+             arrays = aggregate_arrayrecords(
+                 replies_with_content,
+                 self.weighted_by_key,
+             )
  
-         # Aggregate MetricRecords
-         metrics = self.train_metrics_aggr_fn(
-             replies_with_content,
-             self.weighted_by_key,
-         )
+             # Aggregate MetricRecords
+             metrics = self.train_metrics_aggr_fn(
+                 replies_with_content,
+                 self.weighted_by_key,
+             )
          return arrays, metrics
  
      def configure_evaluate(
@@ -273,7 +275,7 @@
          log(
              INFO,
              "aggregate_evaluate: Received %s results and %s failures",
-             len(replies_with_content) - num_errors,
+             len(replies_with_content),
              num_errors,
          )
  
@@ -283,10 +285,11 @@
              weighted_by_key=self.weighted_by_key,
              check_arrayrecord=False,
          )
-
-         # Aggregate MetricRecords
-         metrics = self.evaluate_metrics_aggr_fn(
-             replies_with_content,
-             self.weighted_by_key,
-         )
+         metrics = None
+         if replies_with_content:
+             # Aggregate MetricRecords
+             metrics = self.evaluate_metrics_aggr_fn(
+                 replies_with_content,
+                 self.weighted_by_key,
+             )
          return metrics
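Two behavioral changes in these hunks: the logged results count no longer subtracts num_errors from len(replies_with_content) (which, by its name, already excludes error replies), and aggregation is now skipped entirely when no reply carries usable content, so None is returned instead of aggregating an empty list. A hedged sketch of a custom metrics aggregation callable whose shape mirrors the call sites above (the constructor keyword train_metrics_aggr_fn is an assumption inferred from the attribute name, and the reply type is assumed to be a list of Message objects):

    import statistics

    from flwr.common import Message, MetricRecord


    def median_train_metrics(replies: list[Message], weighted_by_key: str) -> MetricRecord:
        """Aggregate every numeric metric with an unweighted median (illustrative only)."""
        values: dict[str, list[float]] = {}
        for reply in replies:
            # "metrics" is the RecordDict key the client template above uses
            for key, value in reply.content["metrics"].items():
                if key != weighted_by_key and isinstance(value, (int, float)):
                    values.setdefault(key, []).append(float(value))
        return MetricRecord({k: statistics.median(v) for k, v in values.items()})


    # Hypothetical usage with the strategy from the server template:
    # strategy = FedAvg(fraction_train=0.5, train_metrics_aggr_fn=median_train_metrics)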
flwr/serverapp/strategy_utils.py CHANGED
@@ -33,7 +33,6 @@ from flwr.common import (
  from flwr.server import Grid
  
  
- # Define a new exception
  class InconsistentMessageReplies(Exception):
      """Exception triggered when replies are inconsistent and therefore aggregation must
      be skipped."""
flwr_nightly-1.21.0.dev20250828.dist-info/METADATA → flwr_nightly-1.21.0.dev20250830.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: flwr-nightly
- Version: 1.21.0.dev20250828
+ Version: 1.21.0.dev20250830
  Summary: Flower: A Friendly Federated AI Framework
  License: Apache-2.0
  Keywords: Artificial Intelligence,Federated AI,Federated Analytics,Federated Evaluation,Federated Learning,Flower,Machine Learning
flwr_nightly-1.21.0.dev20250828.dist-info/RECORD → flwr_nightly-1.21.0.dev20250830.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
- flwr/__init__.py,sha256=5JdNd_I_ZxHJv2tbnM_Ug2_LQ4DkhZ2FiA7l23V13hU,937
+ flwr/__init__.py,sha256=CXTKPZSm83CVKTxw9j_ge6AEVnqgd1rlcsm_2kPoVkU,1009
  flwr/app/__init__.py,sha256=VNahoTMYIbIQt8EMit-UvliLoJib7uSsvKANJJXUWzM,713
  flwr/app/error.py,sha256=0PwA-E_CAs5P_nWAA0kksVO1A44t4CNLEf7u-Su-uJ0,2342
  flwr/app/metadata.py,sha256=rdMBn0zhIOYmCvmGENQWSQqDwcxwsMJzCle4PQdlc_Y,7331
@@ -17,7 +17,7 @@ flwr/cli/login/__init__.py,sha256=B1SXKU3HCQhWfFDMJhlC7FOl8UsvH4mxysxeBnrfyUE,80
  flwr/cli/login/login.py,sha256=RM1Jiv_VFm3oz4rTHSr3D87X90lW3WzErjBBU7WviWY,4309
  flwr/cli/ls.py,sha256=3YK7cpoImJ7PbjlP_JgYRQWz1GymX2q7Reu-mKJEpao,10957
  flwr/cli/new/__init__.py,sha256=QA1E2QtzPvFCjLTUHnFnJbufuFiGyT_0Y53Wpbvg1F0,790
- flwr/cli/new/new.py,sha256=Y8m4t8SmI_myr1tAcncHPE1_-y-dpn6BwKIVW1wj8xk,10071
+ flwr/cli/new/new.py,sha256=46QuAi7Act3_TbD0IkejUhognXPXlo2r3LRPvN8pEkA,10503
  flwr/cli/new/templates/__init__.py,sha256=FpjWCfIySU2DB4kh0HOXLAjlZNNFDTVU4w3HoE2TzcI,725
  flwr/cli/new/templates/app/.gitignore.tpl,sha256=HZJcGQoxp7aUzaPg8Uqch3kNrIESwr9yjimDxJYgXVY,3104
  flwr/cli/new/templates/app/LICENSE.tpl,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
@@ -28,12 +28,14 @@ flwr/cli/new/templates/app/__init__.py,sha256=LbR0ksGiF566JcHM_H5m1Tc4-oYUEilWFl
  flwr/cli/new/templates/app/code/__init__.baseline.py.tpl,sha256=YkHAgppUeD2BnBoGfVB6dEvBfjuIPGsU1gw4CiUi3qA,40
  flwr/cli/new/templates/app/code/__init__.py,sha256=zXa2YU1swzHxOKDQbwlDMEwVPOUswVeosjkiXNMTgFo,736
  flwr/cli/new/templates/app/code/__init__.py.tpl,sha256=J0Gn74E7khpLyKJVNqOPu7ev93vkcu1PZugsbxtABMw,52
+ flwr/cli/new/templates/app/code/__init__.pytorch_msg_api.py.tpl,sha256=mKIS8MK_X8T9NlmcX1-_c9Bbexc-ueqDIBI7uN6c4dE,45
  flwr/cli/new/templates/app/code/client.baseline.py.tpl,sha256=IYlCZqnaxT2ucP1ReffRNohOkYwNrhtrnDoQBBcrThY,1901
  flwr/cli/new/templates/app/code/client.huggingface.py.tpl,sha256=ifD08KwjdoGieV26hFCgf3PQB6rMhj_NZLo5iUUndm8,1846
  flwr/cli/new/templates/app/code/client.jax.py.tpl,sha256=4EkcGGmbPAa6dgw8GYII-GfrGsu8VU6amRHpJvF0WuA,1319
  flwr/cli/new/templates/app/code/client.mlx.py.tpl,sha256=gOxt_QUTfGFpofdNaxdwTSLZlkTWHPYGix2OGHC1hYE,2376
  flwr/cli/new/templates/app/code/client.numpy.py.tpl,sha256=DKcnz5-KUf693Va056QTKVofFV3ozJZutK4rQyfvRXc,548
  flwr/cli/new/templates/app/code/client.pytorch.py.tpl,sha256=fuxVmZpjHIueNy_aHWF81531vmi8DGu4CYjYDqmUwWo,1705
+ flwr/cli/new/templates/app/code/client.pytorch_msg_api.py.tpl,sha256=iOTWSzArdw70QI7BEhRJbB-eNGjzdEOA020x6Q75OMU,2473
  flwr/cli/new/templates/app/code/client.sklearn.py.tpl,sha256=MfhMN-hayGCc3cZ1XpN0A6f67GRveI_tGbq5kjOeP0Q,1871
  flwr/cli/new/templates/app/code/client.tensorflow.py.tpl,sha256=yBiiU7B9Kf70U52cPkNs_dUpYrrTwbUi2os-PAyheaM,1680
  flwr/cli/new/templates/app/code/dataset.baseline.py.tpl,sha256=jbd_exHAk2-Blu_kVutjPO6a_dkJQWb232zxSeXIZ1k,1453
@@ -50,6 +52,7 @@ flwr/cli/new/templates/app/code/server.jax.py.tpl,sha256=IHk57syZhvO4nWVHGxE9S8f
  flwr/cli/new/templates/app/code/server.mlx.py.tpl,sha256=GAqalaI-U2uRdttNeRn75k1FzdEW3rmgT-ywuKkFdK4,988
  flwr/cli/new/templates/app/code/server.numpy.py.tpl,sha256=xbQlLCKutnOqlbLQPZsaL9WM7vnebTceiU8a0HaUcZk,740
  flwr/cli/new/templates/app/code/server.pytorch.py.tpl,sha256=gvBsGA_Jg9kAH8xTxjzTjMcvBtciuccOwQFbO7ey8tU,916
+ flwr/cli/new/templates/app/code/server.pytorch_msg_api.py.tpl,sha256=IsIdTrA316ciO2_1uvy5mlCnIblNgtuNBVZXS2ocabQ,1381
  flwr/cli/new/templates/app/code/server.sklearn.py.tpl,sha256=JoDYjPU99aKTTfjKsCtKHzMICiOR9pi8JGVBsxFpWO4,1133
  flwr/cli/new/templates/app/code/server.tensorflow.py.tpl,sha256=xMhQ7AumowgLkgUilgjVK7IbpRhPjslhVJU-vID6NY8,856
  flwr/cli/new/templates/app/code/strategy.baseline.py.tpl,sha256=YkHAgppUeD2BnBoGfVB6dEvBfjuIPGsU1gw4CiUi3qA,40
@@ -58,6 +61,7 @@ flwr/cli/new/templates/app/code/task.jax.py.tpl,sha256=jK03Y0HUvVFjUB-cYnvYB-WCR
  flwr/cli/new/templates/app/code/task.mlx.py.tpl,sha256=YxH5z4s5kOh5_9DIY9pvzqURckLDfgdanTA68_iM_Wo,2946
  flwr/cli/new/templates/app/code/task.numpy.py.tpl,sha256=MsjJK8DAvM3ex6JTfZuBVqoBKJfCCjIHMUOPkspiSQ0,124
  flwr/cli/new/templates/app/code/task.pytorch.py.tpl,sha256=XlJqA4Ix_PloO_zJLhjiN5vDj16w3I4CPVGdmbe8asE,3800
+ flwr/cli/new/templates/app/code/task.pytorch_msg_api.py.tpl,sha256=RKA5lV6O6OnVKZ2r75pbzwy9arg5o2lzXqG2kNrLIUU,3446
  flwr/cli/new/templates/app/code/task.sklearn.py.tpl,sha256=vHdhtMp0FHxbYafXyhDT9aKmmmA0Jvpx5Oum1Yu9lWY,1850
  flwr/cli/new/templates/app/code/task.tensorflow.py.tpl,sha256=SKXAZdgBnPpbAbJ90Rb7oQ5ilnopBx_j_JNFoUDeEAI,1732
  flwr/cli/new/templates/app/code/utils.baseline.py.tpl,sha256=YkHAgppUeD2BnBoGfVB6dEvBfjuIPGsU1gw4CiUi3qA,40
@@ -68,6 +72,7 @@ flwr/cli/new/templates/app/pyproject.jax.toml.tpl,sha256=BHGb2N57Xm2scO1mqDSNgK_
  flwr/cli/new/templates/app/pyproject.mlx.toml.tpl,sha256=O6eN6Zqx2ieh-WbaiMYmxfhCrLKJjwymn_nhUDEOldM,1542
  flwr/cli/new/templates/app/pyproject.numpy.toml.tpl,sha256=oMTI8qXSgQsGlZextUpkWFvNOMlnWbzn2EocPSwDrtw,1409
  flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl,sha256=bu65oSrM85fP_H0-RlMS2i8XgL_8O5TfSHLW87lb30s,1508
+ flwr/cli/new/templates/app/pyproject.pytorch_msg_api.toml.tpl,sha256=fS9Brr-dXYEWGhH33ejpNK5C4J7Monf8VFYYfwo7UHo,1490
  flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl,sha256=mAEPeBfGyrINgRuP6-nX_KJNTQjC4E5N1Nrcddxiffs,1484
  flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl,sha256=mK8wOWqoQOVxZG6-OVwA2ChmKxexC7TfQV0ztPE4BWY,1508
  flwr/cli/run/__init__.py,sha256=RPyB7KbYTFl6YRiilCch6oezxrLQrl1kijV7BMGkLbA,790
@@ -321,10 +326,10 @@ flwr/server/workflow/secure_aggregation/__init__.py,sha256=vGkycLb65CxdaMkKsANxQ
  flwr/server/workflow/secure_aggregation/secagg_workflow.py,sha256=b_pKk7gmbahwyj0ftOOLXvu-AMtRHEc82N9PJTEO8dc,5839
  flwr/server/workflow/secure_aggregation/secaggplus_workflow.py,sha256=DkayCsnlAya6Y2PZsueLgoUCMRtV-GbnW08RfWx_SXM,29460
  flwr/serverapp/__init__.py,sha256=SRPsqsa4pOfcF9J3_i1hb9KJi3z4KDTTCqCTwv7DcK0,864
- flwr/serverapp/fedavg.py,sha256=Z051Z3XBYmaMzIKRn5uSlqb9FrRTUAXxuoMurMZn3PE,10861
+ flwr/serverapp/fedavg.py,sha256=C8UUvLTjodMpGRb4PNej5gW2cPbXsPKebGX1zPfAMUo,11020
  flwr/serverapp/result.py,sha256=rw1ZoCGBosSVSNrTLLUFMxP1XzDwJWWsn1qdBR7JtlI,1229
  flwr/serverapp/strategy.py,sha256=1mxxtA5Pyg9lZ1d3g4OCL-m8YR_0E3HUGl8Gv5BGOXY,10982
- flwr/serverapp/strategy_utils.py,sha256=P2DO3pcrDTDYcrjkmYuL79Bbv2boj7T4bZ42EeRTyYk,9412
+ flwr/serverapp/strategy_utils.py,sha256=aDlDh1TJT7oU29FiJ6tckomRAOzhhMYccrrXbynQh9o,9387
  flwr/serverapp/strategy_utils_tests.py,sha256=taG6HwApwutkjUuMY3R8Ib48Xepw6g5xl9HEB_-leoY,9232
  flwr/simulation/__init__.py,sha256=Gg6OsP1Z-ixc3-xxzvl7j7rz2Fijy9rzyEPpxgAQCeM,1556
  flwr/simulation/app.py,sha256=LbGLMvN9Ap119yBqsUcNNmVLRnCySnr4VechqcQ1hpA,10401
@@ -385,7 +390,7 @@ flwr/supernode/servicer/__init__.py,sha256=lucTzre5WPK7G1YLCfaqg3rbFWdNSb7ZTt-ca
  flwr/supernode/servicer/clientappio/__init__.py,sha256=7Oy62Y_oijqF7Dxi6tpcUQyOpLc_QpIRZ83NvwmB0Yg,813
  flwr/supernode/servicer/clientappio/clientappio_servicer.py,sha256=nIHRu38EWK-rpNOkcgBRAAKwYQQWFeCwu0lkO7OPZGQ,10239
  flwr/supernode/start_client_internal.py,sha256=ftS8GOyT9M1tOWpbobN_Xrz4xwPAPOvsTGiWSfzhheE,20269
- flwr_nightly-1.21.0.dev20250828.dist-info/METADATA,sha256=Hqd4_UojWeV3HrmRXzHI8iwLUoZ_6rkUi9pWd0dNbhg,15967
- flwr_nightly-1.21.0.dev20250828.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- flwr_nightly-1.21.0.dev20250828.dist-info/entry_points.txt,sha256=hxHD2ixb_vJFDOlZV-zB4Ao32_BQlL34ftsDh1GXv14,420
- flwr_nightly-1.21.0.dev20250828.dist-info/RECORD,,
+ flwr_nightly-1.21.0.dev20250830.dist-info/METADATA,sha256=YofYgYz-tDJLkDA7ow_0v4LeCtwEzQauJdLGtqdgh3A,15967
+ flwr_nightly-1.21.0.dev20250830.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ flwr_nightly-1.21.0.dev20250830.dist-info/entry_points.txt,sha256=hxHD2ixb_vJFDOlZV-zB4Ao32_BQlL34ftsDh1GXv14,420
+ flwr_nightly-1.21.0.dev20250830.dist-info/RECORD,,