flwr-nightly 1.9.0.dev20240420__py3-none-any.whl → 1.9.0.dev20240509__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of flwr-nightly might be problematic.

Files changed (71)
  1. flwr/cli/app.py +2 -0
  2. flwr/cli/build.py +151 -0
  3. flwr/cli/config_utils.py +18 -46
  4. flwr/cli/new/new.py +44 -18
  5. flwr/cli/new/templates/app/code/client.hf.py.tpl +55 -0
  6. flwr/cli/new/templates/app/code/client.mlx.py.tpl +70 -0
  7. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +1 -1
  8. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +94 -0
  9. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +15 -29
  10. flwr/cli/new/templates/app/code/server.hf.py.tpl +17 -0
  11. flwr/cli/new/templates/app/code/server.mlx.py.tpl +15 -0
  12. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +1 -1
  13. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +17 -0
  14. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +9 -1
  15. flwr/cli/new/templates/app/code/task.hf.py.tpl +87 -0
  16. flwr/cli/new/templates/app/code/task.mlx.py.tpl +89 -0
  17. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +29 -0
  18. flwr/cli/new/templates/app/pyproject.hf.toml.tpl +31 -0
  19. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +28 -0
  20. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +7 -4
  21. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +7 -4
  22. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +27 -0
  23. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +7 -4
  24. flwr/cli/run/run.py +1 -1
  25. flwr/cli/utils.py +18 -17
  26. flwr/client/__init__.py +1 -1
  27. flwr/client/app.py +17 -93
  28. flwr/client/grpc_client/connection.py +6 -1
  29. flwr/client/grpc_rere_client/client_interceptor.py +158 -0
  30. flwr/client/grpc_rere_client/connection.py +17 -2
  31. flwr/client/mod/centraldp_mods.py +4 -2
  32. flwr/client/mod/localdp_mod.py +9 -3
  33. flwr/client/rest_client/connection.py +5 -1
  34. flwr/client/supernode/__init__.py +2 -0
  35. flwr/client/supernode/app.py +181 -7
  36. flwr/common/grpc.py +5 -1
  37. flwr/common/logger.py +37 -4
  38. flwr/common/message.py +105 -86
  39. flwr/common/record/parametersrecord.py +0 -1
  40. flwr/common/record/recordset.py +17 -5
  41. flwr/common/secure_aggregation/crypto/symmetric_encryption.py +35 -1
  42. flwr/server/__init__.py +0 -2
  43. flwr/server/app.py +118 -2
  44. flwr/server/compat/app.py +5 -56
  45. flwr/server/compat/app_utils.py +1 -1
  46. flwr/server/compat/driver_client_proxy.py +27 -72
  47. flwr/server/driver/__init__.py +3 -0
  48. flwr/server/driver/driver.py +12 -242
  49. flwr/server/driver/grpc_driver.py +315 -0
  50. flwr/server/history.py +20 -20
  51. flwr/server/run_serverapp.py +18 -4
  52. flwr/server/server.py +2 -5
  53. flwr/server/strategy/dp_adaptive_clipping.py +5 -3
  54. flwr/server/strategy/dp_fixed_clipping.py +6 -3
  55. flwr/server/superlink/driver/driver_servicer.py +1 -1
  56. flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +3 -1
  57. flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +215 -0
  58. flwr/server/superlink/fleet/vce/backend/raybackend.py +9 -6
  59. flwr/server/superlink/fleet/vce/vce_api.py +1 -1
  60. flwr/server/superlink/state/in_memory_state.py +76 -8
  61. flwr/server/superlink/state/sqlite_state.py +116 -11
  62. flwr/server/superlink/state/state.py +35 -3
  63. flwr/simulation/__init__.py +2 -2
  64. flwr/simulation/app.py +16 -1
  65. flwr/simulation/run_simulation.py +14 -9
  66. {flwr_nightly-1.9.0.dev20240420.dist-info → flwr_nightly-1.9.0.dev20240509.dist-info}/METADATA +3 -2
  67. {flwr_nightly-1.9.0.dev20240420.dist-info → flwr_nightly-1.9.0.dev20240509.dist-info}/RECORD +70 -55
  68. {flwr_nightly-1.9.0.dev20240420.dist-info → flwr_nightly-1.9.0.dev20240509.dist-info}/entry_points.txt +1 -1
  69. flwr/server/driver/abc_driver.py +0 -140
  70. {flwr_nightly-1.9.0.dev20240420.dist-info → flwr_nightly-1.9.0.dev20240509.dist-info}/LICENSE +0 -0
  71. {flwr_nightly-1.9.0.dev20240420.dist-info → flwr_nightly-1.9.0.dev20240509.dist-info}/WHEEL +0 -0
flwr/cli/new/templates/app/code/client.tensorflow.py.tpl CHANGED
@@ -1,21 +1,19 @@
 """$project_name: A Flower / TensorFlow app."""
 
-import os
+from flwr.client import NumPyClient, ClientApp
 
-import tensorflow as tf
-from flwr.client import ClientApp, NumPyClient
-from flwr_datasets import FederatedDataset
+from $import_name.task import load_data, load_model
 
 
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-
-# Define Flower client
+# Define Flower Client and client_fn
 class FlowerClient(NumPyClient):
-    def __init__(self, model, train_data, test_data):
+    def __init__(self, model, x_train, y_train, x_test, y_test):
         self.model = model
-        self.x_train, self.y_train = train_data
-        self.x_test, self.y_test = test_data
-
+        self.x_train = x_train
+        self.y_train = y_train
+        self.x_test = x_test
+        self.y_test = y_test
+
     def get_parameters(self, config):
         return self.model.get_weights()
 
@@ -30,25 +28,13 @@ class FlowerClient(NumPyClient):
         return loss, len(self.x_test), {"accuracy": accuracy}
 
 
-fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2})
-
-def client_fn(cid: str):
-    """Create and return an instance of Flower `Client`."""
-
-    # Load model and data (MobileNetV2, CIFAR-10)
-    model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
-    model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])
-
-    # Download and partition dataset
-    partition = fds.load_partition(int(cid), "train")
-    partition.set_format("numpy")
-
-    # Divide data on each node: 80% train, 20% test
-    partition = partition.train_test_split(test_size=0.2, seed=42)
-    train_data = partition["train"]["img"] / 255.0, partition["train"]["label"]
-    test_data = partition["test"]["img"] / 255.0, partition["test"]["label"]
+def client_fn(cid):
+    # Load model and data
+    net = load_model()
+    x_train, y_train, x_test, y_test = load_data(int(cid), 2)
 
-    return FlowerClient(model, train_data, test_data).to_client()
+    # Return Client instance
+    return FlowerClient(net, x_train, y_train, x_test, y_test).to_client()
 
 
 # Flower ClientApp
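
The hunk ends on the template's "# Flower ClientApp" comment; in the generated project this is followed by turning client_fn into a ClientApp. A minimal sketch of that final wiring, assuming the client_fn and FlowerClient defined in the hunk above:

from flwr.client import ClientApp

# Wrap the template's client_fn so the Flower runtime can instantiate clients on demand
app = ClientApp(client_fn=client_fn)
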
flwr/cli/new/templates/app/code/server.hf.py.tpl ADDED
@@ -0,0 +1,17 @@
+"""$project_name: A Flower / HuggingFace Transformers app."""
+
+from flwr.server.strategy import FedAvg
+from flwr.server import ServerApp, ServerConfig
+
+
+# Define strategy
+strategy = FedAvg(
+    fraction_fit=1.0,
+    fraction_evaluate=1.0,
+)
+
+# Start server
+app = ServerApp(
+    config=ServerConfig(num_rounds=3),
+    strategy=strategy,
+)
flwr/cli/new/templates/app/code/server.mlx.py.tpl ADDED
@@ -0,0 +1,15 @@
+"""$project_name: A Flower / MLX app."""
+
+from flwr.server import ServerApp, ServerConfig
+from flwr.server.strategy import FedAvg
+
+
+# Define strategy
+strategy = FedAvg()
+
+
+# Create ServerApp
+app = ServerApp(
+    config=ServerConfig(num_rounds=3),
+    strategy=strategy,
+)
flwr/cli/new/templates/app/code/server.pytorch.py.tpl CHANGED
@@ -4,7 +4,7 @@ from flwr.common import ndarrays_to_parameters
 from flwr.server import ServerApp, ServerConfig
 from flwr.server.strategy import FedAvg
 
-from $project_name.task import Net, get_weights
+from $import_name.task import Net, get_weights
 
 
 # Initialize model parameters
flwr/cli/new/templates/app/code/server.sklearn.py.tpl ADDED
@@ -0,0 +1,17 @@
+"""$project_name: A Flower / Scikit-Learn app."""
+
+from flwr.server import ServerApp, ServerConfig
+from flwr.server.strategy import FedAvg
+
+
+strategy = FedAvg(
+    fraction_fit=1.0,
+    fraction_evaluate=1.0,
+    min_available_clients=2,
+)
+
+# Create ServerApp
+app = ServerApp(
+    config=ServerConfig(num_rounds=3),
+    strategy=strategy,
+)
flwr/cli/new/templates/app/code/server.tensorflow.py.tpl CHANGED
@@ -1,18 +1,26 @@
 """$project_name: A Flower / TensorFlow app."""
 
+from flwr.common import ndarrays_to_parameters
 from flwr.server import ServerApp, ServerConfig
 from flwr.server.strategy import FedAvg
 
+from $import_name.task import load_model
+
 # Define config
 config = ServerConfig(num_rounds=3)
 
+parameters = ndarrays_to_parameters(load_model().get_weights())
+
+# Define strategy
 strategy = FedAvg(
     fraction_fit=1.0,
     fraction_evaluate=1.0,
     min_available_clients=2,
+    initial_parameters=parameters,
 )
 
-# Flower ServerApp
+
+# Create ServerApp
 app = ServerApp(
     config=config,
     strategy=strategy,
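
The notable addition here is seeding FedAvg with initial_parameters, so the server can start round 1 from the weights of a freshly built Keras model rather than requesting initial weights from a connected client. ndarrays_to_parameters converts a list of NumPy arrays into Flower's wire-format Parameters; a small illustrative round-trip (not part of the template):

import numpy as np
from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays

# Convert a list of NumPy ndarrays (e.g. Keras model.get_weights()) into Parameters ...
params = ndarrays_to_parameters([np.zeros((3, 3)), np.ones(3)])
# ... and back again
restored = parameters_to_ndarrays(params)
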
flwr/cli/new/templates/app/code/task.hf.py.tpl ADDED
@@ -0,0 +1,87 @@
+"""$project_name: A Flower / HuggingFace Transformers app."""
+
+import warnings
+from collections import OrderedDict
+
+import torch
+from evaluate import load as load_metric
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoTokenizer, DataCollatorWithPadding
+
+from flwr_datasets import FederatedDataset
+
+warnings.filterwarnings("ignore", category=UserWarning)
+DEVICE = torch.device("cpu")
+CHECKPOINT = "distilbert-base-uncased"  # transformer model checkpoint
+
+
+def load_data(partition_id, num_clients):
+    """Load IMDB data (training and eval)"""
+    fds = FederatedDataset(dataset="imdb", partitioners={"train": num_clients})
+    partition = fds.load_partition(partition_id)
+    # Divide data: 80% train, 20% test
+    partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
+
+    tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
+
+    def tokenize_function(examples):
+        return tokenizer(examples["text"], truncation=True)
+
+    partition_train_test = partition_train_test.map(tokenize_function, batched=True)
+    partition_train_test = partition_train_test.remove_columns("text")
+    partition_train_test = partition_train_test.rename_column("label", "labels")
+
+    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+    trainloader = DataLoader(
+        partition_train_test["train"],
+        shuffle=True,
+        batch_size=32,
+        collate_fn=data_collator,
+    )
+
+    testloader = DataLoader(
+        partition_train_test["test"], batch_size=32, collate_fn=data_collator
+    )
+
+    return trainloader, testloader
+
+
+def train(net, trainloader, epochs):
+    optimizer = AdamW(net.parameters(), lr=5e-5)
+    net.train()
+    for _ in range(epochs):
+        for batch in trainloader:
+            batch = {k: v.to(DEVICE) for k, v in batch.items()}
+            outputs = net(**batch)
+            loss = outputs.loss
+            loss.backward()
+            optimizer.step()
+            optimizer.zero_grad()
+
+
+def test(net, testloader):
+    metric = load_metric("accuracy")
+    loss = 0
+    net.eval()
+    for batch in testloader:
+        batch = {k: v.to(DEVICE) for k, v in batch.items()}
+        with torch.no_grad():
+            outputs = net(**batch)
+        logits = outputs.logits
+        loss += outputs.loss.item()
+        predictions = torch.argmax(logits, dim=-1)
+        metric.add_batch(predictions=predictions, references=batch["labels"])
+    loss /= len(testloader.dataset)
+    accuracy = metric.compute()["accuracy"]
+    return loss, accuracy
+
+
+def get_weights(net):
+    return [val.cpu().numpy() for _, val in net.state_dict().items()]
+
+
+def set_weights(net, parameters):
+    params_dict = zip(net.state_dict().keys(), parameters)
+    state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
+    net.load_state_dict(state_dict, strict=True)
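
This task module is consumed by the matching client template (client.hf.py.tpl, listed above but not shown in this diff). A rough sketch of how its helpers could be wired into a NumPyClient; the package name "myapp" and the choice of AutoModelForSequenceClassification are assumptions for illustration, not the template's exact contents:

from flwr.client import ClientApp, NumPyClient
from transformers import AutoModelForSequenceClassification

from myapp.task import (  # "myapp" stands in for the generated package
    CHECKPOINT,
    get_weights,
    load_data,
    set_weights,
    test,
    train,
)


class FlowerClient(NumPyClient):
    def __init__(self, net, trainloader, testloader):
        self.net = net
        self.trainloader = trainloader
        self.testloader = testloader

    def fit(self, parameters, config):
        set_weights(self.net, parameters)           # load the global weights
        train(self.net, self.trainloader, epochs=1)
        return get_weights(self.net), len(self.trainloader.dataset), {}

    def evaluate(self, parameters, config):
        set_weights(self.net, parameters)
        loss, accuracy = test(self.net, self.testloader)
        return float(loss), len(self.testloader.dataset), {"accuracy": accuracy}


def client_fn(cid):
    net = AutoModelForSequenceClassification.from_pretrained(CHECKPOINT, num_labels=2)
    trainloader, testloader = load_data(int(cid), 2)
    return FlowerClient(net, trainloader, testloader).to_client()


app = ClientApp(client_fn=client_fn)
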
flwr/cli/new/templates/app/code/task.mlx.py.tpl ADDED
@@ -0,0 +1,89 @@
+"""$project_name: A Flower / MLX app."""
+
+import mlx.core as mx
+import mlx.nn as nn
+import numpy as np
+from datasets.utils.logging import disable_progress_bar
+from flwr_datasets import FederatedDataset
+
+
+disable_progress_bar()
+
+class MLP(nn.Module):
+    """A simple MLP."""
+
+    def __init__(
+        self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int
+    ):
+        super().__init__()
+        layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim]
+        self.layers = [
+            nn.Linear(idim, odim)
+            for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:])
+        ]
+
+    def __call__(self, x):
+        for l in self.layers[:-1]:
+            x = mx.maximum(l(x), 0.0)
+        return self.layers[-1](x)
+
+
+def loss_fn(model, X, y):
+    return mx.mean(nn.losses.cross_entropy(model(X), y))
+
+
+def eval_fn(model, X, y):
+    return mx.mean(mx.argmax(model(X), axis=1) == y)
+
+
+def batch_iterate(batch_size, X, y):
+    perm = mx.array(np.random.permutation(y.size))
+    for s in range(0, y.size, batch_size):
+        ids = perm[s : s + batch_size]
+        yield X[ids], y[ids]
+
+
+def load_data(partition_id, num_clients):
+    fds = FederatedDataset(dataset="mnist", partitioners={"train": num_clients})
+    partition = fds.load_partition(partition_id)
+    partition_splits = partition.train_test_split(test_size=0.2, seed=42)
+
+    partition_splits["train"].set_format("numpy")
+    partition_splits["test"].set_format("numpy")
+
+    train_partition = partition_splits["train"].map(
+        lambda img: {
+            "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0
+        },
+        input_columns="image",
+    )
+    test_partition = partition_splits["test"].map(
+        lambda img: {
+            "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0
+        },
+        input_columns="image",
+    )
+
+    data = (
+        train_partition["img"],
+        train_partition["label"].astype(np.uint32),
+        test_partition["img"],
+        test_partition["label"].astype(np.uint32),
+    )
+
+    train_images, train_labels, test_images, test_labels = map(mx.array, data)
+    return train_images, train_labels, test_images, test_labels
+
+
+def get_params(model):
+    layers = model.parameters()["layers"]
+    return [np.array(val) for layer in layers for _, val in layer.items()]
+
+
+def set_params(model, parameters):
+    new_params = {}
+    new_params["layers"] = [
+        {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])}
+        for i in range(0, len(parameters), 2)
+    ]
+    model.update(new_params)
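
get_params and set_params flatten the MLP's per-layer weight/bias parameters into a flat list of NumPy arrays and back, which is the form a Flower NumPyClient exchanges with the server. A small illustrative round-trip, assuming the MLP defined above (layer sizes chosen arbitrarily for the example):

# Build an MNIST-sized MLP and round-trip its parameters
model = MLP(num_layers=2, input_dim=28 * 28, hidden_dim=32, output_dim=10)
ndarrays = get_params(model)   # [weight_0, bias_0, weight_1, bias_1, ...] as NumPy arrays
set_params(model, ndarrays)    # rebuilds {"weight": ..., "bias": ...} per layer and updates the model
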
flwr/cli/new/templates/app/code/task.tensorflow.py.tpl ADDED
@@ -0,0 +1,29 @@
+"""$project_name: A Flower / TensorFlow app."""
+
+import os
+
+import tensorflow as tf
+from flwr_datasets import FederatedDataset
+
+
+# Make TensorFlow log less verbose
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+def load_model():
+    # Load model and data (MobileNetV2, CIFAR-10)
+    model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
+    model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])
+    return model
+
+
+def load_data(partition_id, num_partitions):
+    # Download and partition dataset
+    fds = FederatedDataset(dataset="cifar10", partitioners={"train": num_partitions})
+    partition = fds.load_partition(partition_id, "train")
+    partition.set_format("numpy")
+
+    # Divide data on each node: 80% train, 20% test
+    partition = partition.train_test_split(test_size=0.2)
+    x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
+    x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]
+    return x_train, y_train, x_test, y_test
flwr/cli/new/templates/app/pyproject.hf.toml.tpl ADDED
@@ -0,0 +1,31 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "$package_name"
+version = "1.0.0"
+description = ""
+authors = [
+    { name = "The Flower Authors", email = "hello@flower.ai" },
+]
+license = { text = "Apache License (2.0)" }
+dependencies = [
+    "flwr[simulation]>=1.8.0,<2.0",
+    "flwr-datasets>=0.0.2,<1.0.0",
+    "torch==2.2.1",
+    "transformers>=4.30.0,<5.0"
+    "evaluate>=0.4.0,<1.0"
+    "datasets>=2.0.0, <3.0"
+    "scikit-learn>=1.3.1, <2.0"
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
+
+[flower]
+publisher = "$username"
+
+[flower.components]
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/new/templates/app/pyproject.mlx.toml.tpl ADDED
@@ -0,0 +1,28 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "$package_name"
+version = "1.0.0"
+description = ""
+authors = [
+    { name = "The Flower Authors", email = "hello@flower.ai" },
+]
+license = { text = "Apache License (2.0)" }
+dependencies = [
+    "flwr[simulation]>=1.8.0,<2.0",
+    "flwr-datasets[vision]>=0.0.2,<1.0.0",
+    "mlx==0.10.0",
+    "numpy==1.24.4",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
+
+[flower]
+publisher = "$username"
+
+[flower.components]
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/new/templates/app/pyproject.numpy.toml.tpl CHANGED
@@ -3,13 +3,13 @@ requires = ["hatchling"]
 build-backend = "hatchling.build"
 
 [project]
-name = "$project_name"
+name = "$package_name"
 version = "1.0.0"
 description = ""
 authors = [
     { name = "The Flower Authors", email = "hello@flower.ai" },
 ]
-license = {text = "Apache License (2.0)"}
+license = { text = "Apache License (2.0)" }
 dependencies = [
     "flwr[simulation]>=1.8.0,<2.0",
     "numpy>=1.21.0",
@@ -18,6 +18,9 @@ dependencies = [
 [tool.hatch.build.targets.wheel]
 packages = ["."]
 
+[flower]
+publisher = "$username"
+
 [flower.components]
-serverapp = "$project_name.server:app"
-clientapp = "$project_name.client:app"
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl CHANGED
@@ -3,13 +3,13 @@ requires = ["hatchling"]
 build-backend = "hatchling.build"
 
 [project]
-name = "$project_name"
+name = "$package_name"
 version = "1.0.0"
 description = ""
 authors = [
     { name = "The Flower Authors", email = "hello@flower.ai" },
 ]
-license = {text = "Apache License (2.0)"}
+license = { text = "Apache License (2.0)" }
 dependencies = [
     "flwr[simulation]>=1.8.0,<2.0",
     "flwr-datasets[vision]>=0.0.2,<1.0.0",
@@ -20,6 +20,9 @@ dependencies = [
 [tool.hatch.build.targets.wheel]
 packages = ["."]
 
+[flower]
+publisher = "$username"
+
 [flower.components]
-serverapp = "$project_name.server:app"
-clientapp = "$project_name.client:app"
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl ADDED
@@ -0,0 +1,27 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "$package_name"
+version = "1.0.0"
+description = ""
+authors = [
+    { name = "The Flower Authors", email = "hello@flower.ai" },
+]
+license = { text = "Apache License (2.0)" }
+dependencies = [
+    "flwr[simulation]>=1.8.0,<2.0",
+    "flwr-datasets[vision]>=0.0.2,<1.0.0",
+    "scikit-learn>=1.1.1",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
+
+[flower]
+publisher = "$username"
+
+[flower.components]
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl CHANGED
@@ -3,13 +3,13 @@ requires = ["hatchling"]
 build-backend = "hatchling.build"
 
 [project]
-name = "$project_name"
+name = "$package_name"
 version = "1.0.0"
 description = ""
 authors = [
     { name = "The Flower Authors", email = "hello@flower.ai" },
 ]
-license = {text = "Apache License (2.0)"}
+license = { text = "Apache License (2.0)" }
 dependencies = [
     "flwr[simulation]>=1.8.0,<2.0",
     "flwr-datasets[vision]>=0.0.2,<1.0.0",
@@ -19,6 +19,9 @@ dependencies = [
 [tool.hatch.build.targets.wheel]
 packages = ["."]
 
+[flower]
+publisher = "$username"
+
 [flower.components]
-serverapp = "$project_name.server:app"
-clientapp = "$project_name.client:app"
+serverapp = "$import_name.server:app"
+clientapp = "$import_name.client:app"
flwr/cli/run/run.py CHANGED
@@ -30,7 +30,7 @@ def run() -> None:
 
     if config is None:
         typer.secho(
-            "Project configuration could not be loaded.\nflower.toml is invalid:\n"
+            "Project configuration could not be loaded.\npyproject.toml is invalid:\n"
             + "\n".join([f"- {line}" for line in errors]),
             fg=typer.colors.RED,
             bold=True,
flwr/cli/utils.py CHANGED
@@ -14,6 +14,7 @@
 # ==============================================================================
 """Flower command line interface utils."""
 
+import re
 from typing import Callable, List, Optional, cast
 
 import typer
@@ -73,51 +74,51 @@ def prompt_options(text: str, options: List[str]) -> str:
 
 
 def is_valid_project_name(name: str) -> bool:
-    """Check if the given string is a valid Python module name.
+    """Check if the given string is a valid Python project name.
 
-    A valid module name must start with a letter or an underscore, and can only contain
-    letters, digits, and underscores.
+    A valid project name must start with a letter and can only contain letters, digits,
+    and hyphens.
     """
     if not name:
         return False
 
-    # Check if the first character is a letter or underscore
-    if not (name[0].isalpha() or name[0] == "_"):
+    # Check if the first character is a letter
+    if not name[0].isalpha():
         return False
 
-    # Check if the rest of the characters are valid (letter, digit, or underscore)
+    # Check if the rest of the characters are valid (letter, digit, or dash)
     for char in name[1:]:
-        if not (char.isalnum() or char == "_"):
+        if not (char.isalnum() or char in "-"):
            return False
 
     return True
 
 
 def sanitize_project_name(name: str) -> str:
-    """Sanitize the given string to make it a valid Python module name.
+    """Sanitize the given string to make it a valid Python project name.
 
-    This version replaces hyphens with underscores, removes any characters not allowed
-    in Python module names, makes the string lowercase, and ensures it starts with a
-    valid character.
+    This version replaces spaces, dots, slashes, and underscores with dashes, removes
+    any characters not allowed in Python project names, makes the string lowercase, and
+    ensures it starts with a valid character.
     """
-    # Replace '-' with '_'
-    name_with_underscores = name.replace("-", "_").replace(" ", "_")
+    # Replace whitespace with '_'
+    name_with_hyphens = re.sub(r"[ ./_]", "-", name)
 
     # Allowed characters in a module name: letters, digits, underscore
    allowed_chars = set(
-        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
+        "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-"
    )
 
    # Make the string lowercase
-    sanitized_name = name_with_underscores.lower()
+    sanitized_name = name_with_hyphens.lower()
 
    # Remove any characters not allowed in Python module names
    sanitized_name = "".join(c for c in sanitized_name if c in allowed_chars)
 
    # Ensure the first character is a letter or underscore
-    if sanitized_name and (
+    while sanitized_name and (
        sanitized_name[0].isdigit() or sanitized_name[0] not in allowed_chars
    ):
-        sanitized_name = "_" + sanitized_name
+        sanitized_name = sanitized_name[1:]
 
    return sanitized_name
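
The net effect is that the CLI now steers users toward dashed project names instead of underscore module names. A few illustrative calls implied by the implementation above (not taken from the package's tests):

sanitize_project_name("My Flower_App.Demo")  # -> "my-flower-app-demo"
sanitize_project_name("123flower")           # -> "flower" (leading digits are dropped, not prefixed with "_")
is_valid_project_name("my-flower-app")       # -> True
is_valid_project_name("my_flower_app")       # -> False (underscores are no longer accepted)
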
flwr/client/__init__.py CHANGED
@@ -15,12 +15,12 @@
 """Flower client."""
 
 
-from .app import run_client_app as run_client_app
 from .app import start_client as start_client
 from .app import start_numpy_client as start_numpy_client
 from .client import Client as Client
 from .client_app import ClientApp as ClientApp
 from .numpy_client import NumPyClient as NumPyClient
+from .supernode import run_client_app as run_client_app
 from .supernode import run_supernode as run_supernode
 from .typing import ClientFn as ClientFn
 
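
run_client_app is now re-exported from the new flwr.client.supernode package rather than flwr.client.app, so the public import path is unchanged for callers. A sketch of the unchanged surface:

# All of these imports keep working exactly as before this release
from flwr.client import (
    Client,
    ClientApp,
    NumPyClient,
    run_client_app,   # implementation moved to flwr.client.supernode
    run_supernode,
    start_client,
)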