flwr-nightly 1.10.0.dev20240620__py3-none-any.whl → 1.10.0.dev20240621__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


flwr/cli/new/new.py CHANGED
@@ -41,6 +41,16 @@ class MlFramework(str, Enum):
     HUGGINGFACE = "HF"
     MLX = "MLX"
     SKLEARN = "sklearn"
+    FLOWERTUNE = "FlowerTune"
+
+
+class LlmChallengeName(str, Enum):
+    """Available LLM challenges."""
+
+    GENERALNLP = "GeneralNLP"
+    FINANCE = "Finance"
+    MEDICAL = "Medical"
+    CODE = "Code"
 
 
 class TemplateNotFound(Exception):
@@ -81,6 +91,7 @@ def render_and_create(file_path: str, template: str, context: Dict[str, str]) ->
     create_file(file_path, content)
 
 
+# pylint: disable=too-many-locals,too-many-branches,too-many-statements
 def new(
     project_name: Annotated[
         Optional[str],
@@ -125,6 +136,19 @@ def new(
 
     framework_str = framework_str.lower()
 
+    if framework_str == "flowertune":
+        llm_challenge_value = prompt_options(
+            "Please select LLM challenge by typing in the number",
+            sorted([challenge.value for challenge in LlmChallengeName]),
+        )
+        selected_value = [
+            name
+            for name, value in vars(LlmChallengeName).items()
+            if value == llm_challenge_value
+        ]
+        llm_challenge_str = selected_value[0]
+        llm_challenge_str = llm_challenge_str.lower()
+
     print(
         typer.style(
             f"\n🔨 Creating Flower project {project_name}...",
@@ -139,33 +163,6 @@ def new(
     import_name = package_name.replace("-", "_")
     project_dir = os.path.join(cwd, package_name)
 
-    # List of files to render
-    files = {
-        ".gitignore": {"template": "app/.gitignore.tpl"},
-        "README.md": {"template": "app/README.md.tpl"},
-        "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"},
-        f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"},
-        f"{import_name}/server.py": {
-            "template": f"app/code/server.{framework_str}.py.tpl"
-        },
-        f"{import_name}/client.py": {
-            "template": f"app/code/client.{framework_str}.py.tpl"
-        },
-    }
-
-    # Depending on the framework, generate task.py file
-    frameworks_with_tasks = [
-        MlFramework.PYTORCH.value.lower(),
-        MlFramework.JAX.value.lower(),
-        MlFramework.HUGGINGFACE.value.lower(),
-        MlFramework.MLX.value.lower(),
-        MlFramework.TENSORFLOW.value.lower(),
-    ]
-    if framework_str in frameworks_with_tasks:
-        files[f"{import_name}/task.py"] = {
-            "template": f"app/code/task.{framework_str}.py.tpl"
-        }
-
     context = {
         "project_name": project_name,
         "package_name": package_name,
@@ -173,6 +170,85 @@ def new(
         "username": username,
     }
 
+    # List of files to render
+    if framework_str == "flowertune":
+        files = {
+            ".gitignore": {"template": "app/.gitignore.tpl"},
+            "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"},
+            "README.md": {"template": f"app/README.{framework_str}.md.tpl"},
+            f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"},
+            f"{import_name}/server.py": {
+                "template": "app/code/flwr_tune/server.py.tpl"
+            },
+            f"{import_name}/client.py": {
+                "template": "app/code/flwr_tune/client.py.tpl"
+            },
+            f"{import_name}/app.py": {"template": "app/code/flwr_tune/app.py.tpl"},
+            f"{import_name}/models.py": {
+                "template": "app/code/flwr_tune/models.py.tpl"
+            },
+            f"{import_name}/dataset.py": {
+                "template": "app/code/flwr_tune/dataset.py.tpl"
+            },
+            f"{import_name}/conf/config.yaml": {
+                "template": "app/code/flwr_tune/config.yaml.tpl"
+            },
+            f"{import_name}/conf/static_config.yaml": {
+                "template": "app/code/flwr_tune/static_config.yaml.tpl"
+            },
+        }
+
+        # Challenge specific context
+        fraction_fit = "0.2" if llm_challenge_str == "code" else "0.1"
+        if llm_challenge_str == "generalnlp":
+            challenge_name = "General NLP"
+            num_clients = "20"
+            dataset_name = "vicgalle/alpaca-gpt4"
+        elif llm_challenge_str == "finance":
+            challenge_name = "Finance"
+            num_clients = "50"
+            dataset_name = "FinGPT/fingpt-sentiment-train"
+        elif llm_challenge_str == "medical":
+            challenge_name = "Medical"
+            num_clients = "20"
+            dataset_name = "medalpaca/medical_meadow_medical_flashcards"
+        else:
+            challenge_name = "Code"
+            num_clients = "10"
+            dataset_name = "lucasmccabe-lmi/CodeAlpaca-20k"
+
+        context["llm_challenge_str"] = llm_challenge_str
+        context["fraction_fit"] = fraction_fit
+        context["challenge_name"] = challenge_name
+        context["num_clients"] = num_clients
+        context["dataset_name"] = dataset_name
+    else:
+        files = {
+            ".gitignore": {"template": "app/.gitignore.tpl"},
+            "README.md": {"template": "app/README.md.tpl"},
+            "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"},
+            f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"},
+            f"{import_name}/server.py": {
+                "template": f"app/code/server.{framework_str}.py.tpl"
+            },
+            f"{import_name}/client.py": {
+                "template": f"app/code/client.{framework_str}.py.tpl"
+            },
+        }
+
+        # Depending on the framework, generate task.py file
+        frameworks_with_tasks = [
+            MlFramework.PYTORCH.value.lower(),
+            MlFramework.JAX.value.lower(),
+            MlFramework.HUGGINGFACE.value.lower(),
+            MlFramework.MLX.value.lower(),
+            MlFramework.TENSORFLOW.value.lower(),
+        ]
+        if framework_str in frameworks_with_tasks:
+            files[f"{import_name}/task.py"] = {
+                "template": f"app/code/task.{framework_str}.py.tpl"
+            }
+
     for file_path, value in files.items():
         render_and_create(
             file_path=os.path.join(project_dir, file_path),
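The member lookup added above scans `vars(LlmChallengeName)` to map the prompted value back to a member name. For readers unfamiliar with that idiom, a minimal standalone sketch (standard library only; the value returned by `prompt_options` is mimicked by a hard-coded string) shows that the `Enum` value constructor performs the same reverse lookup in one step:

```python
from enum import Enum


class LlmChallengeName(str, Enum):
    """Available LLM challenges, as defined in new.py above."""

    GENERALNLP = "GeneralNLP"
    FINANCE = "Finance"
    MEDICAL = "Medical"
    CODE = "Code"


llm_challenge_value = "Finance"  # example value a user would pick at the prompt

# The list-comprehension form used in the diff:
selected_value = [
    name
    for name, value in vars(LlmChallengeName).items()
    if value == llm_challenge_value
]
assert selected_value[0] == "FINANCE"

# Equivalent one-step reverse lookup via the Enum constructor:
assert LlmChallengeName(llm_challenge_value).name == "FINANCE"
```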
flwr/cli/new/templates/app/README.flowertune.md.tpl ADDED
@@ -0,0 +1,56 @@
+# FlowerTune LLM on $challenge_name Dataset
+
+This directory conducts federated instruction tuning with a pretrained [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) model on a [$challenge_name dataset](https://huggingface.co/datasets/$dataset_name).
+We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset.
+Flower's Simulation Engine is used to simulate the LLM fine-tuning process in a federated way,
+which allows users to perform the training on a single GPU.
+
+
+## Methodology
+
+This baseline performs federated LLM fine-tuning with [LoRA](https://arxiv.org/pdf/2106.09685) using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library.
+The clients' models are aggregated with the FedAvg strategy.
+This provides a baseline performance for the $challenge_name challenge leaderboard.
+
+
+## Environments setup
+
+Project dependencies are defined in `pyproject.toml`. Install them in an activated Python environment with:
+
+```shell
+pip install -e .
+```
+
+## Experimental setup
+
+The dataset is partitioned into $num_clients IID shards, one per client.
+We randomly sample a $fraction_fit fraction of the clients to be available for each round,
+and the federated fine-tuning lasts for `200` rounds.
+All settings are defined in `$project_name/conf/static_config.yaml`, which may not be modified, to keep the competition fair, if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard).
+
+
+## Running the challenge
+
+First, make sure that you have access to the [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.3) model with your Hugging Face account. You can request access directly from the Hugging Face website.
+Then, follow the instructions [here](https://huggingface.co/docs/huggingface_hub/en/quick-start#login-command) to log in to your account. Note that you only need to complete this step once on your development machine:
+
+```bash
+huggingface-cli login
+```
+
+Run the challenge with the default config values.
+The configs are in `$project_name/conf/config.yaml` and `$project_name/conf/static_config.yaml`, and are loaded automatically.
+
+```bash
+flwr run
+```
+
+## VRAM consumption
+
+We use the Mistral-7B model with 4-bit quantization by default. The estimated VRAM consumption per client for each challenge is shown below:
+
+| Challenges | GeneralNLP | Finance | Medical | Code |
+| :--------: | :--------: | :--------: | :--------: | :--------: |
+| VRAM | ~25.50 GB | ~17.30 GB | ~22.80 GB | ~17.40 GB |
+
+You can adjust the CPU/GPU resources assigned to each client based on your device; these are specified under `flower.engine.simulation` in `pyproject.toml`.
flwr/cli/new/templates/app/code/flwr_tune/__init__.py ADDED
@@ -0,0 +1,15 @@
+# Copyright 2024 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Flower CLI `new` command app / code / flwr_tune templates."""
flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl ADDED
@@ -0,0 +1,86 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+import os
+import warnings
+from datetime import datetime
+
+from flwr_datasets import FederatedDataset
+from hydra import compose, initialize
+from hydra.utils import instantiate
+
+from flwr.client import ClientApp
+from flwr.common import ndarrays_to_parameters
+from flwr.server import ServerApp, ServerConfig
+
+from $import_name.client import gen_client_fn, get_parameters
+from $import_name.dataset import get_tokenizer_and_data_collator_and_propt_formatting
+from $import_name.models import get_model
+from $import_name.server import fit_weighted_average, get_evaluate_fn, get_on_fit_config
+
+# Avoid warnings
+warnings.filterwarnings("ignore", category=UserWarning)
+os.environ["TOKENIZERS_PARALLELISM"] = "true"
+os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1"
+
+# Initialise regular config
+with initialize(config_path="conf", version_base="1.1"):
+    cfg = compose(config_name="config")
+
+# Initialise static config
+with initialize(config_path="conf", version_base="1.1"):
+    cfg_static = compose(config_name="static_config")
+
+cfg.train.num_rounds = cfg_static.num_rounds
+
+# Create output directory given current timestamp
+current_time = datetime.now()
+folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S")
+save_path = os.path.join(os.getcwd(), f"results/{folder_name}")
+os.makedirs(save_path, exist_ok=True)
+
+# Partition dataset and get dataloaders
+partitioner = instantiate(cfg_static.partitioner)
+fds = FederatedDataset(
+    dataset=cfg_static.dataset.name, partitioners={"train": partitioner}
+)
+(
+    tokenizer,
+    data_collator,
+    formatting_prompts_func,
+) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name)
+
+# ClientApp for Flower Next
+client = ClientApp(
+    client_fn=gen_client_fn(
+        fds,
+        tokenizer,
+        formatting_prompts_func,
+        data_collator,
+        cfg.model,
+        cfg.train,
+        save_path,
+    ),
+)
+
+# Get initial model weights
+init_model = get_model(cfg.model)
+init_model_parameters = get_parameters(init_model)
+init_model_parameters = ndarrays_to_parameters(init_model_parameters)
+
+# Instantiate strategy according to config. Here we pass other arguments
+# that are only defined at runtime.
+strategy = instantiate(
+    cfg.strategy,
+    on_fit_config_fn=get_on_fit_config(),
+    fit_metrics_aggregation_fn=fit_weighted_average,
+    initial_parameters=init_model_parameters,
+    evaluate_fn=get_evaluate_fn(
+        cfg.model, cfg.train.save_every_round, cfg_static.num_rounds, save_path
+    ),
+)
+
+# ServerApp for Flower Next
+server = ServerApp(
+    config=ServerConfig(num_rounds=cfg_static.num_rounds),
+    strategy=strategy,
+)
flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl ADDED
@@ -0,0 +1,124 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+from collections import OrderedDict
+from typing import Callable, Dict, Tuple
+
+import torch
+from omegaconf import DictConfig
+from peft import get_peft_model_state_dict, set_peft_model_state_dict
+from transformers import TrainingArguments
+from trl import SFTTrainer
+
+from flwr.client import NumPyClient
+from flwr.common.typing import NDArrays, Scalar
+from $import_name.dataset import reformat
+from $import_name.models import cosine_annealing, get_model
+
+
+# pylint: disable=too-many-arguments
+# pylint: disable=too-many-instance-attributes
+class FlowerClient(NumPyClient):
+    """Standard Flower client for CNN training."""
+
+    def __init__(
+        self,
+        model_cfg: DictConfig,
+        train_cfg: DictConfig,
+        trainset,
+        tokenizer,
+        formatting_prompts_func,
+        data_collator,
+        save_path,
+    ):  # pylint: disable=too-many-arguments
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        self.train_cfg = train_cfg
+        self.training_argumnets = TrainingArguments(**train_cfg.training_arguments)
+        self.tokenizer = tokenizer
+        self.formatting_prompts_func = formatting_prompts_func
+        self.data_collator = data_collator
+        self.save_path = save_path
+
+        # instantiate model
+        self.model = get_model(model_cfg)
+
+        self.trainset = trainset
+
+    def fit(
+        self, parameters: NDArrays, config: Dict[str, Scalar]
+    ) -> Tuple[NDArrays, int, Dict]:
+        """Implement distributed fit function for a given client."""
+        set_parameters(self.model, parameters)
+
+        new_lr = cosine_annealing(
+            int(config["current_round"]),
+            self.train_cfg.num_rounds,
+            self.train_cfg.learning_rate_max,
+            self.train_cfg.learning_rate_min,
+        )
+
+        self.training_argumnets.learning_rate = new_lr
+        self.training_argumnets.output_dir = self.save_path
+
+        # Construct trainer
+        trainer = SFTTrainer(
+            model=self.model,
+            tokenizer=self.tokenizer,
+            args=self.training_argumnets,
+            max_seq_length=self.train_cfg.seq_length,
+            train_dataset=self.trainset,
+            formatting_func=self.formatting_prompts_func,
+            data_collator=self.data_collator,
+        )
+
+        # Do local training
+        results = trainer.train()
+
+        return (
+            get_parameters(self.model),
+            len(self.trainset),
+            {"train_loss": results.training_loss},
+        )
+
+
+def set_parameters(model, parameters: NDArrays) -> None:
+    """Change the parameters of the model using the given ones."""
+    peft_state_dict_keys = get_peft_model_state_dict(model).keys()
+    params_dict = zip(peft_state_dict_keys, parameters)
+    state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
+    set_peft_model_state_dict(model, state_dict)
+
+
+def get_parameters(model) -> NDArrays:
+    """Return the parameters of the current net."""
+    state_dict = get_peft_model_state_dict(model)
+    return [val.cpu().numpy() for _, val in state_dict.items()]
+
+
+def gen_client_fn(
+    fds,
+    tokenizer,
+    formatting_prompts_func,
+    data_collator,
+    model_cfg: DictConfig,
+    train_cfg: DictConfig,
+    save_path: str,
+) -> Callable[[str], FlowerClient]:  # pylint: disable=too-many-arguments
+    """Generate the client function that creates the Flower Clients."""
+
+    def client_fn(cid: str) -> FlowerClient:
+        """Create a Flower client representing a single organization."""
+        # Let's get the partition corresponding to the i-th client
+        client_trainset = fds.load_partition(int(cid), "train")
+        client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str")
+
+        return FlowerClient(
+            model_cfg,
+            train_cfg,
+            client_trainset,
+            tokenizer,
+            formatting_prompts_func,
+            data_collator,
+            save_path,
+        ).to_client()
+
+    return client_fn
flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl ADDED
@@ -0,0 +1,34 @@
+# Federated Instruction Tuning
+---
+model:
+  name: "mistralai/Mistral-7B-v0.3"
+  quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes
+  gradient_checkpointing: True
+  lora:
+    peft_lora_r: 32
+    peft_lora_alpha: 64
+
+train:
+  num_rounds: null
+  save_every_round: 5
+  learning_rate_max: 5e-5
+  learning_rate_min: 1e-6
+  seq_length: 512
+  training_arguments:
+    output_dir: null # to be set by hydra
+    learning_rate: null # to be set by the client
+    per_device_train_batch_size: 16
+    gradient_accumulation_steps: 1
+    logging_steps: 10
+    num_train_epochs: 3
+    max_steps: 10
+    report_to: null
+    save_steps: 1000
+    save_total_limit: 10
+    gradient_checkpointing: True
+    lr_scheduler_type: "constant"
+
+strategy:
+  _target_: flwr.server.strategy.FedAvg
+  fraction_fit: $fraction_fit
+  fraction_evaluate: 0.0 # no client evaluation
flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl ADDED
@@ -0,0 +1,57 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+from transformers import AutoTokenizer
+from trl import DataCollatorForCompletionOnlyLM
+
+
+def formatting_prompts_func(example):
+    """Construct prompts."""
+    output_texts = []
+    # Constructing a standard Alpaca
+    # (https://github.com/tatsu-lab/stanford_alpaca#data-release) prompt
+    mssg = (
+        "Below is an instruction that describes a task. "
+        "Write a response that appropriately completes the request."
+    )
+    for i in range(len(example["instruction"])):
+        text = (
+            f"{mssg}\n### Instruction:\n{example['instruction'][i]}\n"
+            f"### Response: {example['response'][i]}"
+        )
+        output_texts.append(text)
+    return output_texts
+
+
+def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str):
+    """Get tokenizer, data_collator and prompt formatting."""
+    # From: https://huggingface.co/docs/trl/en/sft_trainer
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_name, use_fast=True, padding_side="right"
+    )
+    tokenizer.pad_token = tokenizer.eos_token
+    response_template_with_context = "\n### Response:"  # alpaca response tag
+    response_template_ids = tokenizer.encode(
+        response_template_with_context, add_special_tokens=False
+    )[2:]
+    data_collator = DataCollatorForCompletionOnlyLM(
+        response_template_ids, tokenizer=tokenizer
+    )
+
+    return tokenizer, data_collator, formatting_prompts_func
+
+
+def formatting(dataset):
+    """Format dataset."""
+    dataset["instruction"] = dataset["instruction"] + " " + dataset["input"]
+    return dataset
+
+
+def reformat(dataset, llm_task):
+    """Reformat datasets."""
+    dataset = dataset.rename_column("output", "response")
+    if llm_task == "finance" or llm_task == "code":
+        dataset = dataset.map(formatting, remove_columns=["input"])
+    if llm_task == "medical":
+        dataset = dataset.remove_columns(["instruction"])
+        dataset = dataset.rename_column("input", "instruction")
+    return dataset
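To make the column handling in `reformat` above concrete, here is a small self-contained sketch with a made-up record (not taken from the actual challenge datasets) that replays the same `datasets` operations applied for the `code` task:

```python
from datasets import Dataset

# Hypothetical single-record dataset; real data comes from the Hugging Face Hub
sample = Dataset.from_dict(
    {"instruction": ["Sort the list"], "input": ["[3, 1, 2]"], "output": ["[1, 2, 3]"]}
)

# Same steps reformat() applies when llm_task is "finance" or "code":
ds = sample.rename_column("output", "response")
ds = ds.map(
    lambda row: {"instruction": row["instruction"] + " " + row["input"]},
    remove_columns=["input"],
)
print(ds[0])
# -> {'instruction': 'Sort the list [3, 1, 2]', 'response': '[1, 2, 3]'}
#
# For llm_task == "medical", reformat() instead drops "instruction" and
# renames "input" to "instruction".
```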
flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl ADDED
@@ -0,0 +1,59 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+import math
+
+import torch
+from omegaconf import DictConfig
+from peft import LoraConfig, get_peft_model
+from peft.utils import prepare_model_for_kbit_training
+from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+
+def cosine_annealing(
+    current_round: int,
+    total_round: int,
+    lrate_max: float = 0.001,
+    lrate_min: float = 0.0,
+) -> float:
+    """Implement cosine annealing learning rate schedule."""
+    cos_inner = math.pi * current_round / total_round
+    return lrate_min + 0.5 * (lrate_max - lrate_min) * (1 + math.cos(cos_inner))
+
+
+def get_model(model_cfg: DictConfig):
+    """Load model with appropriate quantization config and other optimizations.
+
+    Please refer to this example for `peft + BitsAndBytes`:
+    https://github.com/huggingface/peft/blob/main/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py
+    """
+    if model_cfg.quantization == 4:
+        quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+    elif model_cfg.quantization == 8:
+        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+    else:
+        raise ValueError(
+            f"Use 4-bit or 8-bit quantization. You passed: {model_cfg.quantization}/"
+        )
+
+    model = AutoModelForCausalLM.from_pretrained(
+        model_cfg.name,
+        quantization_config=quantization_config,
+        torch_dtype=torch.bfloat16,
+        low_cpu_mem_usage=True,
+    )
+
+    model = prepare_model_for_kbit_training(
+        model, use_gradient_checkpointing=model_cfg.gradient_checkpointing
+    )
+
+    peft_config = LoraConfig(
+        r=model_cfg.lora.peft_lora_r,
+        lora_alpha=model_cfg.lora.peft_lora_alpha,
+        lora_dropout=0.075,
+        task_type="CAUSAL_LM",
+    )
+
+    if model_cfg.gradient_checkpointing:
+        model.config.use_cache = False
+
+    return get_peft_model(model, peft_config)
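The `cosine_annealing` schedule above starts at `lrate_max` in round 0 and decays smoothly to `lrate_min` in the final round. A quick standalone check with the defaults from `config.yaml` (`learning_rate_max: 5e-5`, `learning_rate_min: 1e-6`, 200 rounds):

```python
import math


def cosine_annealing(current_round, total_round, lrate_max, lrate_min):
    # Same formula as in models.py.tpl above
    cos_inner = math.pi * current_round / total_round
    return lrate_min + 0.5 * (lrate_max - lrate_min) * (1 + math.cos(cos_inner))


print(cosine_annealing(0, 200, 5e-5, 1e-6))    # round 0:   5.0e-05 (== lrate_max)
print(cosine_annealing(100, 200, 5e-5, 1e-6))  # round 100: ~2.55e-05 (midpoint)
print(cosine_annealing(200, 200, 5e-5, 1e-6))  # round 200: 1.0e-06 (== lrate_min)
```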
flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl ADDED
@@ -0,0 +1,48 @@
+"""$project_name: A Flower / FlowerTune app."""
+
+from $import_name.client import set_parameters
+from $import_name.models import get_model
+
+
+# Get function that will be executed by the strategy's evaluate() method
+# Here we use it to save global model checkpoints
+def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path):
+    """Return an evaluation function for saving global model."""
+
+    def evaluate(server_round: int, parameters, config):
+        # Save model
+        if server_round != 0 and (
+            server_round == total_round or server_round % save_every_round == 0
+        ):
+            # Init model
+            model = get_model(model_cfg)
+            set_parameters(model, parameters)
+
+            model.save_pretrained(f"{save_path}/peft_{server_round}")
+
+        return 0.0, {}
+
+    return evaluate
+
+
+def get_on_fit_config():
+    """
+    Return a function that will be used to construct the config
+    that the client's fit() method will receive.
+    """
+
+    def fit_config_fn(server_round: int):
+        fit_config = {"current_round": server_round}
+        return fit_config
+
+    return fit_config_fn
+
+
+def fit_weighted_average(metrics):
+    """Aggregate (federated) evaluation metrics."""
+    # Multiply accuracy of each client by number of examples used
+    losses = [num_examples * m["train_loss"] for num_examples, m in metrics]
+    examples = [num_examples for num_examples, _ in metrics]
+
+    # Aggregate and return custom metric (weighted average)
+    return {"train_loss": sum(losses) / sum(examples)}
flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl ADDED
@@ -0,0 +1,11 @@
+# Federated Instruction Tuning (static)
+---
+dataset:
+  name: $dataset_name
+
+# FL experimental settings
+num_clients: $num_clients # total number of clients
+num_rounds: 200
+partitioner:
+  _target_: flwr_datasets.partitioner.IidPartitioner
+  num_partitions: $num_clients
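The `$dataset_name`, `$num_clients`, and related placeholders in these templates are filled from the `context` dict assembled in `new.py` above. As an illustration only, and assuming `$`-style substitution in the spirit of Python's `string.Template` (the CLI's actual rendering helper is not shown in this diff), the Finance challenge values would render like this:

```python
from string import Template

# Assumed mechanism, for illustration; see render_and_create() in new.py
tpl = Template(
    "dataset:\n"
    "  name: $dataset_name\n"
    "num_clients: $num_clients  # total number of clients\n"
)
context = {"dataset_name": "FinGPT/fingpt-sentiment-train", "num_clients": "50"}
print(tpl.substitute(context))
# dataset:
#   name: FinGPT/fingpt-sentiment-train
# num_clients: 50  # total number of clients
```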
flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl ADDED
@@ -0,0 +1,42 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "$package_name"
+version = "1.0.0"
+description = ""
+authors = [
+    { name = "The Flower Authors", email = "hello@flower.ai" },
+]
+license = { text = "Apache License (2.0)" }
+dependencies = [
+    "flwr[simulation]>=1.9.0,<2.0",
+    "flwr-datasets>=0.1.0,<1.0.0",
+    "hydra-core==1.3.2",
+    "trl==0.8.1",
+    "bitsandbytes==0.43.0",
+    "scipy==1.13.0",
+    "peft==0.6.2",
+    "transformers==4.39.3",
+    "sentencepiece==0.2.0",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
+
+[flower]
+publisher = "$username"
+
+[flower.components]
+serverapp = "$import_name.app:server"
+clientapp = "$import_name.app:client"
+
+[flower.engine]
+name = "simulation"
+
+[flower.engine.simulation.supernode]
+num = $num_clients
+
+[flower.engine.simulation]
+backend_config = { client_resources = { num_cpus = 8, num_gpus = 1.0 } }
flwr/server/run_serverapp.py CHANGED
@@ -22,6 +22,7 @@ from pathlib import Path
 from typing import Optional
 
 from flwr.common import Context, EventType, RecordSet, event
+from flwr.common.config import get_flwr_dir, get_project_config, get_project_dir
 from flwr.common.logger import log, update_console_handler, warn_deprecated_feature
 from flwr.common.object_ref import load_app
 from flwr.proto.driver_pb2 import CreateRunRequest  # pylint: disable=E0611
@@ -43,7 +44,7 @@ def run(
     if not (server_app_attr is None) ^ (loaded_server_app is None):
         raise ValueError(
             "Either `server_app_attr` or `loaded_server_app` should be set "
-            "but not both. "
+            "but not both."
         )
 
     if server_app_dir is not None:
@@ -76,7 +77,7 @@ def run(
     log(DEBUG, "ServerApp finished running.")
 
 
-def run_server_app() -> None:
+def run_server_app() -> None:  # pylint: disable=too-many-branches
     """Run Flower server app."""
     event(EventType.RUN_SERVER_APP_ENTER)
 
@@ -136,11 +137,43 @@ def run_server_app() -> None:
         cert_path,
     )
 
-    log(
-        DEBUG,
-        "Flower will load ServerApp `%s`",
-        getattr(args, "server-app"),
+    server_app_attr: Optional[str] = getattr(args, "server-app")
+    if not (server_app_attr is None) ^ (args.run_id is None):
+        raise sys.exit(
+            "Please provide either a ServerApp reference or a Run ID, but not both. "
+            "For more details, use: ``flower-server-app -h``"
+        )
+
+    stub = GrpcDriverStub(
+        driver_service_address=args.superlink, root_certificates=root_certificates
     )
+    if args.run_id is not None:
+        # User provided `--run-id`, but not `server-app`
+        run_id = args.run_id
+    else:
+        # User provided `server-app`, but not `--run-id`
+        # Create run if run_id is not provided
+        stub.connect()
+        req = CreateRunRequest(fab_id=args.fab_id, fab_version=args.fab_version)
+        res = stub.create_run(req)
+        run_id = res.run_id
+
+    # Initialize GrpcDriver
+    driver = GrpcDriver(run_id=run_id, stub=stub)
+
+    # Dynamically obtain ServerApp path based on run_id
+    if args.run_id is not None:
+        # User provided `--run-id`, but not `server-app`
+        flwr_dir = get_flwr_dir(args.flwr_dir)
+        run_ = driver.run
+        server_app_dir = str(get_project_dir(run_.fab_id, run_.fab_version, flwr_dir))
+        config = get_project_config(server_app_dir)
+        server_app_attr = config["flower"]["components"]["serverapp"]
+    else:
+        # User provided `server-app`, but not `--run-id`
+        server_app_dir = str(Path(args.dir).absolute())
+
+    log(DEBUG, "Flower will load ServerApp `%s` in %s", server_app_attr, server_app_dir)
 
     log(
         DEBUG,
@@ -148,20 +181,6 @@ def run_server_app() -> None:
         root_certificates,
     )
 
-    server_app_dir = args.dir
-    server_app_attr = getattr(args, "server-app")
-
-    # Create run
-    stub = GrpcDriverStub(
-        driver_service_address=args.superlink, root_certificates=root_certificates
-    )
-    stub.connect()
-    req = CreateRunRequest(fab_id=args.fab_id, fab_version=args.fab_version)
-    res = stub.create_run(req)
-
-    # Initialize GrpcDriver
-    driver = GrpcDriver(run_id=res.run_id, stub=stub)
-
     # Run the ServerApp with the Driver
     run(driver=driver, server_app_dir=server_app_dir, server_app_attr=server_app_attr)
 
@@ -179,6 +198,8 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser:
 
     parser.add_argument(
         "server-app",
+        nargs="?",
+        default=None,
        help="For example: `server:app` or `project.package.module:wrapper.app`",
     )
     parser.add_argument(
@@ -228,5 +249,22 @@
         type=str,
         help="The version of the FAB used in the run.",
     )
+    parser.add_argument(
+        "--run-id",
+        default=None,
+        type=int,
+        help="The identifier of the run.",
+    )
+    parser.add_argument(
+        "--flwr-dir",
+        default=None,
+        help="""The path containing installed Flower Apps.
+    By default, this value is equal to:
+
+        - `$FLWR_HOME/` if `$FLWR_HOME` is defined
+        - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined
+        - `$HOME/.flwr/` in all other cases
+        """,
+    )
 
     return parser
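The rewritten `run_server_app` above makes the positional `server-app` argument and the new `--run-id` flag mutually exclusive: exactly one must be given. A standalone sketch of how the XOR guard classifies the four possible combinations:

```python
# `not (a is None) ^ (b is None)` is True exactly when both or neither
# of the two options are provided, which is the invalid case.
cases = [
    ("server:app", None),  # ok: load the referenced ServerApp, create a run
    (None, 42),            # ok: attach to run 42, resolve the app from the FAB
    (None, None),          # invalid: nothing to load
    ("server:app", 42),    # invalid: ambiguous
]
for server_app_attr, run_id in cases:
    invalid = not ((server_app_attr is None) ^ (run_id is None))
    print(f"{server_app_attr!r:>14} {run_id!r:>6} ->", "invalid" if invalid else "ok")
```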
flwr/server/superlink/fleet/vce/backend/raybackend.py CHANGED
@@ -15,7 +15,7 @@
 """Ray backend for the Fleet API using the Simulation Engine."""
 
 import pathlib
-from logging import DEBUG, ERROR, WARNING
+from logging import DEBUG, ERROR
 from typing import Callable, Dict, List, Tuple, Union
 
 import ray
@@ -24,16 +24,15 @@ from flwr.client.client_app import ClientApp
 from flwr.common.context import Context
 from flwr.common.logger import log
 from flwr.common.message import Message
-from flwr.simulation.ray_transport.ray_actor import (
-    BasicActorPool,
-    ClientAppActor,
-    init_ray,
-)
+from flwr.common.typing import ConfigsRecordValues
+from flwr.simulation.ray_transport.ray_actor import BasicActorPool, ClientAppActor
 from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth
 
 from .backend import Backend, BackendConfig
 
 ClientResourcesDict = Dict[str, Union[int, float]]
+ActorArgsDict = Dict[str, Union[int, float, Callable[[], None]]]
+RunTimeEnvDict = Dict[str, Union[str, List[str]]]
 
 
 class RayBackend(Backend):
@@ -51,40 +50,29 @@ class RayBackend(Backend):
         if not pathlib.Path(work_dir).exists():
             raise ValueError(f"Specified work_dir {work_dir} does not exist.")
 
-        # Init ray and append working dir if needed
-        runtime_env = (
-            self._configure_runtime_env(work_dir=work_dir) if work_dir else None
-        )
-
-        if backend_config.get("mute_logging", False):
-            init_ray(
-                logging_level=WARNING, log_to_driver=False, runtime_env=runtime_env
-            )
-        elif backend_config.get("silent", False):
-            init_ray(logging_level=WARNING, log_to_driver=True, runtime_env=runtime_env)
-        else:
-            init_ray(runtime_env=runtime_env)
+        # Initialise ray
+        self.init_args_key = "init_args"
+        self.init_ray(backend_config, work_dir)
 
         # Validate client resources
         self.client_resources_key = "client_resources"
+        client_resources = self._validate_client_resources(config=backend_config)
 
         # Create actor pool
-        use_tf = backend_config.get("tensorflow", False)
-        actor_kwargs = {"on_actor_init_fn": enable_tf_gpu_growth} if use_tf else {}
+        actor_kwargs = self._validate_actor_arguments(config=backend_config)
 
-        client_resources = self._validate_client_resources(config=backend_config)
         self.pool = BasicActorPool(
             actor_type=ClientAppActor,
             client_resources=client_resources,
             actor_kwargs=actor_kwargs,
         )
 
-    def _configure_runtime_env(self, work_dir: str) -> Dict[str, Union[str, List[str]]]:
+    def _configure_runtime_env(self, work_dir: str) -> RunTimeEnvDict:
         """Return list of files/subdirectories to exclude relative to work_dir.
 
         Without this, Ray will push everything to the Ray Cluster.
         """
-        runtime_env: Dict[str, Union[str, List[str]]] = {"working_dir": work_dir}
+        runtime_env: RunTimeEnvDict = {"working_dir": work_dir}
 
         excludes = []
         path = pathlib.Path(work_dir)
@@ -125,6 +113,37 @@ class RayBackend(Backend):
 
         return client_resources
 
+    def _validate_actor_arguments(self, config: BackendConfig) -> ActorArgsDict:
+        actor_args_config = config.get("actor", False)
+        actor_args: ActorArgsDict = {}
+        if actor_args_config:
+            use_tf = actor_args.get("tensorflow", False)
+            if use_tf:
+                actor_args["on_actor_init_fn"] = enable_tf_gpu_growth
+        return actor_args
+
+    def init_ray(self, backend_config: BackendConfig, work_dir: str) -> None:
+        """Intialises Ray if not already initialised."""
+        if not ray.is_initialized():
+            # Init ray and append working dir if needed
+            runtime_env = (
+                self._configure_runtime_env(work_dir=work_dir) if work_dir else None
+            )
+
+            ray_init_args: Dict[
+                str,
+                Union[ConfigsRecordValues, RunTimeEnvDict],
+            ] = {}
+
+            if backend_config.get(self.init_args_key):
+                for k, v in backend_config[self.init_args_key].items():
+                    ray_init_args[k] = v
+
+            if runtime_env is not None:
+                ray_init_args["runtime_env"] = runtime_env
+
+            ray.init(**ray_init_args)
+
     @property
     def num_workers(self) -> int:
         """Return number of actors in pool."""
@@ -152,7 +171,7 @@
         partition_id = message.metadata.partition_id
 
         try:
-            # Submite a task to the pool
+            # Submit a task to the pool
             future = await self.pool.submit(
                 lambda a, a_fn, mssg, cid, state: a.run.remote(a_fn, mssg, cid, state),
                 (app, message, str(partition_id), context),
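Putting the RayBackend changes together: backend options now arrive in separate top-level sections of the `BackendConfig` dictionary rather than as flat keys. A hedged sketch of the new layout with example values (the keys mirror `init_args`, `client_resources`, and `actor` as handled above; entries under `init_args` are forwarded to `ray.init()`):

```python
import json

# Example values only; logging_level=30 corresponds to logging.WARNING,
# matching what run_simulation (see below) sets when verbose logging is off.
backend_config = {
    "init_args": {"logging_level": 30, "log_to_driver": True},
    "client_resources": {"num_cpus": 2, "num_gpus": 0.0},
    "actor": {"tensorflow": 0},
}

# run_simulation serializes this dict back to the original JSON-stream
# format before it reaches the backend:
print(json.dumps(backend_config))
```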
flwr/simulation/ray_transport/ray_actor.py CHANGED
@@ -399,12 +399,6 @@ class VirtualClientEngineActorPool(ActorPool):
         return self._fetch_future_result(cid)
 
 
-def init_ray(*args: Any, **kwargs: Any) -> None:
-    """Intialises Ray if not already initialised."""
-    if not ray.is_initialized():
-        ray.init(*args, **kwargs)
-
-
 class BasicActorPool:
     """A basic actor pool."""
 
flwr/simulation/run_simulation.py CHANGED
@@ -22,16 +22,17 @@ import threading
 import traceback
 from logging import DEBUG, ERROR, INFO, WARNING
 from time import sleep
-from typing import Dict, Optional
+from typing import Optional
 
 from flwr.client import ClientApp
 from flwr.common import EventType, event, log
 from flwr.common.logger import set_logger_propagation, update_console_handler
-from flwr.common.typing import ConfigsRecordValues, Run
+from flwr.common.typing import Run
 from flwr.server.driver import Driver, InMemoryDriver
 from flwr.server.run_serverapp import run
 from flwr.server.server_app import ServerApp
 from flwr.server.superlink.fleet import vce
+from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig
 from flwr.server.superlink.state import StateFactory
 from flwr.simulation.ray_transport.utils import (
     enable_tf_gpu_growth as enable_gpu_growth,
@@ -66,7 +67,7 @@ def run_simulation(
     client_app: ClientApp,
     num_supernodes: int,
     backend_name: str = "ray",
-    backend_config: Optional[Dict[str, ConfigsRecordValues]] = None,
+    backend_config: Optional[BackendConfig] = None,
     enable_tf_gpu_growth: bool = False,
     verbose_logging: bool = False,
 ) -> None:
@@ -90,9 +91,12 @@ def run_simulation(
     backend_name : str (default: ray)
         A simulation backend that runs `ClientApp`s.
 
-    backend_config : Optional[Dict[str, ConfigsRecordValues]]
-        'A dictionary, e.g {"<keyA>": <value>, "<keyB>": <value>} to configure a
-        backend. Values supported in <value> are those included by
+    backend_config : Optional[BackendConfig]
+        'A dictionary to configure a backend. Separate dictionaries to configure
+        different elements of backend. Supported top-level keys are `init_args`
+        for values parsed to initialisation of backend, `client_resources`
+        to define the resources for clients, and `actor` to define the actor
+        parameters. Values supported in <value> are those included by
         `flwr.common.typing.ConfigsRecordValues`.
 
     enable_tf_gpu_growth : bool (default: False)
@@ -104,7 +108,7 @@
         works in the TensorFlow documentation: https://www.tensorflow.org/api/stable.
 
     verbose_logging : bool (default: False)
-        When diabled, only INFO, WARNING and ERROR log messages will be shown. If
+        When disabled, only INFO, WARNING and ERROR log messages will be shown. If
         enabled, DEBUG-level logs will be displayed.
     """
     _run_simulation(
@@ -133,7 +137,7 @@ def run_serverapp_th(
     def server_th_with_start_checks(  # type: ignore
         tf_gpu_growth: bool, stop_event: asyncio.Event, **kwargs
     ) -> None:
-        """Run SeverApp, after check if GPU memory grouwth has to be set.
+        """Run SeverApp, after check if GPU memory growth has to be set.
 
         Upon exception, trigger stop event for Simulation Engine.
         """
@@ -194,7 +198,7 @@
 ) -> None:
     """Launch SuperLink with Simulation Engine, then ServerApp on a separate thread.
 
-    Everything runs on the main thread or a separate one, depening on whether the main
+    Everything runs on the main thread or a separate one, depending on whether the main
     thread already contains a running Asyncio event loop. This is the case if running
     the Simulation Engine on a Jupyter/Colab notebook.
     """
@@ -259,7 +263,7 @@ def _run_simulation(
     client_app: Optional[ClientApp] = None,
     server_app: Optional[ServerApp] = None,
     backend_name: str = "ray",
-    backend_config: Optional[Dict[str, ConfigsRecordValues]] = None,
+    backend_config: Optional[BackendConfig] = None,
     client_app_attr: Optional[str] = None,
     server_app_attr: Optional[str] = None,
     app_dir: str = "",
@@ -286,9 +290,12 @@ def _run_simulation(
     backend_name : str (default: ray)
         A simulation backend that runs `ClientApp`s.
 
-    backend_config : Optional[Dict[str, ConfigsRecordValues]]
-        'A dictionary, e.g {"<keyA>":<value>, "<keyB>":<value>} to configure a
-        backend. Values supported in <value> are those included by
+    backend_config : Optional[BackendConfig]
+        'A dictionary to configure a backend. Separate dictionaries to configure
+        different elements of backend. Supported top-level keys are `init_args`
+        for values parsed to initialisation of backend, `client_resources`
+        to define the resources for clients, and `actor` to define the actor
+        parameters. Values supported in <value> are those included by
         `flwr.common.typing.ConfigsRecordValues`.
 
     client_app_attr : str
@@ -310,30 +317,34 @@ def _run_simulation(
         A boolean to indicate whether to enable GPU growth on the main thread. This is
         desirable if you make use of a TensorFlow model on your `ServerApp` while
         having your `ClientApp` running on the same GPU. Without enabling this, you
-        might encounter an out-of-memory error becasue TensorFlow by default allocates
+        might encounter an out-of-memory error because TensorFlow by default allocates
         all GPU memory. Read mor about how `tf.config.experimental.set_memory_growth()`
         works in the TensorFlow documentation: https://www.tensorflow.org/api/stable.
 
     verbose_logging : bool (default: False)
-        When diabled, only INFO, WARNING and ERROR log messages will be shown. If
+        When disabled, only INFO, WARNING and ERROR log messages will be shown. If
         enabled, DEBUG-level logs will be displayed.
     """
     if backend_config is None:
         backend_config = {}
 
+    if "init_args" not in backend_config:
+        backend_config["init_args"] = {}
+
     # Set logging level
     logger = logging.getLogger("flwr")
     if verbose_logging:
         update_console_handler(level=DEBUG, timestamps=True, colored=True)
     else:
-        backend_config["silent"] = True
+        backend_config["init_args"]["logging_level"] = WARNING
+        backend_config["init_args"]["log_to_driver"] = True
 
     if enable_tf_gpu_growth:
         # Check that Backend config has also enabled using GPU growth
-        use_tf = backend_config.get("tensorflow", False)
+        use_tf = backend_config.get("actor", {}).get("tensorflow", False)
         if not use_tf:
             log(WARNING, "Enabling GPU growth for your backend.")
-            backend_config["tensorflow"] = True
+            backend_config["actor"]["tensorflow"] = True
 
     # Convert config to original JSON-stream format
     backend_config_stream = json.dumps(backend_config)
@@ -352,7 +363,7 @@ def _run_simulation(
         server_app_attr,
     )
     # Detect if there is an Asyncio event loop already running.
-    # If yes, run everything on a separate thread. In environmnets
+    # If yes, run everything on a separate thread. In environments
     # like Jupyter/Colab notebooks, there is an event loop present.
     run_in_thread = False
     try:
@@ -364,7 +375,7 @@
         run_in_thread = True
 
     except RuntimeError:
-        log(DEBUG, "No asyncio event loop runnig")
+        log(DEBUG, "No asyncio event loop running")
 
     finally:
         if run_in_thread:
@@ -409,7 +420,8 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser:
     parser.add_argument(
         "--backend-config",
         type=str,
-        default='{"client_resources": {"num_cpus":2, "num_gpus":0.0}, "tensorflow": 0}',
+        default='{"client_resources": {"num_cpus":2, "num_gpus":0.0},'
+        '"actor": {"tensorflow": 0}}',
         help='A JSON formatted stream, e.g \'{"<keyA>":<value>, "<keyB>":<value>}\' to '
         "configure a backend. Values supported in <value> are those included by "
        "`flwr.common.typing.ConfigsRecordValues`. ",
flwr_nightly-1.10.0.dev20240620.dist-info/METADATA → flwr_nightly-1.10.0.dev20240621.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: flwr-nightly
-Version: 1.10.0.dev20240620
+Version: 1.10.0.dev20240621
 Summary: Flower: A Friendly Federated Learning Framework
 Home-page: https://flower.ai
 License: Apache-2.0
@@ -204,7 +204,7 @@ Other [examples](https://github.com/adap/flower/tree/main/examples):
 - [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplan-meier-fitter)
 - [Sample Level Privacy with Opacus](https://github.com/adap/flower/tree/main/examples/opacus)
 - [Sample Level Privacy with TensorFlow-Privacy](https://github.com/adap/flower/tree/main/examples/tensorflow-privacy)
-- [Flower with a Tabular Dataset] (https://github.com/adap/flower/tree/main/examples/fl-tabular)
+- [Flower with a Tabular Dataset](https://github.com/adap/flower/tree/main/examples/fl-tabular)
 
 ## Community
 
flwr_nightly-1.10.0.dev20240620.dist-info/RECORD → flwr_nightly-1.10.0.dev20240621.dist-info/RECORD RENAMED
@@ -6,9 +6,10 @@ flwr/cli/config_utils.py,sha256=ugUlqH52yxTPMtKw6q4xv5k2OVWUy89cwyJ5LB2RLgk,6037
 flwr/cli/example.py,sha256=1bGDYll3BXQY2kRqSN-oICqS5n1b9m0g0RvXTopXHl4,2215
 flwr/cli/install.py,sha256=Wz7Hqg2PE9N-w5CnqlH9Zr8mzADN2J7NLcUhgldZLWU,6579
 flwr/cli/new/__init__.py,sha256=cQzK1WH4JP2awef1t2UQ2xjl1agVEz9rwutV18SWV1k,789
-flwr/cli/new/new.py,sha256=Ll49RGE-DZN0dBgsw4hsLeru94iaHl3e89D7I8Vfj9E,6223
+flwr/cli/new/new.py,sha256=ySz3yu-3IF_wencjajr4mA2VNoM6hKZZ5scKYZKzAII,9340
 flwr/cli/new/templates/__init__.py,sha256=4luU8RL-CK8JJCstQ_ON809W9bNTkY1l9zSaPKBkgwY,725
 flwr/cli/new/templates/app/.gitignore.tpl,sha256=XixnHdyeMB2vwkGtGnwHqoWpH-9WChdyG0GXe57duhc,3078
+flwr/cli/new/templates/app/README.flowertune.md.tpl,sha256=PqzkGm0g6Zy-vZK9_0EO3f_U6g1r69lGc4UL8kds5Q8,2696
 flwr/cli/new/templates/app/README.md.tpl,sha256=_qGtgpKYKoCJVjQnvlBMKvFs_1gzTcL908I3KJg0oAM,668
 flwr/cli/new/templates/app/__init__.py,sha256=DU7QMY7IhMQyuwm_tja66xU0KXTWQFqzfTqwg-_NJdE,729
 flwr/cli/new/templates/app/code/__init__.py,sha256=EM6vfvgAILKPaPn7H1wMV1Wi01WyZCP_Eg6NxD6oWg8,736
@@ -20,6 +21,14 @@ flwr/cli/new/templates/app/code/client.numpy.py.tpl,sha256=mTh7Y_jOJrPUvDYHVJy4w
 flwr/cli/new/templates/app/code/client.pytorch.py.tpl,sha256=MgCtMSv1Th16Faod11HubVaARkLYt7vS9RYH962-2pk,1172
 flwr/cli/new/templates/app/code/client.sklearn.py.tpl,sha256=S71SZiHaRXtKqUk3m5Elc_c6HhKAIKLalrKOQ3p20No,2801
 flwr/cli/new/templates/app/code/client.tensorflow.py.tpl,sha256=dxrTO9JwYrDBjLsmCiRLetN9KxbnWRTeGA0BQbnOu_A,1280
+flwr/cli/new/templates/app/code/flwr_tune/__init__.py,sha256=JgNgBtKdm1jKM9625WxappCAVUGtYAmcjKSsXJ1u3ZQ,748
+flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl,sha256=KXqCaD2NSEaLHT08fgQTzWlrtOdfxRYs0fsLw9yecCY,2647
+flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl,sha256=MvQ5tt1r7CBUR8y-nBcZs4as2m1YimxegLYw_nHmXzc,4048
+flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl,sha256=aPjrwFU020tPkJmOjzSUjb9dSi2bhs4ZTMEyd0uRlCA,867
+flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl,sha256=kPG4AIXQfNNHZGYC3amet3ttI23N72N6jjoDkp_wYIA,2028
+flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl,sha256=cEq9ZWM3zImJVceNtxHC_bYBLE8OChK0BdjpWs5Wz-0,1881
+flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl,sha256=Z_JC7-YdjCnnUJPKILwT5Iqc70byJpthbye8RsQp9L0,1548
+flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl,sha256=cBPpBVN_N7p4T2a3rqChlngmE0dB_jveOLHesNcEHvs,268
 flwr/cli/new/templates/app/code/server.hf.py.tpl,sha256=Mld452y3SUkejlFzac5hpCjT7_mbA0ZEEMJIUyHtSTI,338
 flwr/cli/new/templates/app/code/server.jax.py.tpl,sha256=YTi-wroUpjRDY_AZqnoN5X-n3U5V7laL6UJgqFLEbKE,246
 flwr/cli/new/templates/app/code/server.mlx.py.tpl,sha256=Cqk3PvM0e7hzohXPqD5hG_cthXoxCfc30bpEThqMy7M,272
@@ -32,6 +41,7 @@ flwr/cli/new/templates/app/code/task.jax.py.tpl,sha256=u4o3V019EH79szOw2xzVeC5r9
 flwr/cli/new/templates/app/code/task.mlx.py.tpl,sha256=y7aVj3F_98-wBnDcbPsCNnFs9BOHTn0y6XIYkByzv7Y,2598
 flwr/cli/new/templates/app/code/task.pytorch.py.tpl,sha256=NvajdZN-eTyfdqKK0v2MrvWITXw9BjJ3Ri5c1haPJDs,3684
 flwr/cli/new/templates/app/code/task.tensorflow.py.tpl,sha256=cPOUUS07QbblT9PGFucwu9lY1clRA4-W4DQGA7cpcao,1044
+flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl,sha256=5kAtRcTvn2-s2i2_WGoDNJQ__ALADQjabX7lQlCNOGA,899
 flwr/cli/new/templates/app/pyproject.hf.toml.tpl,sha256=O3-dgH8_knk9uM49IzX06CYC2Ev5xdPuITB40Phvewc,759
 flwr/cli/new/templates/app/pyproject.jax.toml.tpl,sha256=QIhp6_eYYFk9aJd_n-tc_Ar76Se1OP6zSibTbGeHV7w,568
 flwr/cli/new/templates/app/pyproject.mlx.toml.tpl,sha256=OJ15G7CmjevBsUCVJ3ixv01VFwL4nkPcKkVGKeVW8ew,668
@@ -173,7 +183,7 @@ flwr/server/driver/driver.py,sha256=NT_yaeit7_kZEIsCEqOWPID1GrVD3ywH4xZ2wtIh5lM,
 flwr/server/driver/grpc_driver.py,sha256=Lbsxj5EHp_I1NVkUoKBcUc6ZifBeQ8OOYUsNfxsjA18,12269
 flwr/server/driver/inmemory_driver.py,sha256=RcK94_NtjGZ4aZDIscnU7A3Uv1u8jGx29-xcbjQvZTM,6444
 flwr/server/history.py,sha256=bBOHKyX1eQONIsUx4EUU-UnAk1i0EbEl8ioyMq_UWQ8,5063
-flwr/server/run_serverapp.py,sha256=UW8kJlAVBM2cV0po3LWM1B_vG73i4tz_r1Qy_woUBz4,7204
+flwr/server/run_serverapp.py,sha256=-tgV2GJFmKeA1r0RT9Vubh6RXOAvlfCn9Jdz81i5OI4,8850
 flwr/server/server.py,sha256=wsXsxMZ9SQ0B42nBnUlcV83NJPycgrgg5bFwcQ4BYBE,17821
 flwr/server/server_app.py,sha256=Re5Y9ftXlBRJXYHY_8TrNWsjyOUCPC5F_93H0xiZDhI,4400
 flwr/server/server_config.py,sha256=CZaHVAsMvGLjpWVcLPkiYxgJN4xfIyAiUrCI3fETKY4,1349
@@ -223,7 +233,7 @@ flwr/server/superlink/fleet/rest_rere/rest_api.py,sha256=yoSU-6nCJF9ASHGNpSY69nZ
 flwr/server/superlink/fleet/vce/__init__.py,sha256=36MHKiefnJeyjwMQzVUK4m06Ojon3WDcwZGQsAcyVhQ,783
 flwr/server/superlink/fleet/vce/backend/__init__.py,sha256=oBIzmnrSSRvH_H0vRGEGWhWzQQwqe3zn6e13RsNwlIY,1466
 flwr/server/superlink/fleet/vce/backend/backend.py,sha256=LJsKl7oixVvptcG98Rd9ejJycNWcEVB0ODvSreLGp-A,2260
-flwr/server/superlink/fleet/vce/backend/raybackend.py,sha256=KCzV-n-czXxIKPwNfuD-JEVCl4-xAJaHe4taGmw9cTQ,6722
+flwr/server/superlink/fleet/vce/backend/raybackend.py,sha256=oUTvvKybcN2QFj3641Hr1QxNL86_ALsB7KAIUlTy7KE,7504
 flwr/server/superlink/fleet/vce/vce_api.py,sha256=o8DqsL4vrq0AiN4qfQkvRH7gFcztkdy1Wr1IkwWaCmY,12534
 flwr/server/superlink/state/__init__.py,sha256=Gj2OTFLXvA-mAjBvwuKDM3rDrVaQPcIoybSa2uskMTE,1003
 flwr/server/superlink/state/in_memory_state.py,sha256=_cgJ0Bh_km_c8xqZRI90VibypRBL7uTIOXfa9VuQEQs,12827
@@ -244,17 +254,17 @@ flwr/server/workflow/secure_aggregation/secaggplus_workflow.py,sha256=BRqhlnVe8C
 flwr/simulation/__init__.py,sha256=9x8OCkK3jpFAPJB1aeEMOddz6V58bExQPtwE8Z3q-RY,1359
 flwr/simulation/app.py,sha256=7vEV4ytkpJ-zq_5KBSVteTuqR2ZoQdYlfOaYpyzXQcQ,14380
 flwr/simulation/ray_transport/__init__.py,sha256=wzcEEwUUlulnXsg6raCA1nGpP3LlAQDtJ8zNkCXcVbA,734
-flwr/simulation/ray_transport/ray_actor.py,sha256=_wv2eP7qxkCZ-6rMyYWnjLrGPBZRxjvTPjaVk8zIaQ4,19367
+flwr/simulation/ray_transport/ray_actor.py,sha256=bu6gEnbHYtlUxLtzjzpEUtvkQDRzl1PVMjJuCDZvfgQ,19196
 flwr/simulation/ray_transport/ray_client_proxy.py,sha256=hP0B91tkSl4DUyGTRQPMLOfcpR-gyKJ_5SrHz3r9UGc,6738
 flwr/simulation/ray_transport/utils.py,sha256=TYdtfg1P9VfTdLMOJlifInGpxWHYs9UfUqIv2wfkRLA,2392
-flwr/simulation/run_simulation.py,sha256=f1yRtu9XNBaj72KqWiefhjynCwIkwPceC1VRcMrKD1w,16190
+flwr/simulation/run_simulation.py,sha256=m2FK0LzLSOR2HC94XDowLLwvHuIhaV6q78HsT_MKLMU,16858
 flwr/superexec/__init__.py,sha256=9h94ogLxi6eJ3bUuJYq3E3pApThSabTPiSmPAGlTkHE,800
 flwr/superexec/app.py,sha256=kCCsCo51_peIMmKc4-tmiN5qO29cPqZOqqs-QVH0oA4,6097
 flwr/superexec/exec_grpc.py,sha256=u-rztpOleqSGqgvNE-ZLw1HchNsBHU1-eB3m52GZ0pQ,1852
 flwr/superexec/exec_servicer.py,sha256=qf8CT4RLXnY8omOy75kwfsWmMnfTD42B4ENTh5S-BCY,2120
 flwr/superexec/executor.py,sha256=GouXCY2LiZ-ffsOoZ_z-fh4JwbzMmhTl-gwpWFgGWTY,1688
-flwr_nightly-1.10.0.dev20240620.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-flwr_nightly-1.10.0.dev20240620.dist-info/METADATA,sha256=eMWkisQs8PIjJWzIPcSk8-joghJ2klWMUGk995FVU-Y,15615
-flwr_nightly-1.10.0.dev20240620.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-flwr_nightly-1.10.0.dev20240620.dist-info/entry_points.txt,sha256=7qBQcA-bDGDxnJmLd9FYqglFQubjCNqyg9M8a-lukps,336
-flwr_nightly-1.10.0.dev20240620.dist-info/RECORD,,
+flwr_nightly-1.10.0.dev20240621.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+flwr_nightly-1.10.0.dev20240621.dist-info/METADATA,sha256=6Lqi-90hKJ8UHhvWJl2o9Ugmi1vK-KW3QgpLpdLgQF4,15614
+flwr_nightly-1.10.0.dev20240621.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+flwr_nightly-1.10.0.dev20240621.dist-info/entry_points.txt,sha256=7qBQcA-bDGDxnJmLd9FYqglFQubjCNqyg9M8a-lukps,336
+flwr_nightly-1.10.0.dev20240621.dist-info/RECORD,,