flwr-nightly 1.5.0.dev20230614__tar.gz → 1.5.0.dev20230615__tar.gz

Files changed (114)
  1. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/PKG-INFO +1 -1
  2. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/pyproject.toml +1 -1
  3. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/setup.py +1 -1
  4. flwr_nightly-1.5.0.dev20230615/src/py/flwr/client/dpfedavg_numpy_client.py +180 -0
  5. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/driver/driver_client_manager.py +9 -0
  6. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/state/in_memory_state.py +8 -0
  7. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/dpfedavg_adaptive.py +1 -0
  8. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/dpfedavg_fixed.py +46 -1
  9. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/utils/tensorboard.py +1 -2
  10. flwr_nightly-1.5.0.dev20230614/src/py/flwr/client/dpfedavg_numpy_client.py +0 -83
  11. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/LICENSE +0 -0
  12. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/README.md +0 -0
  13. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/__init__.py +0 -0
  14. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/__init__.py +0 -0
  15. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/app.py +0 -0
  16. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/client.py +0 -0
  17. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/grpc_client/__init__.py +0 -0
  18. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/grpc_client/connection.py +0 -0
  19. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/grpc_rere_client/__init__.py +0 -0
  20. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/grpc_rere_client/connection.py +0 -0
  21. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/message_handler/__init__.py +0 -0
  22. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/message_handler/message_handler.py +0 -0
  23. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/message_handler/task_handler.py +0 -0
  24. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/numpy_client.py +0 -0
  25. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/rest_client/__init__.py +0 -0
  26. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/client/rest_client/connection.py +0 -0
  27. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/__init__.py +0 -0
  28. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/address.py +0 -0
  29. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/constant.py +0 -0
  30. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/date.py +0 -0
  31. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/dp.py +0 -0
  32. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/grpc.py +0 -0
  33. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/logger.py +0 -0
  34. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/parameter.py +0 -0
  35. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/serde.py +0 -0
  36. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/telemetry.py +0 -0
  37. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/typing.py +0 -0
  38. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/common/version.py +0 -0
  39. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/driver/__init__.py +0 -0
  40. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/driver/app.py +0 -0
  41. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/driver/driver.py +0 -0
  42. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/driver/driver_client_proxy.py +0 -0
  43. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/__init__.py +0 -0
  44. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/driver_pb2.py +0 -0
  45. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/driver_pb2.pyi +0 -0
  46. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/driver_pb2_grpc.py +0 -0
  47. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/driver_pb2_grpc.pyi +0 -0
  48. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/fleet_pb2.py +0 -0
  49. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/fleet_pb2.pyi +0 -0
  50. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/fleet_pb2_grpc.py +0 -0
  51. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/fleet_pb2_grpc.pyi +0 -0
  52. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/node_pb2.py +0 -0
  53. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/node_pb2.pyi +0 -0
  54. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/node_pb2_grpc.py +0 -0
  55. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/node_pb2_grpc.pyi +0 -0
  56. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/task_pb2.py +0 -0
  57. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/task_pb2.pyi +0 -0
  58. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/task_pb2_grpc.py +0 -0
  59. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/task_pb2_grpc.pyi +0 -0
  60. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/transport_pb2.py +0 -0
  61. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/transport_pb2.pyi +0 -0
  62. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/transport_pb2_grpc.py +0 -0
  63. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/proto/transport_pb2_grpc.pyi +0 -0
  64. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/py.typed +0 -0
  65. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/__init__.py +0 -0
  66. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/app.py +0 -0
  67. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/client_manager.py +0 -0
  68. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/client_proxy.py +0 -0
  69. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/criterion.py +0 -0
  70. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/driver/__init__.py +0 -0
  71. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/driver/driver_servicer.py +0 -0
  72. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/__init__.py +0 -0
  73. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/__init__.py +0 -0
  74. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/driver_client_manager.py +0 -0
  75. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/flower_service_servicer.py +0 -0
  76. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/grpc_bridge.py +0 -0
  77. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/grpc_client_proxy.py +0 -0
  78. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/grpc_server.py +0 -0
  79. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_bidi/ins_scheduler.py +0 -0
  80. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_rere/__init__.py +0 -0
  81. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/grpc_rere/fleet_servicer.py +0 -0
  82. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/message_handler/__init__.py +0 -0
  83. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/message_handler/message_handler.py +0 -0
  84. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/rest_rere/__init__.py +0 -0
  85. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/fleet/rest_rere/rest_api.py +0 -0
  86. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/history.py +0 -0
  87. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/server.py +0 -0
  88. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/state/__init__.py +0 -0
  89. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/state/sqlite_state.py +0 -0
  90. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/state/state.py +0 -0
  91. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/state/state_factory.py +0 -0
  92. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/__init__.py +0 -0
  93. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/aggregate.py +0 -0
  94. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fault_tolerant_fedavg.py +0 -0
  95. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedadagrad.py +0 -0
  96. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedadam.py +0 -0
  97. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedavg.py +0 -0
  98. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedavg_android.py +0 -0
  99. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedavgm.py +0 -0
  100. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedmedian.py +0 -0
  101. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedopt.py +0 -0
  102. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedprox.py +0 -0
  103. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedtrimmedavg.py +0 -0
  104. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedxgb_nn_avg.py +0 -0
  105. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/fedyogi.py +0 -0
  106. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/krum.py +0 -0
  107. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/qfedavg.py +0 -0
  108. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/strategy/strategy.py +0 -0
  109. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/utils/__init__.py +0 -0
  110. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/server/utils/validator.py +0 -0
  111. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/simulation/__init__.py +0 -0
  112. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/simulation/app.py +0 -0
  113. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/simulation/ray_transport/__init__.py +0 -0
  114. {flwr_nightly-1.5.0.dev20230614 → flwr_nightly-1.5.0.dev20230615}/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: flwr-nightly
-Version: 1.5.0.dev20230614
+Version: 1.5.0.dev20230615
 Summary: Flower: A Friendly Federated Learning Framework
 Home-page: https://flower.dev
 License: Apache-2.0
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "flwr-nightly"
-version = "1.5.0-dev20230614"
+version = "1.5.0-dev20230615"
 description = "Flower: A Friendly Federated Learning Framework"
 license = "Apache-2.0"
 authors = ["The Flower Authors <hello@flower.dev>"]
@@ -52,7 +52,7 @@ entry_points = \

 setup_kwargs = {
     'name': 'flwr-nightly',
-    'version': '1.5.0.dev20230614',
+    'version': '1.5.0.dev20230615',
     'description': 'Flower: A Friendly Federated Learning Framework',
  'long_description': '# Flower: A Friendly Federated Learning Framework\n\n<p align="center">\n <a href="https://flower.dev/">\n <img src="https://flower.dev/_next/image/?url=%2F_next%2Fstatic%2Fmedia%2Fflower_white_border.c2012e70.png&w=640&q=75" width="140px" alt="Flower Website" />\n </a>\n</p>\n<p align="center">\n <a href="https://flower.dev/">Website</a> |\n <a href="https://flower.dev/blog">Blog</a> |\n <a href="https://flower.dev/docs/">Docs</a> |\n <a href="https://flower.dev/conf/flower-summit-2022">Conference</a> |\n <a href="https://flower.dev/join-slack">Slack</a>\n <br /><br />\n</p>\n\n[![GitHub license](https://img.shields.io/github/license/adap/flower)](https://github.com/adap/flower/blob/main/LICENSE)\n[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/adap/flower/blob/main/CONTRIBUTING.md)\n![Build](https://github.com/adap/flower/actions/workflows/flower.yml/badge.svg)\n![Downloads](https://pepy.tech/badge/flwr)\n[![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.dev/join-slack)\n\nFlower (`flwr`) is a framework for building federated learning systems. The\ndesign of Flower is based on a few guiding principles:\n\n* **Customizable**: Federated learning systems vary wildly from one use case to\n another. Flower allows for a wide range of different configurations depending\n on the needs of each individual use case.\n\n* **Extendable**: Flower originated from a research project at the University of\n Oxford, so it was built with AI research in mind. Many components can be\n extended and overridden to build new state-of-the-art systems.\n\n* **Framework-agnostic**: Different machine learning frameworks have different\n strengths. Flower can be used with any machine learning framework, for\n example, [PyTorch](https://pytorch.org),\n [TensorFlow](https://tensorflow.org), [Hugging Face Transformers](https://huggingface.co/), [PyTorch Lightning](https://pytorchlightning.ai/), [MXNet](https://mxnet.apache.org/), [scikit-learn](https://scikit-learn.org/), [JAX](https://jax.readthedocs.io/), [TFLite](https://tensorflow.org/lite/), [fastai](https://www.fast.ai/), [Pandas](https://pandas.pydata.org/\n) for federated analytics, or even raw [NumPy](https://numpy.org/)\n for users who enjoy computing gradients by hand.\n\n* **Understandable**: Flower is written with maintainability in mind. The\n community is encouraged to both read and contribute to the codebase.\n\nMeet the Flower community on [flower.dev](https://flower.dev)!\n\n## Federated Learning Tutorial\n\nFlower\'s goal is to make federated learning accessible to everyone. This series of tutorials introduces the fundamentals of federated learning and how to implement them in Flower.\n\n0. **What is Federated Learning?**\n\n [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-0-What-is-FL.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial/Flower-0-What-is-FL.ipynb))\n\n1. **An Introduction to Federated Learning**\n\n [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-1-Intro-to-FL-PyTorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial/Flower-1-Intro-to-FL-PyTorch.ipynb))\n\n2. 
**Using Strategies in Federated Learning**\n\n [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-2-Strategies-in-FL-PyTorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial/Flower-2-Strategies-in-FL-PyTorch.ipynb))\n \n3. **Building Strategies for Federated Learning**\n\n [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a-Strategy-PyTorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a-Strategy-PyTorch.ipynb))\n \n4. **Custom Clients for Federated Learning**\n\n [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4-Client-and-NumPyClient-PyTorch.ipynb) (or open the [Jupyter Notebook](https://github.com/adap/flower/blob/main/doc/source/tutorial/Flower-4-Client-and-NumPyClient-PyTorch.ipynb))\n\nStay tuned, more tutorials are coming soon. Topics include **Privacy and Security in Federated Learning**, and **Scaling Federated Learning**.\n\n## Documentation\n\n[Flower Docs](https://flower.dev/docs):\n* [Installation](https://flower.dev/docs/installation.html)\n* [Quickstart (TensorFlow)](https://flower.dev/docs/quickstart-tensorflow.html)\n* [Quickstart (PyTorch)](https://flower.dev/docs/quickstart-pytorch.html)\n* [Quickstart (Hugging Face [code example])](https://flower.dev/docs/quickstart-huggingface.html)\n* [Quickstart (PyTorch Lightning [code example])](https://flower.dev/docs/quickstart-pytorch-lightning.html)\n* [Quickstart (MXNet)](https://flower.dev/docs/example-mxnet-walk-through.html)\n* [Quickstart (Pandas)](https://flower.dev/docs/quickstart-pandas.html)\n* [Quickstart (fastai)](https://flower.dev/docs/quickstart-fastai.html)\n* [Quickstart (JAX)](https://github.com/adap/flower/tree/main/examples/quickstart_jax)\n* [Quickstart (scikit-learn)](https://github.com/adap/flower/tree/main/examples/sklearn-logreg-mnist)\n* [Quickstart (TFLite on Android [code example])](https://github.com/adap/flower/tree/main/examples/android)\n* [Quickstart (iOS)](https://flower.dev/docs/quickstart-ios.html)\n\n## Flower Baselines\n\nFlower Baselines is a collection of community-contributed experiments that reproduce the experiments performed in popular federated learning publications. 
Researchers can build on Flower Baselines to quickly evaluate new ideas:\n\n* [FedAvg](https://arxiv.org/abs/1602.05629):\n * [MNIST](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/publications/fedavg_mnist)\n* [FedProx](https://arxiv.org/abs/1812.06127):\n * [MNIST](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/publications/fedprox_mnist)\n* [FedBN: Federated Learning on non-IID Features via Local Batch Normalization](https://arxiv.org/abs/2102.07623):\n * [Convergence Rate](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/publications/fedbn/convergence_rate)\n* [Adaptive Federated Optimization](https://arxiv.org/abs/2003.00295):\n * [CIFAR-10/100](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/publications/adaptive_federated_optimization)\n\nCheck the Flower documentation to learn more: [Using Baselines](https://flower.dev/docs/using-baselines.html)\n\nThe Flower community loves contributions! Make your work more visible and enable others to build on it by contributing it as a baseline: [Contributing Baselines](https://flower.dev/docs/contributing-baselines.html)\n\n## Flower Usage Examples\n\nSeveral code examples show different usage scenarios of Flower (in combination with popular machine learning frameworks such as PyTorch or TensorFlow).\n\nQuickstart examples:\n\n* [Quickstart (TensorFlow)](https://github.com/adap/flower/tree/main/examples/quickstart_tensorflow)\n* [Quickstart (PyTorch)](https://github.com/adap/flower/tree/main/examples/quickstart_pytorch)\n* [Quickstart (Hugging Face)](https://github.com/adap/flower/tree/main/examples/quickstart_huggingface)\n* [Quickstart (PyTorch Lightning)](https://github.com/adap/flower/tree/main/examples/quickstart_pytorch_lightning)\n* [Quickstart (fastai)](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)\n* [Quickstart (Pandas)](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)\n* [Quickstart (MXNet)](https://github.com/adap/flower/tree/main/examples/quickstart_mxnet)\n* [Quickstart (JAX)](https://github.com/adap/flower/tree/main/examples/quickstart_jax)\n* [Quickstart (scikit-learn)](https://github.com/adap/flower/tree/main/examples/sklearn-logreg-mnist)\n* [Quickstart (TFLite on Android)](https://github.com/adap/flower/tree/main/examples/android)\n\nOther [examples](https://github.com/adap/flower/tree/main/examples):\n\n* [Raspberry Pi & Nvidia Jetson Tutorial](https://github.com/adap/flower/tree/main/examples/embedded_devices)\n* [Android & TFLite](https://github.com/adap/flower/tree/main/examples/android)\n* [PyTorch: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/pytorch_from_centralized_to_federated)\n* [MXNet: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/mxnet_from_centralized_to_federated)\n* [Advanced Flower with TensorFlow/Keras](https://github.com/adap/flower/tree/main/examples/advanced_tensorflow)\n* [Advanced Flower with PyTorch](https://github.com/adap/flower/tree/main/examples/advanced_pytorch)\n* Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation_pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation_tensorflow))\n\n## Community\n\nFlower is built by a wonderful community of researchers and engineers. 
[Join Slack](https://flower.dev/join-slack) to meet them, [contributions](#contributing-to-flower) are welcome.\n\n<a href="https://github.com/adap/flower/graphs/contributors">\n <img src="https://contrib.rocks/image?repo=adap/flower" />\n</a>\n\n## Citation\n\nIf you publish work that uses Flower, please cite Flower as follows: \n\n```bibtex\n@article{beutel2020flower,\n title={Flower: A Friendly Federated Learning Research Framework},\n author={Beutel, Daniel J and Topal, Taner and Mathur, Akhil and Qiu, Xinchi and Fernandez-Marques, Javier and Gao, Yan and Sani, Lorenzo and Kwing, Hei Li and Parcollet, Titouan and Gusmão, Pedro PB de and Lane, Nicholas D}, \n journal={arXiv preprint arXiv:2007.14390},\n year={2020}\n}\n```\n\nPlease also consider adding your publication to the list of Flower-based publications in the docs, just open a Pull Request.\n\n## Contributing to Flower\n\nWe welcome contributions. Please see [CONTRIBUTING.md](CONTRIBUTING.md) to get started!\n',
  'author': 'The Flower Authors',
@@ -0,0 +1,180 @@
+# Copyright 2020 Adap GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Wrapper for configuring a Flower client for DP."""
+
+
+import copy
+from typing import Dict, Tuple
+
+import numpy as np
+
+from flwr.client.numpy_client import NumPyClient
+from flwr.common.dp import add_gaussian_noise, clip_by_l2
+from flwr.common.typing import Config, NDArrays, Scalar
+
+
+class DPFedAvgNumPyClient(NumPyClient):
+    """Wrapper for configuring a Flower client for DP."""
+
+    def __init__(self, client: NumPyClient) -> None:
+        super().__init__()
+        self.client = client
+
+    def get_properties(self, config: Config) -> Dict[str, Scalar]:
+        """Get client properties using the given Numpy client.
+
+        Parameters
+        ----------
+        config : Config
+            Configuration parameters requested by the server.
+            This can be used to tell the client which properties
+            are needed along with some Scalar attributes.
+
+        Returns
+        -------
+        properties : Dict[str, Scalar]
+            A dictionary mapping arbitrary string keys to values of type
+            bool, bytes, float, int, or str. It can be used to communicate
+            arbitrary property values back to the server.
+        """
+        return self.client.get_properties(config)
+
+    def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays:
+        """Return the current local model parameters.
+
+        Parameters
+        ----------
+        config : Config
+            Configuration parameters requested by the server.
+            This can be used to tell the client which parameters
+            are needed along with some Scalar attributes.
+
+        Returns
+        -------
+        parameters : NDArrays
+            The local model parameters as a list of NumPy ndarrays.
+        """
+        return self.client.get_parameters(config)
+
+    def fit(
+        self, parameters: NDArrays, config: Dict[str, Scalar]
+    ) -> Tuple[NDArrays, int, Dict[str, Scalar]]:
+        """Train the provided parameters using the locally held dataset.
+
+        This method first updates the local model using the original parameters
+        provided. It then calculates the update by subtracting the original
+        parameters from the updated model. The update is then clipped by an L2
+        norm and Gaussian noise is added if specified by the configuration.
+
+        The update is then applied to the original parameters to obtain the
+        updated parameters which are returned along with the number of examples
+        used and metrics computed during the fitting process.
+
+        Parameters
+        ----------
+        parameters : NDArrays
+            The current (global) model parameters.
+        config : Dict[str, Scalar]
+            Configuration parameters which allow the
+            server to influence training on the client. It can be used to
+            communicate arbitrary values from the server to the client, for
+            example, to set the number of (local) training epochs.
+
+        Returns
+        -------
+        parameters : NDArrays
+            The locally updated model parameters.
+        num_examples : int
+            The number of examples used for training.
+        metrics : Dict[str, Scalar]
+            A dictionary mapping arbitrary string keys to values of type
+            bool, bytes, float, int, or str. It can be used to communicate
+            arbitrary values back to the server.
+
+        Raises
+        ------
+        Exception
+            If any required configuration parameters are not provided or are of
+            the wrong type.
+        """
+        original_params = copy.deepcopy(parameters)
+        # Getting the updated model from the wrapped client
+        updated_params, num_examples, metrics = self.client.fit(parameters, config)
+
+        # Update = updated model - original model
+        update = [np.subtract(x, y) for (x, y) in zip(updated_params, original_params)]
+
+        if "dpfedavg_clip_norm" not in config:
+            raise Exception("Clipping threshold not supplied by the server.")
+        if not isinstance(config["dpfedavg_clip_norm"], float):
+            raise Exception("Clipping threshold should be a floating point value.")
+
+        # Clipping
+        update, clipped = clip_by_l2(update, config["dpfedavg_clip_norm"])
+
+        if "dpfedavg_noise_stddev" in config:
+            if not isinstance(config["dpfedavg_noise_stddev"], float):
+                raise Exception(
+                    "Scale of noise to be added should be a floating point value."
+                )
+            # Noising
+            update = add_gaussian_noise(update, config["dpfedavg_noise_stddev"])
+
+        for i, _ in enumerate(original_params):
+            updated_params[i] = original_params[i] + update[i]
+
+        # Calculating value of norm indicator bit, required for adaptive clipping
+        if "dpfedavg_adaptive_clip_enabled" in config:
+            if not isinstance(config["dpfedavg_adaptive_clip_enabled"], bool):
+                raise Exception(
+                    "dpfedavg_adaptive_clip_enabled should be a boolean-valued flag."
+                )
+            metrics["dpfedavg_norm_bit"] = not clipped
+
+        return updated_params, num_examples, metrics
+
+    def evaluate(
+        self, parameters: NDArrays, config: Dict[str, Scalar]
+    ) -> Tuple[float, int, Dict[str, Scalar]]:
+        """Evaluate the provided parameters using the locally held dataset.
+
+        Parameters
+        ----------
+        parameters : NDArrays
+            The current (global) model parameters.
+        config : Dict[str, Scalar]
+            Configuration parameters which allow the server to influence
+            evaluation on the client. It can be used to communicate
+            arbitrary values from the server to the client, for example,
+            to influence the number of examples used for evaluation.
+
+        Returns
+        -------
+        loss : float
+            The evaluation loss of the model on the local dataset.
+        num_examples : int
+            The number of examples used for evaluation.
+        metrics : Dict[str, Scalar]
+            A dictionary mapping arbitrary string keys to values of
+            type bool, bytes, float, int, or str. It can be used to
+            communicate arbitrary values back to the server.
+
+        Warning
+        -------
+        The previous return type format (int, float, float) and the
+        extended format (int, float, float, Dict[str, Scalar]) have been
+        deprecated and removed since Flower 0.19.
+        """
+        return self.client.evaluate(parameters, config)
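For orientation, here is a minimal client-side usage sketch (not part of this release): it wraps a user-defined NumPyClient in the new DPFedAvgNumPyClient so that model updates are clipped and, if requested by the server, noised before being returned. `MyClient`, its toy parameter values, and the server address are placeholders; the DP config keys ("dpfedavg_clip_norm", "dpfedavg_noise_stddev", "dpfedavg_adaptive_clip_enabled") are supplied by the server-side strategy at fit time, not by the client.

    # Hypothetical sketch, assuming a user-defined NumPyClient named MyClient.
    import flwr as fl
    import numpy as np

    from flwr.client.dpfedavg_numpy_client import DPFedAvgNumPyClient
    from flwr.client.numpy_client import NumPyClient


    class MyClient(NumPyClient):
        """Toy client; a real client would train and evaluate an actual model."""

        def get_parameters(self, config):
            return [np.zeros(3)]

        def fit(self, parameters, config):
            # Pretend to train: shift the parameters slightly.
            return [p + 0.1 for p in parameters], 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    # The wrapper delegates every call to the inner client; in fit() it computes
    # update = updated_params - original_params, clips the update to the L2 norm
    # given by config["dpfedavg_clip_norm"], optionally adds Gaussian noise with
    # stddev config["dpfedavg_noise_stddev"], and reports "dpfedavg_norm_bit"
    # when adaptive clipping is enabled.
    dp_client = DPFedAvgNumPyClient(MyClient())

    fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=dp_client)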
@@ -85,10 +85,12 @@ class DriverClientManager(ClientManager):
         raise NotImplementedError("DriverClientManager.unregister is not implemented")

     def all(self) -> Dict[str, ClientProxy]:
+        """Return all available clients."""
         self._update_nodes()
         return self.clients

     def wait_for(self, num_clients: int, timeout: int = 86400) -> bool:
+        """Wait until at least `num_clients` are available."""
         start_time = time.time()
         while time.time() < start_time + timeout:
             self._update_nodes()
@@ -103,6 +105,7 @@ class DriverClientManager(ClientManager):
         min_num_clients: Optional[int] = None,
         criterion: Optional[Criterion] = None,
     ) -> List[ClientProxy]:
+        """Sample a number of Flower ClientProxy instances."""
        if min_num_clients is None:
            min_num_clients = num_clients
        self.wait_for(min_num_clients)
@@ -128,6 +131,12 @@ class DriverClientManager(ClientManager):
         return [self.clients[cid] for cid in sampled_cids]

     def _update_nodes(self) -> None:
+        """Update the nodes list in the client manager.
+
+        This method communicates with the associated driver to get all node ids. Each
+        node id is then converted into a `DriverClientProxy` instance and stored in the
+        `clients` dictionary with node id as key.
+        """
         get_nodes_res = self.driver.get_nodes(req=driver_pb2.GetNodesRequest())
         all_node_ids = get_nodes_res.node_ids
         for node_id in all_node_ids:
@@ -158,9 +158,17 @@ class InMemoryState(State):
             del self.task_res_store[task_id]

     def num_task_ins(self) -> int:
+        """Calculate the number of task_ins in store.
+
+        This includes delivered but not yet deleted task_ins.
+        """
         return len(self.task_ins_store)

     def num_task_res(self) -> int:
+        """Calculate the number of task_res in store.
+
+        This includes delivered but not yet deleted task_res.
+        """
         return len(self.task_res_store)

     def register_node(self, node_id: int) -> None:
@@ -113,6 +113,7 @@ class DPFedAvgAdaptive(DPFedAvgFixed):
         results: List[Tuple[ClientProxy, FitRes]],
         failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
     ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
+        """Aggregate training results as in DPFedAvgFixed and update clip norms."""
         if failures:
             return None, {}
         new_global_model = super().aggregate_fit(server_round, results, failures)
@@ -67,12 +67,34 @@ class DPFedAvgFixed(Strategy):
     def initialize_parameters(
         self, client_manager: ClientManager
     ) -> Optional[Parameters]:
+        """Initialize global model parameters using given strategy."""
         return self.strategy.initialize_parameters(client_manager)

     def configure_fit(
         self, server_round: int, parameters: Parameters, client_manager: ClientManager
     ) -> List[Tuple[ClientProxy, FitIns]]:
-        """Configure the next round of training."""
+        """Configure the next round of training incorporating Differential Privacy (DP).
+
+        Configuration of the next training round includes information related to DP,
+        such as clip norm and noise stddev.
+
+        Parameters
+        ----------
+        server_round : int
+            The current round of federated learning.
+        parameters : Parameters
+            The current (global) model parameters.
+        client_manager : ClientManager
+            The client manager which holds all currently connected clients.
+
+        Returns
+        -------
+        fit_configuration : List[Tuple[ClientProxy, FitIns]]
+            A list of tuples. Each tuple in the list identifies a `ClientProxy` and the
+            `FitIns` for this particular `ClientProxy`. If a particular `ClientProxy`
+            is not included in this list, it means that this `ClientProxy`
+            will not participate in the next round of federated learning.
+        """
         additional_config = {"dpfedavg_clip_norm": self.clip_norm}
         if not self.server_side_noising:
             additional_config[
@@ -91,6 +113,26 @@ class DPFedAvgFixed(Strategy):
     def configure_evaluate(
         self, server_round: int, parameters: Parameters, client_manager: ClientManager
     ) -> List[Tuple[ClientProxy, EvaluateIns]]:
+        """Configure the next round of evaluation using the specified strategy.
+
+        Parameters
+        ----------
+        server_round : int
+            The current round of federated learning.
+        parameters : Parameters
+            The current (global) model parameters.
+        client_manager : ClientManager
+            The client manager which holds all currently connected clients.
+
+        Returns
+        -------
+        evaluate_configuration : List[Tuple[ClientProxy, EvaluateIns]]
+            A list of tuples. Each tuple in the list identifies a `ClientProxy` and the
+            `EvaluateIns` for this particular `ClientProxy`. If a particular
+            `ClientProxy` is not included in this list, it means that this
+            `ClientProxy` will not participate in the next round of federated
+            evaluation.
+        """
         return self.strategy.configure_evaluate(
             server_round, parameters, client_manager
         )
@@ -101,6 +143,7 @@ class DPFedAvgFixed(Strategy):
         results: List[Tuple[ClientProxy, FitRes]],
         failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
     ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
+        """Aggregate training results using unweighted aggregation."""
         if failures:
             return None, {}
         # Forcing unweighted aggregation, as in https://arxiv.org/abs/1905.03871.
@@ -121,9 +164,11 @@ class DPFedAvgFixed(Strategy):
         results: List[Tuple[ClientProxy, EvaluateRes]],
         failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],
     ) -> Tuple[Optional[float], Dict[str, Scalar]]:
+        """Aggregate evaluation losses using the given strategy."""
         return self.strategy.aggregate_evaluate(server_round, results, failures)

     def evaluate(
         self, server_round: int, parameters: Parameters
     ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
+        """Evaluate model parameters using an evaluation function from the strategy."""
         return self.strategy.evaluate(server_round, parameters)
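As a server-side counterpart, here is a hedged sketch (not part of this diff) of how DPFedAvgFixed is meant to wrap a conventional strategy; the constructor arguments shown (num_sampled_clients, clip_norm, server_side_noising) are assumed from the surrounding code rather than quoted from it.

    # Assumed usage sketch: wrapping FedAvg with the DP-enforcing strategy.
    import flwr as fl
    from flwr.server.strategy import DPFedAvgFixed, FedAvg

    base_strategy = FedAvg(fraction_fit=1.0, min_fit_clients=2, min_available_clients=2)

    # configure_fit() injects "dpfedavg_clip_norm" (and "dpfedavg_noise_stddev"
    # when server_side_noising is disabled) into each client's FitIns config;
    # aggregate_fit() forces unweighted aggregation as in arXiv:1905.03871.
    dp_strategy = DPFedAvgFixed(
        strategy=base_strategy,
        num_sampled_clients=2,     # assumed argument: clients sampled per round
        clip_norm=0.5,             # L2 clipping threshold sent to clients
        server_side_noising=True,  # noise added on the server instead of clients
    )

    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=dp_strategy,
    )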
@@ -73,8 +73,7 @@ def tensorboard(logdir: str) -> Callable[[Strategy], Strategy]:
         """Return overloaded Strategy Wrapper."""

         class TBWrapper(strategy_class):  # type: ignore
-            """Strategy wrapper which hooks into some methods for TensorBoard
-            logging."""
+            """Strategy wrapper that hooks into some methods for TensorBoard logging."""

             def aggregate_evaluate(
                 self,
@@ -1,83 +0,0 @@
-# Copyright 2020 Adap GmbH. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Wrapper for configuring a Flower client for DP."""
-
-
-import copy
-from typing import Dict, Tuple
-
-import numpy as np
-
-from flwr.client.numpy_client import NumPyClient
-from flwr.common.dp import add_gaussian_noise, clip_by_l2
-from flwr.common.typing import Config, NDArrays, Scalar
-
-
-class DPFedAvgNumPyClient(NumPyClient):
-    """Wrapper for configuring a Flower client for DP."""
-
-    def __init__(self, client: NumPyClient) -> None:
-        super().__init__()
-        self.client = client
-
-    def get_properties(self, config: Config) -> Dict[str, Scalar]:
-        return self.client.get_properties(config)
-
-    def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays:
-        return self.client.get_parameters(config)
-
-    def fit(
-        self, parameters: NDArrays, config: Dict[str, Scalar]
-    ) -> Tuple[NDArrays, int, Dict[str, Scalar]]:
-        original_params = copy.deepcopy(parameters)
-        # Getting the updated model from the wrapped client
-        updated_params, num_examples, metrics = self.client.fit(parameters, config)
-
-        # Update = updated model - original model
-        update = [np.subtract(x, y) for (x, y) in zip(updated_params, original_params)]
-
-        if "dpfedavg_clip_norm" not in config:
-            raise Exception("Clipping threshold not supplied by the server.")
-        if not isinstance(config["dpfedavg_clip_norm"], float):
-            raise Exception("Clipping threshold should be a floating point value.")
-
-        # Clipping
-        update, clipped = clip_by_l2(update, config["dpfedavg_clip_norm"])
-
-        if "dpfedavg_noise_stddev" in config:
-            if not isinstance(config["dpfedavg_noise_stddev"], float):
-                raise Exception(
-                    "Scale of noise to be added should be a floating point value."
-                )
-            # Noising
-            update = add_gaussian_noise(update, config["dpfedavg_noise_stddev"])
-
-        for i, _ in enumerate(original_params):
-            updated_params[i] = original_params[i] + update[i]
-
-        # Calculating value of norm indicator bit, required for adaptive clipping
-        if "dpfedavg_adaptive_clip_enabled" in config:
-            if not isinstance(config["dpfedavg_adaptive_clip_enabled"], bool):
-                raise Exception(
-                    "dpfedavg_adaptive_clip_enabled should be a boolean-valued flag."
-                )
-            metrics["dpfedavg_norm_bit"] = not clipped
-
-        return updated_params, num_examples, metrics
-
-    def evaluate(
-        self, parameters: NDArrays, config: Dict[str, Scalar]
-    ) -> Tuple[float, int, Dict[str, Scalar]]:
-        return self.client.evaluate(parameters, config)