cvdlink 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. FeatureCloud/__init__.py +0 -0
  2. FeatureCloud/api/__init__.py +0 -0
  3. FeatureCloud/api/cli/__init__.py +0 -0
  4. FeatureCloud/api/cli/__main__.py +115 -0
  5. FeatureCloud/api/cli/app/__init__.py +0 -0
  6. FeatureCloud/api/cli/app/commands.py +182 -0
  7. FeatureCloud/api/cli/controller/__init__.py +0 -0
  8. FeatureCloud/api/cli/controller/commands.py +181 -0
  9. FeatureCloud/api/cli/test/__init__.py +0 -0
  10. FeatureCloud/api/cli/test/commands.py +251 -0
  11. FeatureCloud/api/cli/test/workflow/__init__.py +0 -0
  12. FeatureCloud/api/cli/test/workflow/commands.py +32 -0
  13. FeatureCloud/api/imp/__init__.py +0 -0
  14. FeatureCloud/api/imp/app/__init__.py +0 -0
  15. FeatureCloud/api/imp/app/commands.py +278 -0
  16. FeatureCloud/api/imp/controller/__init__.py +0 -0
  17. FeatureCloud/api/imp/controller/commands.py +246 -0
  18. FeatureCloud/api/imp/exceptions.py +29 -0
  19. FeatureCloud/api/imp/test/__init__.py +0 -0
  20. FeatureCloud/api/imp/test/api/__init__.py +0 -0
  21. FeatureCloud/api/imp/test/api/backend/__init__.py +0 -0
  22. FeatureCloud/api/imp/test/api/backend/auth.py +54 -0
  23. FeatureCloud/api/imp/test/api/backend/project.py +84 -0
  24. FeatureCloud/api/imp/test/api/controller.py +97 -0
  25. FeatureCloud/api/imp/test/commands.py +124 -0
  26. FeatureCloud/api/imp/test/helper.py +40 -0
  27. FeatureCloud/api/imp/util.py +45 -0
  28. FeatureCloud/app/__init__.py +0 -0
  29. FeatureCloud/app/api/__init__.py +0 -0
  30. FeatureCloud/app/api/http_ctrl.py +48 -0
  31. FeatureCloud/app/api/http_web.py +16 -0
  32. FeatureCloud/app/engine/__init__.py +0 -0
  33. FeatureCloud/app/engine/app.py +1214 -0
  34. FeatureCloud/app/engine/library.py +46 -0
  35. FeatureCloud/workflow/__init__.py +0 -0
  36. FeatureCloud/workflow/app.py +197 -0
  37. FeatureCloud/workflow/controller.py +17 -0
  38. FeatureCloud/workflow/example_wf.py +83 -0
  39. FeatureCloud/workflow/workflow.py +86 -0
  40. cvdlink-0.1.1.dist-info/METADATA +176 -0
  41. cvdlink-0.1.1.dist-info/RECORD +45 -0
  42. cvdlink-0.1.1.dist-info/WHEEL +5 -0
  43. cvdlink-0.1.1.dist-info/entry_points.txt +5 -0
  44. cvdlink-0.1.1.dist-info/licenses/LICENSE +201 -0
  45. cvdlink-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1214 @@
1
+ """
2
+ Test module documentation string for app.py
3
+ """
4
+ import abc
5
+ import datetime
6
+ import json
7
+ import numpy as np
8
+ import pickle
9
+ import sys
10
+ import threading
11
+ import traceback
12
+ import urllib.parse
13
+
14
+ from enum import Enum
15
+ from time import sleep
16
+ from typing import Dict, List, Tuple, Union, TypedDict, Literal
17
+
18
# Timing constants (all in seconds).
DATA_POLL_INTERVAL = 0.1  # how often to poll for newly arrived data pieces; adapt if necessary
TERMINAL_WAIT = 10  # grace period before final shutdown so the controller can pick up the newest progress etc.
TRANSITION_WAIT = 1  # pause between state transitions
22
+
23
+
24
class Role(Enum):
    """Role of a client in a FeatureCloud run.

    Each member's value is a ``(participant, coordinator)`` boolean pair.
    One of: ``Role.PARTICIPANT``, ``Role.COORDINATOR``, ``Role.BOTH``.
    """

    PARTICIPANT = (True, False)
    COORDINATOR = (False, True)
    BOTH = (True, True)
35
+
36
+
37
class State(Enum):
    """Current state of the app instance as reported to the controller.

    One of: ``State.RUNNING``, ``State.ERROR``, ``State.ACTION``.
    """

    RUNNING = 'running'
    ERROR = 'error'
    ACTION = 'action_required'
48
+
49
+
50
class SMPCOperation(Enum):
    """Aggregation operation performed during SMPC.

    One of: ``SMPCOperation.ADD``, ``SMPCOperation.MULTIPLY``.
    """

    ADD = 'add'
    MULTIPLY = 'multiply'
59
+
60
+
61
class SMPCSerialization(Enum):
    """Serialization format used for data exchanged via SMPC between
    FeatureCloud components (e.g. app instance and controller).

    Currently only ``SMPCSerialization.JSON`` is supported.
    """

    JSON = 'json'
68
+
69
+
70
class LogLevel(Enum):
    """Severity passed to the ``log`` functions.

    DEBUG: informational output; ERROR: reported on stderr without halting;
    FATAL: raises an exception and stops the program.
    """

    DEBUG = 'info'  # NOTE: deliberately maps to the string 'info'
    ERROR = 'error'
    FATAL = 'fatal'
80
+
81
class DPNoisetype(Enum):
    """Noise distribution used for differential privacy: GAUSS or LAPLACE."""

    GAUSS = 'gauss'
    LAPLACE = 'laplace'
84
+
85
class DPSerialization(Enum):
    """Serialization format used with differential privacy (JSON only)."""

    JSON = 'json'
87
+
88
+
89
class SMPCType(TypedDict):
    """SMPC configuration dictionary as sent to the controller."""

    operation: Literal['add', 'multiply']
    serialization: Literal['json']
    shards: int
    exponent: int
94
+
95
class DPType(TypedDict):
    """Differential-privacy configuration dictionary as sent to the controller."""

    serialization: Literal['json']
    noisetype: Literal['gauss', 'laplace']
    epsilon: float
    delta: float
    sensitivity: Union[float, None]
    clippingVal: Union[float, None]
102
+
103
class App:
    """State machine driving a FeatureCloud app instance.

    The controller interacts with the app through ``handle_setup`` (run
    context), ``handle_incoming`` (data arriving from other clients) and
    ``handle_status``/``handle_outgoing`` (data leaving this client).
    App developers register ``AppState`` subclasses plus the transitions
    between them; ``run`` then executes states until the implicit
    ``terminal`` state is reached.

    Attributes
    ----------
    id : str
        this client's id (set in handle_setup)
    coordinator : bool
        whether this instance is the coordinator
    coordinatorID : str
        id of the coordinator instance
    clients : list
        ids of all participating clients
    send_counter, receive_counter : int
        counters used to derive automatic memos for communication rounds
    data_incoming : dict
        memo -> [(data, sender_client_id), ...]
    data_outgoing : list
        [(data, status), ...] payloads queued for the controller, each
        paired with the status object under which it must be delivered
    default_smpc : SMPCType
    default_dp : DPType
    states : Dict[str, AppState]
    transitions : Dict[str, Tuple[AppState, AppState, bool, bool, str]]
        name -> (source, target, participant, coordinator, label)
    transition_log : List[Tuple[datetime.datetime, str]]
    internal : dict
        free-form storage shared between states
    current_state : AppState
    last_send_status
        status answered to the most recent GET /status poll
    """

    def __init__(self):
        # Identity / topology; filled in by handle_setup().
        self.id = None
        self.coordinator = None
        self.coordinatorID = None
        self.clients = None
        self.default_memo = None
        self.thread: Union[threading.Thread, None] = None

        # Automatic memo counters: send_counter grows with every
        # send_data_to_coordinator call, receive_counter with every
        # aggregate_data/gather_data call and with await_data calls that
        # look like a gather (n == #clients, n == #clients - 1, or n == 1
        # with SMPC).  This covers nearly all cases except point-to-point
        # sends from every client that are awaited individually.
        self.send_counter: int = 0
        self.receive_counter: int = 0

        # memo -> [(data, sender_client_id), ...]; data is JSON-serialized
        # when SMPC or DP is used, otherwise serialized however the sender
        # chose (usually pickle).
        self.data_incoming = {}
        # Outgoing queue: list of (data, status) tuples.
        self.data_outgoing = []

        self.default_smpc: SMPCType = {'operation': 'add', 'serialization': 'json', 'shards': 0, 'exponent': 8}
        self.default_dp: DPType = {'serialization': 'json', 'noisetype': 'laplace',
                                   'epsilon': 1.0, 'delta': 0.0,
                                   'sensitivity': None, 'clippingVal': 10.0}

        self.current_state: Union[AppState, None] = None
        self.states: Dict[str, AppState] = {}
        # name -> (source, target, participant, coordinator, label)
        self.transitions: Dict[str, Tuple[AppState, AppState, bool, bool, str]] = {}

        self.transition_log: List[Tuple[datetime.datetime, str]] = []

        self.internal = {}

        # Status flags mirrored to the controller via GET /status.
        self.status_available: bool = False
        self.status_finished: bool = False
        self.status_message: Union[str, None] = None
        self.status_progress: Union[float, None] = None
        self.status_state: Union[str, None] = None
        self.status_destination: Union[str, None] = None
        self.status_smpc: Union[SMPCType, None] = None
        self.status_dp: Union[DPType, None] = None
        self.status_memo: Union[str, None] = None

        self.last_send_status = self.get_current_status()

        # Every app implicitly ends in this terminal state.
        @app_state('terminal', Role.BOTH, self)
        class TerminalState(AppState):
            def register(self):
                pass

            def run(self) -> Union[str, None]:
                pass

    def handle_setup(self, client_id, coordinator, clients, coordinatorID=None):
        """Called once on startup with the execution context of this
        instance; stores it and starts the workflow thread.

        Parameters
        ----------
        client_id : str
        coordinator : bool
        clients : list
        coordinatorID : str
        """
        self.id = client_id
        self.coordinator = coordinator
        self.coordinatorID = coordinatorID
        self.clients = clients

        self.log(f'id: {self.id}')
        self.log(f'coordinator: {self.coordinator}')
        self.log(f'clients: {self.clients}')

        self.current_state = self.states.get('initial')

        if not self.current_state:
            self.log('initial state not found', level=LogLevel.FATAL)

        self.thread = threading.Thread(target=self.guarded_run)
        self.thread.start()

    def guarded_run(self):
        """Run the workflow, converting any exception into an error status
        that crashes the workflow on the controller's next poll."""
        try:
            self.run()
        except Exception as e:  # catch all # noqa
            self.log(traceback.format_exc())
            self.status_message = e.__class__.__name__
            self.status_state = State.ERROR.value
            self.status_finished = True
            # Put the error status ahead of ANY pending data so the next
            # poll sees it immediately.
            self.data_outgoing.insert(0, (None, self.get_current_status(
                finished=True, state=State.ERROR.value,
                message=e.__class__.__name__)))

    def run(self):
        """Execute states and transitions until the terminal state is
        reached, then drain the outgoing queue and return."""
        while True:
            self.log(f'state: {self.current_state.name}')
            transition = self.current_state.run()
            self.log(f'transition: {transition}')
            self.transition(f'{self.current_state.name}_{transition}')
            if self.current_state.name == 'terminal':
                sleep(TERMINAL_WAIT)
                terminal_status_added = False
                while True:
                    if not terminal_status_added:
                        # Append (not insert) the finished status so all
                        # data still in the pipe is sent out first.
                        status = self.get_current_status(progress=1.0,
                                                         message="terminal",
                                                         finished=True)
                        self.data_outgoing.append((None, status))
                        terminal_status_added = True
                    sleep(TERMINAL_WAIT)
                    # potentially this wait time clears the queue already
                    if len(self.data_outgoing) > 1:
                        # there is still data to be sent out
                        self.log(f'done, waiting for the last {len(self.data_outgoing)-1} data pieces to be send')
                    elif len(self.data_outgoing) == 1:
                        # Only the terminal status (which is never removed)
                        # remains; wait once more so it can be pulled.
                        sleep(TERMINAL_WAIT)
                        self.log('done')
                        return
                    sleep(TRANSITION_WAIT)
            sleep(TRANSITION_WAIT)

    def register(self):
        """Register the transitions of all states; call once all states
        have been added."""
        for state in self.states.values():
            state.register()

    def get_current_status(self, **kwargs):
        """Return the current status dictionary, with any keyword argument
        overriding the corresponding field."""
        status = {
            "available": self.status_available,
            "finished": self.status_finished,
            "message": self.status_message,
            "progress": self.status_progress,
            "state": self.status_state,
            "destination": self.status_destination,
            "smpc": self.status_smpc,
            "dp": self.status_dp,
            "memo": self.status_memo,
        }
        status.update(kwargs)
        return status

    def handle_incoming(self, data, client, memo=None):
        """Store a newly arrived data piece so app states can consume it.

        Parameters
        ----------
        data : list
            encoded data
        client : str
            id of the client that sent the data
        memo : str or None
            identifier of the communication round the data belongs to
        """
        self.data_incoming.setdefault(memo, []).append((data, client))

    def handle_status(self):
        """Return the status for the next GET /status poll: signals whether
        data is available and how it should be sent."""
        # Ensure that some message is set.
        self.status_message = self.status_message if self.status_message else (self.current_state.name if self.current_state else None)
        if len(self.data_outgoing) == 0:
            # Nothing to send: default status with available=False.
            return self.get_current_status(available=False)

        # Take the status of the next payload; the (data, status) pair is
        # popped later by handle_outgoing on the controller's GET request,
        # so both must stay in the queue for now.
        _, status = self.data_outgoing[0]
        self.last_send_status = status
        return status

    def handle_outgoing(self):
        """Pop and return the next payload for the controller, verifying it
        matches the status last announced via handle_status.

        Raises
        ------
        Exception
            if the announced status and the payload's status diverged
            (race condition)
        """
        if len(self.data_outgoing) == 0:
            return None

        data, status = self.data_outgoing.pop(0)

        # The controller must have been told exactly this status before
        # pulling this payload.
        if status != self.last_send_status:
            # BUGFIX: message previously read "...got sent adifferent
            # GET/status object that the one intended with this data object."
            raise Exception(
                "Race condition error, the controller got sent a "
                "different GET/status object than the one intended for this "
                "data object. Needed status object: {}, actual status "
                "object: {}".format(status, self.last_send_status))

        return data

    def _register_state(self, name, state, participant, coordinator, **kwargs):
        """Instantiate a state class, wire it to this app and add it to the
        workflow.

        Parameters
        ----------
        name : str
        state : AppState subclass
        participant : bool
        coordinator : bool
        """
        # BUGFIX: previously checked self.transitions, so a duplicate state
        # name was never detected.
        if self.states.get(name):
            self.log(f'state {name} already exists', level=LogLevel.FATAL)

        si = state(**kwargs)
        si._app = self
        si.name = name
        si.participant = participant
        si.coordinator = coordinator
        self.states[si.name] = si

    def register_transition(self, name: str, source: str, target: str, participant=True, coordinator=True, label: Union[str, None] = None):
        """Validate and record a transition between two registered states.

        Parameters
        ----------
        name : str
            name of the transition
        source : str
            name of the source state
        target : str
            name of the target state
        participant : bool
            whether participants may take this transition
        coordinator : bool
            whether the coordinator may take this transition
        label : str or None
            optional human-readable label
        """
        if not participant and not coordinator:
            self.log('either participant or coordinator must be True', level=LogLevel.FATAL)

        if self.transitions.get(name):
            self.log(f'transition {name} already exists', level=LogLevel.FATAL)

        source_state = self.states.get(source)
        if not source_state:
            self.log(f'source state {source} not found', level=LogLevel.FATAL)
        if participant and not source_state.participant:
            self.log(f'source state {source} not accessible for participants', level=LogLevel.FATAL)
        if coordinator and not source_state.coordinator:
            self.log(f'source state {source} not accessible for the coordinator', level=LogLevel.FATAL)

        target_state = self.states.get(target)
        if not target_state:
            self.log(f'target state {target} not found', level=LogLevel.FATAL)
        if participant and not target_state.participant:
            self.log(f'target state {target} not accessible for participants', level=LogLevel.FATAL)
        if coordinator and not target_state.coordinator:
            self.log(f'target state {target} not accessible for the coordinator', level=LogLevel.FATAL)

        self.transitions[name] = (source_state, target_state, participant, coordinator, label)

    def transition(self, name):
        """Move the workflow to the next state via the named transition,
        after checking that it starts at the current state and is allowed
        for this client's role.

        Parameters
        ----------
        name : str
            transition name (``<current_state>_<transition>``)
        """
        transition = self.transitions.get(name)
        if not transition:
            self.log(f'transition {name} not found', level=LogLevel.FATAL)
        source, target, for_participant, for_coordinator, _label = transition
        if source != self.current_state:
            self.log('current state unequal to source state', level=LogLevel.FATAL)
        if not for_participant and not self.coordinator:
            self.log(f'cannot perform transition {name} as participant', level=LogLevel.FATAL)
        if not for_coordinator and self.coordinator:
            self.log(f'cannot perform transition {name} as coordinator', level=LogLevel.FATAL)

        self.transition_log.append((datetime.datetime.now(), name))
        self.current_state = target
        self.status_message = self.current_state.name

    def log(self, msg, level: LogLevel = LogLevel.DEBUG):
        """Print a log message or raise, depending on the level.

        Parameters
        ----------
        msg : str
            message to be displayed
        level : LogLevel, default=LogLevel.DEBUG
            DEBUG -> stdout, ERROR -> stderr, FATAL -> RuntimeError
        """
        msg = f'[Time: {datetime.datetime.now().strftime("%d.%m.%y %H:%M:%S")}] [Level: {level.value}] {msg}'

        if level == LogLevel.FATAL:
            raise RuntimeError(msg)
        if level == LogLevel.ERROR:
            print(msg, flush=True, file=sys.stderr)
        else:
            print(msg, flush=True)
500
+
501
class AppState(abc.ABC):
    """Base class for the states of a FeatureCloud app.

    A FeatureCloud app is a collection of states that all share this
    interface for computation and for communicating with other clients.
    Concrete states are declared as subclasses via the
    ``@app_state("statename")`` decorator; see the template apps for
    examples.

    Attributes
    ----------
    name : str
        name of this state (assigned on registration)

    Properties
    ----------
    is_coordinator : bool
    clients : list[str]
    id : str
    """

    def __init__(self):
        # Both attributes are assigned by App._register_state when the
        # state is attached to an App instance.
        self._app = None
        self.name = None
526
+
527
+ @abc.abstractmethod
528
+ def register(self):
529
+ """ This is an abstract method that should be implemented by developers
530
+ it calls AppState.register_transition to register transitions for state.
531
+ it will be called in App.register method so that, once all states are defined,
532
+ in a verifiable way, all app transitions can be registered.
533
+
534
+ """
535
+
536
+ @abc.abstractmethod
537
+ def run(self) -> str:
538
+ """ It is an abstract method that should be implemented by developers,
539
+ to execute all local or global operation and calculations of the state.
540
+ It will be called in App.run() method so that the state perform its operations.
541
+
542
+ """
543
+
544
+ @property
545
+ def is_coordinator(self):
546
+ """ Boolean variable, if True the this AppState instance represents the
547
+ coordinator. False otherwise.
548
+
549
+ """
550
+ return self._app.coordinator
551
+
552
+ @property
553
+ def clients(self):
554
+ """ Contains a list of client IDs of all clients involved in the
555
+ current learning run.
556
+
557
+ """
558
+ return self._app.clients
559
+
560
+ @property
561
+ def id(self):
562
+ """ Contains the id of this client
563
+
564
+ """
565
+ return self._app.id
566
+
567
+ @property
568
+ def coordintor_id(self):
569
+ """ Contains the id of the coordinator
570
+
571
+ """
572
+ return self._app.coordinatorID
573
+
574
+ def register_transition(self, target: str, role: Role = Role.BOTH, name: Union[str, None] = None, label: Union[str, None] = None):
575
+ """
576
+ Registers a transition in the state machine.
577
+
578
+ Parameters
579
+ ----------
580
+ target : str
581
+ name of the target state
582
+ role : Role, default=Role.BOTH
583
+ role for which this transition is valid
584
+ name : str or None, default=None
585
+ name of the transition
586
+ """
587
+
588
+ if not name:
589
+ name = target
590
+ participant, coordinator = role.value
591
+ self._app.register_transition(f'{self.name}_{name}', self.name, target, participant, coordinator, label)
592
+
593
+ def aggregate_data(self, operation: SMPCOperation = SMPCOperation.ADD, use_smpc=False,
594
+ use_dp=False, memo=None):
595
+ """
596
+ Waits for all participants (including the coordinator instance)
597
+ to send data and returns the aggregated value. Will try to convert
598
+ each data piece to a np.array and aggregate those arrays.
599
+ Therefore, this method only works for numerical data and all datapieces
600
+ should be addable.
601
+
602
+ Parameters
603
+ ----------
604
+ operation : SMPCOperation
605
+ specifies the aggregation type
606
+ use_smpc : bool, default=False
607
+ if True, the data to be aggregated is expected to stem from an SMPC aggregation
608
+ use_dp: bool, default=False
609
+ if True, will assume that data was sent and modified with the
610
+ controllers differential privacy capacities (with use_dp=true in the
611
+ corresponding send function). This must be set in the receiving
612
+ function due to serialization differences with DP
613
+ memo : str or None, default=None
614
+ the string identifying a specific communication round.
615
+ Any app should ensure that this string is the same over all clients
616
+ over the same communication round and that a different string is
617
+ used for each communication round. This ensures that no race
618
+ condition problems occur
619
+ Returns
620
+ -------
621
+ aggregated value
622
+ """
623
+ if not memo:
624
+ self._app.receive_counter += 1
625
+ memo = f"GATHERROUND{self._app.receive_counter}"
626
+ # we need to use the urlencoded memo as this is what we reiceive
627
+ memo = urllib.parse.quote(memo)
628
+ if use_smpc:
629
+ return self.await_data(n=1, unwrap=True, is_json=True, memo=memo)
630
+ # Data is aggregated already
631
+ else:
632
+ data = self.gather_data(is_json=use_dp, memo=memo)
633
+ return _aggregate(data, operation)
634
+ # Data needs to be aggregated according to operation
635
+
636
+ def gather_data(self, is_json=False, use_smpc=False, use_dp=False, memo=None):
637
+ """
638
+ Waits for all participants (including the coordinator instance) to send data and returns a list containing the received data pieces. Only valid for the coordinator instance.
639
+
640
+ Parameters
641
+ ----------
642
+ is_json : bool, default=False
643
+ [deprecated] when data was sent via DP or SMPC, the data is sent in
644
+ JSON serialization. This was used to indicate this but is now
645
+ DEPRICATED, use use_smpc/use_dp accordingly instead, they will take
646
+ care of serialization automatically.
647
+ use_smpc: bool, default=False
648
+ Indicated whether the data that is gather was sent using SMPC.
649
+ If this is not set to True when data was sent using SMPC, this
650
+ function ends up in an endless loop
651
+ use_dp: bool, default=False
652
+ if True, will assume that data was sent and modified with the
653
+ controllers differential privacy capacities (with use_dp=true in the
654
+ corresponding send function). This must be set in the receiving
655
+ function due to serialization differences with DP
656
+ memo : str or None, default=None
657
+ the string identifying a specific communication round.
658
+ Any app should ensure that this string is the same over all clients
659
+ over the same communication round and that a different string is
660
+ used for each communication round. This ensures that no race
661
+ condition problems occur
662
+ Returns
663
+ -------
664
+ list of n data pieces, where n is the number of participants
665
+ """
666
+ if not self._app.coordinator:
667
+ self._app.log('must be coordinator to use gather_data', level=LogLevel.FATAL)
668
+ n = len(self._app.clients)
669
+ if use_smpc or use_dp:
670
+ is_json = True
671
+ if use_smpc:
672
+ n = 1
673
+ if not memo:
674
+ self._app.receive_counter += 1
675
+ memo = f"GATHERROUND{self._app.receive_counter}"
676
+ # we need to use the urlencoded memo as this is what we reiceive
677
+ memo = urllib.parse.quote(memo)
678
+ return self.await_data(n, unwrap=False, is_json=is_json, use_dp=use_dp,
679
+ use_smpc=use_smpc, memo=memo)
680
+
681
+ def await_data(self, n: int = 1, unwrap=True, is_json=False,
682
+ use_dp=False, use_smpc=False, memo=None):
683
+ """
684
+ Waits for n data pieces and returns them. It is highly recommended to
685
+ use the memo variable when using this method
686
+
687
+ Parameters
688
+ ----------
689
+ n : int, default=1
690
+ number of data pieces to wait for. Is ignored when use_smpc is used
691
+ as smpc data is aggregated by the controller, therefore only one
692
+ data piece is given when using smpc
693
+ unwrap : bool, default=True
694
+ if True, will return the first element of the collected data (only useful if n = 1)
695
+ is_json : bool, default=False
696
+ [deprecated] when data was sent via DP or SMPC, the data is sent in
697
+ JSON serialization. This was used to indicate this to deserailize
698
+ the data correctly, but is now DEPRICATED, use use_smpc/use_dp
699
+ accordingly instead, they will take care of serialization
700
+ automatically.
701
+ use_dp : bool, default=False
702
+ if True, will assume that data was sent and modified with the
703
+ controllers differential privacy capacities (with use_dp=true in the
704
+ corresponding send function). This must be set in the receiving
705
+ function due to serialization differences with DP
706
+ use_smpc: bool, default=False
707
+ if True, will ensure that n is set to 1 and the correct
708
+ serialization is used (SMPC uses JSON serialization)
709
+ memo : str or None, default=None
710
+ RECOMMENDED TO BE SET FOR THIS METHOD!
711
+ the string identifying a specific communication round. The same
712
+ string that is used in this call must be used before in the
713
+ corresponding sending functions.
714
+ Any app should ensure that this string is the same over all clients
715
+ over the same communication round and that a different string is
716
+ used for each communication round. This ensures that no race
717
+ condition problems occur.
718
+ Returns
719
+ -------
720
+ list of data pieces (if n > 1 or unwrap = False) or a single data piece (if n = 1 and unwrap = True)
721
+ """
722
+ if use_smpc:
723
+ n = 1
724
+ is_json = True
725
+ if use_dp:
726
+ is_json = True
727
+ if not memo and self._app.coordinator:
728
+ # only increment for the coordinator to really avoid any p2p
729
+ # problems
730
+ if (n == len(self._app.clients) and use_smpc==False) \
731
+ or ((n == len(self._app.clients) - 1 and use_smpc==False) and len(self._app.clients) > 2) \
732
+ or use_smpc == True:
733
+ # this is a gather/aggregate call, although in theory
734
+ # (n==1 and is_json=True) could also be an SMPC gather call,
735
+ # but it cannot be differentiate between that being an SMPC
736
+ # gather call or an p2p with DP call
737
+ self._app.receive_counter += 1
738
+ memo = f"GATHERROUND{self._app.receive_counter}"
739
+
740
+ try:
741
+ memo = str(memo)
742
+ except Exception as e:
743
+ self._app.log(
744
+ f"given memo cannot be translated to a string, ERROR: {e}",
745
+ LogLevel.Error)
746
+
747
+ # we need to use the urlencoded memo as this is what we reiceive
748
+ if memo:
749
+ memo = urllib.parse.quote(memo)
750
+
751
+ while True:
752
+ num_data_pieces = 0
753
+ if memo in self._app.data_incoming:
754
+ num_data_pieces = len(self._app.data_incoming[memo])
755
+ if num_data_pieces >= n:
756
+ # warn if too many data pieces came in
757
+ if num_data_pieces > n:
758
+ self._app.log(
759
+ f"await was used to wait for {n} data pieces, " +
760
+ f"but more data pieces ({num_data_pieces}) were found. " +
761
+ f"Used memo is <{memo}>",
762
+ LogLevel.ERROR)
763
+
764
+ # extract and deseralize the data
765
+ data = self._app.data_incoming[memo][:n]
766
+ self._app.data_incoming[memo] = self._app.data_incoming[memo][n:]
767
+ if len(self._app.data_incoming[memo]) == 0:
768
+ # clean up the dict regularly
769
+ del self._app.data_incoming[memo]
770
+
771
+ if n == 1 and unwrap:
772
+ return _deserialize_incoming(data[0][0], is_json=is_json)
773
+ else:
774
+ return [_deserialize_incoming(d[0], is_json=is_json) for d in data]
775
+
776
+ sleep(DATA_POLL_INTERVAL)
777
+
778
def send_data_to_participant(self, data, destination, use_dp=False,
                             memo=None):
    """
    Sends data to a particular participant identified by its ID. Should be
    used for any specific communication to individual clients.
    For the communication schema of all clients/all clients except the
    coordinator sending data to the coordinator, use send_data_to_coordinator

    Parameters
    ----------
    data : object
        data to be sent
    destination : str
        destination client ID, get this from e.g. self.clients
    use_dp : bool, default=False
        Whether to use differential privacy (dp) before sending out the data.
        To configure the hyperparameters of dp, use the configure_dp method
        before this method. The receiving method must also use the same
        setting of the use_dp flag or there will be serialization problems
    memo : str or None, default=None
        RECOMMENDED TO BE SET FOR THIS METHOD!
        the string identifying a specific communication round.
        This ensures that there are no race condition problems and the
        correct data piece can be identified by the recipient of the
        data piece sent with this function call. The recipient of this data
        must use the same memo to identify the data.
    """
    try:
        memo = str(memo)
    except Exception as e:
        # fix: LogLevel.ERROR — LogLevel.Error is not a member of the enum
        # (see the ERROR usage elsewhere in this file) and would itself
        # raise an AttributeError while trying to log.
        self._app.log(
            f"given memo cannot be translated to a string, ERROR: {e}",
            LogLevel.ERROR)

    # DP requires JSON serialization; otherwise pickle is used
    data = _serialize_outgoing(data, is_json=use_dp)
    if destination == self._app.id and not use_dp:
        # In the no-DP case, data addressed to ourselves does not have to
        # travel through the controller — loop it back locally.
        self._app.handle_incoming(data, client=self._app.id, memo=memo)
    else:
        # update the status variables and get the status object
        message = self._app.status_message if self._app.status_message else (self._app.current_state.name if self._app.current_state else None)
        dp = self._app.default_dp if use_dp else None
        self._app.status_message = message
        status = self._app.get_current_status(message=message,
                                              destination=destination, dp=dp, memo=memo,
                                              available=True)
        self._app.data_outgoing.append((data, json.dumps(status, sort_keys=True)))
827
def send_data_to_coordinator(self, data, send_to_self=True, use_smpc=False,
                             use_dp=False, memo=None):
    """
    Sends data to the coordinator instance. Must be used by all clients
    or all clients except for the coordinator itself when no memo is given,
    as the automated memo used when using memo=None breaks otherwise.
    If any subset of clients should communicate with the coordinator,
    either define the memo or use
    send_data_to_participant(destination=self.coordinator_id) with a memo.

    Parameters
    ----------
    data : object
        data to be sent
    send_to_self : bool, default=True
        if True, the data will also be sent internally to this instance
        (only applies to the coordinator instance)
    use_smpc : bool, default=False
        if True, the data will be sent as part of an SMPC aggregation step
    use_dp : bool, default=False
        Whether to use differential privacy (dp) before sending out the data.
        To configure the hyperparameters of dp, use the configure_dp method
        before this method. The receiving method must also use the same
        setting of the use_dp flag or there will be serialization problems
    memo : str or None, default=None
        the string identifying a specific communication round.
        This ensures that there are no race condition problems so that the
        coordinator uses the correct data piece from each client for each
        communication round. This also ensures that workflows where
        participants send data to the coordinator without waiting for a
        response work
    """
    # if no memo is given (default), we use the counter from App
    if not memo:
        self._app.send_counter += 1
        memo = f"GATHERROUND{self._app.send_counter}"
    # ensure memo can be sent as a string
    try:
        memo = str(memo)
    except Exception as e:
        # fix: LogLevel.ERROR — LogLevel.Error is not a valid enum member
        # and would raise an AttributeError while trying to log.
        self._app.log(
            f"given memo cannot be translated to a string, ERROR: {e}",
            LogLevel.ERROR)

    # SMPC and DP both require JSON serialization; pickle otherwise
    # (the two original branches were identical except for this flag)
    data = _serialize_outgoing(data, is_json=use_smpc or use_dp)

    if self._app.coordinator and not use_smpc and not use_dp:
        # coordinator sending itself data, if that is wanted (send_to_self),
        # and neither dp nor smpc are used, the controller does not have to
        # be used for sending the data
        if send_to_self:
            self._app.handle_incoming(data, self._app.id, memo)
    else:
        # for SMPC and DP, the data has to be sent via the controller
        if use_dp and self._app.coordinator:
            # give the coordinator as destination,
            # else, it will be interpreted as a broadcast action
            destination = self._app.id
        else:
            # destination None is interpreted as "to the coordinator"
            destination = None
        message = self._app.status_message if self._app.status_message else (self._app.current_state.name if self._app.current_state else None)
        self._app.status_message = message
        smpc = self._app.default_smpc if use_smpc else None
        dp = self._app.default_dp if use_dp else None
        status = self._app.get_current_status(message=message,
                                              destination=destination, smpc=smpc, dp=dp, memo=memo,
                                              available=True)
        self._app.data_outgoing.append((data, json.dumps(status, sort_keys=True)))
900
def broadcast_data(self, data, send_to_self=True, use_dp=False,
                   memo=None):
    """
    Broadcasts data to all participants (only valid for the coordinator instance).

    Parameters
    ----------
    data : object
        data to be sent
    send_to_self : bool
        if True, the data will also be sent internally to this coordinator instance
    use_dp : bool, default=False
        Whether to use differential privacy (dp) before sending out the data.
        To configure the hyperparameters of dp, use the configure_dp method
        before this method. The receiving method must also use the same
        setting of the use_dp flag or there will be serialization problems
    memo : str or None, default=None
        the string identifying a specific communication round.
        This ensures that there are no race condition problems so that the
        participants and the coordinator can differentiate between this
        data piece broadcast and other data pieces they receive from the
        coordinator.
    """
    try:
        memo = str(memo)
    except Exception as e:
        # fix: LogLevel.ERROR — LogLevel.Error is not a valid enum member
        # and would raise an AttributeError while trying to log.
        self._app.log(
            f"given memo cannot be translated to a string, ERROR: {e}",
            LogLevel.ERROR)

    if not self._app.coordinator:
        self._app.log('only the coordinator can broadcast data', level=LogLevel.FATAL)

    # serialize before broadcast; DP requires JSON serialization
    # (fix: removed the unused local `is_json`, which was computed but
    # never read — use_dp is passed directly)
    data = _serialize_outgoing(data, is_json=use_dp)

    message = self._app.status_message if self._app.status_message else (self._app.current_state.name if self._app.current_state else None)
    self._app.status_message = message
    dp = self._app.default_dp if use_dp else None
    status = self._app.get_current_status(message=message,
                                          destination=None, dp=dp, memo=memo,
                                          available=True)
    if send_to_self:
        # loop the payload back into this coordinator instance locally
        self._app.handle_incoming(data, client=self._app.id, memo=memo)
    self._app.data_outgoing.append((data, json.dumps(status, sort_keys=True)))
950
def configure_smpc(self, exponent: int = 8, shards: int = 0, operation: SMPCOperation = SMPCOperation.ADD,
                   serialization: SMPCSerialization = SMPCSerialization.JSON):
    """
    Configures successive usage of SMPC aggregation performed in the
    FeatureCloud controller.

    Parameters
    ----------
    exponent : int, default=8
        exponent used to convert floating point numbers to fixed-point numbers
    shards : int, default=0
        number of secrets to create; 0 means "use the total participant count"
    operation : SMPCOperation, default=SMPCOperation.ADD
        aggregation operation, either SMPCOperation.ADD or SMPCOperation.MULTIPLY.
        SMPCOperation.MULTIPLY is still experimental and may produce zeros or
        integer overflows when many clients are involved.
    serialization : SMPCSerialization, default=SMPCSerialization.JSON
        serialization of the data; currently only SMPCSerialization.JSON
        is supported
    """
    # write all four settings into the app-wide SMPC defaults in one go
    self._app.default_smpc.update({
        'exponent': exponent,
        'shards': shards,
        'operation': operation.value,
        'serialization': serialization.value,
    })
974
def configure_dp(self, epsilon: float = 1.0, delta: float = 0.0,
                 sensitivity: float or None = None,
                 clippingVal: float or None = 10.0,
                 noisetype: DPNoisetype = DPNoisetype.LAPLACE):
    """
    Configures the usage of differential privacy inside the FeatureCloud
    controller

    Parameters
    ----------
    epsilon : float, default = 1.0
        the epsilon value determining how much privacy is wanted
    delta : float, default = 0.0
        the delta value determining how much privacy is wanted. Should be 0
        when using laplace noise (noisetype=DPNoisetype.LAPLACE)
    sensitivity : float, default = None
        describes the amount of privacy introduced by the function used on
        the data that was used to create the model that is sent with DP.
        Depends on the function or on the function and the data.
        If using a clippingVal, the sensitivity must not be defined.
    clippingVal : float, default = 10.0
        Determines the scaling down of values sent via the controller.
        If e.g. an array of 5 numeric values (5 weights) is sent via the
        controller and clippingVal = 10.0 is chosen, then the p-norm of all
        5 values will not exceed 10. For laplace noise the 1-norm is chosen,
        for gauss noise the 2-norm.
    noisetype : DPNoisetype.LAPLACE or DPNoisetype.GAUSS, default = DPNoisetype.LAPLACE
        The distribution of noise added when adding differential privacy to
        the model
    """
    # fix: the original guards read `sensitivity and sensitivity == 0`
    # (same for clippingVal), which is always False because 0 is falsy —
    # both checks were dead code and never fired.
    if sensitivity is not None and sensitivity == 0:
        self._app.log('DP was configured to a sensitivity of 0, therefore '+\
                      'deactivating DP. Use sensitivity = None if sensitivity '+\
                      'given via clipping should be used', level=LogLevel.FATAL)
    if clippingVal is not None and clippingVal == 0:
        self._app.log('DP was configured to a clippingVal of 0, this would '+\
                      'block all learning. Use clippingVal = None if '+\
                      'no clipping of models is wanted. In that case, ' +\
                      'a sensitivity value is needed to use DP', level=LogLevel.FATAL)
    if not delta:
        if noisetype == DPNoisetype.LAPLACE:
            # laplace noise is valid without a delta
            delta = 0
        else:
            self._app.log("Delta not given, please give a delta value or DP cannot be applied",
                          level=LogLevel.FATAL)
    if not epsilon:
        self._app.log("Epsilon not given, please give an epsilon value or DP cannot be applied",
                      level=LogLevel.FATAL)
    if not noisetype:
        self._app.log("noisetype not given, please give an noisetype value or DP cannot be applied",
                      level=LogLevel.FATAL)
    if epsilon <= 0:
        self._app.log("invalid epsilon given, epsilon must be a positive number",
                      level=LogLevel.FATAL)
    # fix: was `delta <= 0`, which rejected delta == 0 — the valid laplace
    # setting established just above, and in contradiction with the error
    # message, which itself says delta must be >= 0.
    if delta < 0:
        self._app.log("invalid delta given, delta must be >= 0",
                      level=LogLevel.FATAL)
    if noisetype == DPNoisetype.LAPLACE and delta != 0:
        self._app.log("When using laplace noise, delta must be set to 0!",
                      level=LogLevel.FATAL)
    if noisetype == DPNoisetype.GAUSS:
        if delta <= 0:
            self._app.log("When using gauss noise, delta must be > 0",
                          level=LogLevel.FATAL)

    self._app.default_dp['serialization'] = 'json'
    self._app.default_dp['noisetype'] = noisetype.value
    self._app.default_dp['epsilon'] = epsilon
    self._app.default_dp['delta'] = delta
    self._app.default_dp['sensitivity'] = sensitivity
    self._app.default_dp['clippingVal'] = clippingVal
1048
def update(self, message: Union[str, None] = None, progress: Union[float, None] = None,
           state: Union[State, None] = None):
    """
    Updates information about the execution.

    Parameters
    ----------
    message : str
        message briefly summarizing what is happening currently
    progress : float
        number between 0 and 1, indicating the overall progress
    state : State or None
        overall state (running, error or action_required)
    """
    # validate each field before any status attribute is mutated
    if message and len(message) > 40:
        self._app.log('message is too long (max: 40)', level=LogLevel.FATAL)
    if progress is not None and (progress < 0 or progress > 1):
        self._app.log('progress must be between 0 and 1', level=LogLevel.FATAL)
    if state is not None and state not in (State.RUNNING, State.ERROR, State.ACTION):
        self._app.log('invalid state', level=LogLevel.FATAL)

    self._app.status_message = message
    self._app.status_progress = progress
    self._app.status_state = state.value if state else None
1074
def store(self, key: str, value):
    """
    Shares a value across different AppState instances of this app.

    Parameters
    ----------
    key : str
        identifier under which the value is stored
    value : object
        value to keep; retrieve it later via `load`
    """
    self._app.internal[key] = value
1085
def load(self, key: str):
    """
    Accesses a value shared across AppState instances via `store`.

    Parameters
    ----------
    key : str
        identifier the value was stored under

    Returns
    -------
    value : object
        the previously stored value, or None if the key is unknown
    """
    shared = self._app.internal
    return shared.get(key)
1100
def log(self, msg, level: LogLevel = LogLevel.DEBUG):
    """
    Prints a log message or raises an exception according to the log level.

    Parameters
    ----------
    msg : str
        message to be displayed
    level : LogLevel, default=LogLevel.DEBUG
        determines the channel (stdout, stderr) or whether to trigger an
        exception
    """
    # prepend the current state name so log lines can be attributed
    tagged = f'[State: {self.name}] {msg}'
    self._app.log(tagged, level)
1115
def app_state(name: str, role: Role = Role.BOTH, app_instance: Union[App, None] = None, **kwargs):
    """
    Class decorator registering an AppState under `name` with the given role.

    If no explicit app_instance is supplied, the module-level `app`
    singleton is used.
    """
    if app_instance is None:
        app_instance = app

    participant, coordinator = role.value
    if not (participant or coordinator):
        app_instance.log('either participant or coordinator must be True', level=LogLevel.FATAL)

    def decorator(state_class):
        # register and hand the class back unchanged
        app_instance._register_state(name, state_class, participant, coordinator, **kwargs)
        return state_class

    return decorator
1129
+ class _NumpyArrayEncoder(json.JSONEncoder):
1130
+ def default(self, obj):
1131
+ if isinstance(obj, np.integer):
1132
+ return int(obj)
1133
+ elif isinstance(obj, np.floating):
1134
+ return float(obj)
1135
+ elif isinstance(obj, np.ndarray):
1136
+ return obj.tolist()
1137
+ else:
1138
+ # call default Encoder in other cases
1139
+ return json.JSONEncoder.default(self, obj)
1140
+
1141
+ def _serialize_outgoing(data, is_json=False):
1142
+ """
1143
+ Transforms a Python data object into a byte serialization.
1144
+
1145
+ Parameters
1146
+ ----------
1147
+ data : object
1148
+ data to serialize
1149
+ is_json : bool, default=False
1150
+ indicates whether JSON serialization is required
1151
+
1152
+ Returns
1153
+ ----------
1154
+ serialized data as bytes
1155
+ """
1156
+
1157
+ if not is_json:
1158
+ return pickle.dumps(data)
1159
+
1160
+ # we use a custom cls to manage numpy which is quite common
1161
+ return json.dumps(data, cls=_NumpyArrayEncoder)
1162
+
1163
+
1164
+ def _deserialize_incoming(data: bytes, is_json=False):
1165
+ """
1166
+ Transforms serialized data bytes into a Python object.
1167
+
1168
+ Parameters
1169
+ ----------
1170
+ data : bytes
1171
+ data to deserialize
1172
+ is_json : bool, default=False
1173
+ indicates whether JSON deserialization should be used
1174
+
1175
+ Returns
1176
+ ----------
1177
+ deserialized data
1178
+ """
1179
+ if not is_json:
1180
+ return pickle.loads(data)
1181
+
1182
+ return json.loads(data)
1183
+
1184
+
1185
def _aggregate(data, operation: SMPCOperation):
    """
    Aggregates a list of received values element-wise.

    Parameters
    ----------
    data : array_like
        list of data pieces
    operation : SMPCOperation
        operation to use for aggregation (add or multiply)

    Returns
    ----------
    aggregated value
    """
    arrays = [np.array(piece) for piece in data]
    # start from the first piece and fold the rest in
    result, remainder = arrays[0], arrays[1:]

    if operation == SMPCOperation.ADD:
        for arr in remainder:
            result = result + arr

    if operation == SMPCOperation.MULTIPLY:
        for arr in remainder:
            result = result * arr

    return result
1214
# module-level App singleton; used by the app_state decorator when no
# explicit app_instance is passed
app = App()