naeural_client-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. naeural_client/__init__.py +13 -0
  2. naeural_client/_ver.py +13 -0
  3. naeural_client/base/__init__.py +6 -0
  4. naeural_client/base/distributed_custom_code_presets.py +44 -0
  5. naeural_client/base/generic_session.py +1763 -0
  6. naeural_client/base/instance.py +616 -0
  7. naeural_client/base/payload/__init__.py +1 -0
  8. naeural_client/base/payload/payload.py +66 -0
  9. naeural_client/base/pipeline.py +1499 -0
  10. naeural_client/base/plugin_template.py +5209 -0
  11. naeural_client/base/responses.py +209 -0
  12. naeural_client/base/transaction.py +157 -0
  13. naeural_client/base_decentra_object.py +143 -0
  14. naeural_client/bc/__init__.py +3 -0
  15. naeural_client/bc/base.py +1046 -0
  16. naeural_client/bc/chain.py +0 -0
  17. naeural_client/bc/ec.py +324 -0
  18. naeural_client/certs/__init__.py +0 -0
  19. naeural_client/certs/r9092118.ala.eu-central-1.emqxsl.com.crt +22 -0
  20. naeural_client/code_cheker/__init__.py +1 -0
  21. naeural_client/code_cheker/base.py +520 -0
  22. naeural_client/code_cheker/checker.py +294 -0
  23. naeural_client/comm/__init__.py +2 -0
  24. naeural_client/comm/amqp_wrapper.py +338 -0
  25. naeural_client/comm/mqtt_wrapper.py +539 -0
  26. naeural_client/const/README.md +3 -0
  27. naeural_client/const/__init__.py +9 -0
  28. naeural_client/const/base.py +101 -0
  29. naeural_client/const/comms.py +80 -0
  30. naeural_client/const/environment.py +26 -0
  31. naeural_client/const/formatter.py +7 -0
  32. naeural_client/const/heartbeat.py +111 -0
  33. naeural_client/const/misc.py +20 -0
  34. naeural_client/const/payload.py +190 -0
  35. naeural_client/default/__init__.py +1 -0
  36. naeural_client/default/instance/__init__.py +4 -0
  37. naeural_client/default/instance/chain_dist_custom_job_01_plugin.py +54 -0
  38. naeural_client/default/instance/custom_web_app_01_plugin.py +118 -0
  39. naeural_client/default/instance/net_mon_01_plugin.py +45 -0
  40. naeural_client/default/instance/view_scene_01_plugin.py +28 -0
  41. naeural_client/default/session/mqtt_session.py +72 -0
  42. naeural_client/io_formatter/__init__.py +2 -0
  43. naeural_client/io_formatter/base/__init__.py +1 -0
  44. naeural_client/io_formatter/base/base_formatter.py +80 -0
  45. naeural_client/io_formatter/default/__init__.py +3 -0
  46. naeural_client/io_formatter/default/a_dummy.py +51 -0
  47. naeural_client/io_formatter/default/aixp1.py +113 -0
  48. naeural_client/io_formatter/default/default.py +22 -0
  49. naeural_client/io_formatter/io_formatter_manager.py +96 -0
  50. naeural_client/logging/__init__.py +1 -0
  51. naeural_client/logging/base_logger.py +2056 -0
  52. naeural_client/logging/logger_mixins/__init__.py +12 -0
  53. naeural_client/logging/logger_mixins/class_instance_mixin.py +92 -0
  54. naeural_client/logging/logger_mixins/computer_vision_mixin.py +443 -0
  55. naeural_client/logging/logger_mixins/datetime_mixin.py +344 -0
  56. naeural_client/logging/logger_mixins/download_mixin.py +421 -0
  57. naeural_client/logging/logger_mixins/general_serialization_mixin.py +242 -0
  58. naeural_client/logging/logger_mixins/json_serialization_mixin.py +481 -0
  59. naeural_client/logging/logger_mixins/pickle_serialization_mixin.py +301 -0
  60. naeural_client/logging/logger_mixins/process_mixin.py +63 -0
  61. naeural_client/logging/logger_mixins/resource_size_mixin.py +81 -0
  62. naeural_client/logging/logger_mixins/timers_mixin.py +501 -0
  63. naeural_client/logging/logger_mixins/upload_mixin.py +260 -0
  64. naeural_client/logging/logger_mixins/utils_mixin.py +675 -0
  65. naeural_client/logging/small_logger.py +93 -0
  66. naeural_client/logging/tzlocal/__init__.py +20 -0
  67. naeural_client/logging/tzlocal/unix.py +231 -0
  68. naeural_client/logging/tzlocal/utils.py +113 -0
  69. naeural_client/logging/tzlocal/win32.py +151 -0
  70. naeural_client/logging/tzlocal/windows_tz.py +718 -0
  71. naeural_client/plugins_manager_mixin.py +273 -0
  72. naeural_client/utils/__init__.py +2 -0
  73. naeural_client/utils/comm_utils.py +44 -0
  74. naeural_client/utils/dotenv.py +75 -0
  75. naeural_client-2.0.0.dist-info/METADATA +365 -0
  76. naeural_client-2.0.0.dist-info/RECORD +78 -0
  77. naeural_client-2.0.0.dist-info/WHEEL +4 -0
  78. naeural_client-2.0.0.dist-info/licenses/LICENSE +201 -0
naeural_client/logging/logger_mixins/utils_mixin.py ADDED
@@ -0,0 +1,675 @@
+ import os
+ import re
+ import itertools
+ import sys
+ import pickle
+ import hashlib
+ import numpy as np
+ import traceback
+ from queue import Queue
+
+ from collections import OrderedDict, deque, defaultdict
+
+ from io import BytesIO, TextIOWrapper
+
+
+ class _UtilsMixin(object):
+   """
+   Mixin for miscellaneous functionality that does not belong to any other mixin attached to `pye2.Logger`.
+
+   This mixin cannot be instantiated on its own; it exists only to provide additional
+   functionality to `pye2.Logger`.
+
+   Within this mixin we can use any attribute/method of the Logger.
+   """
+
+   def __init__(self):
+     super(_UtilsMixin, self).__init__()
+
+   @staticmethod
+   def get_function_parameters(function):
+     import inspect
+     signature = inspect.signature(function)
+     parameters = signature.parameters
+
+     all_params = []
+     required_params = []
+     optional_params = []
+
+     for k, v in parameters.items():
+       if k == 'self':
+         continue
+
+       all_params.append(k)
+
+       if v.default is inspect._empty:
+         required_params.append(k)
+       else:
+         optional_params.append(k)
+
+     return all_params, required_params, optional_params
+
+   @staticmethod
+   def string_diff(seq1, seq2):
+     return sum(1 for a, b in zip(seq1, seq2) if a != b) + abs(len(seq1) - len(seq2))
+
+   @staticmethod
+   def flatten_2d_list(lst):
+     return _UtilsMixin.flatten_nd_list(lst, 2)
+     # return list(itertools.chain.from_iterable(lst))
+
+   @staticmethod
+   def flatten_3d_list(lst):
+     return _UtilsMixin.flatten_nd_list(lst, 3)
+
+   @staticmethod
+   def flatten_nd_list(lst, n):
+     for _ in range(n - 1):
+       if len(lst) == 0 or not isinstance(lst[0], list):
+         break
+       lst = list(itertools.chain.from_iterable(lst))
+     return lst
+
+
+   def _get_obj_size_graph(self, lst_obj_size_result, min_size_threshold=104_857):
+     """
+     Returns a graph as a dict, where the dependency is given by two fields: 'CHILDREN' and 'PARENT'
+
+     Parameters
+     ----------
+     lst_obj_size_result : list
+       list resulting from a `get_obj_size` call.
+
+     min_size_threshold : int, optional
+       children below this size in bytes (default ~0.1 MB) are pruned from the graph.
+
+     Returns
+     -------
+     graph : dict
+       graph as a dict, where the dependency is given by the fields: 'CHILDREN' and 'PARENT'
+
+     """
+     graph = {
+       'PARENT': None,
+       'CHILDREN': []
+     }
+
+     curr_node_at_level = {
+       0: graph
+     }
+
+     for obj in lst_obj_size_result:
+       level = obj.pop('LEVEL')
+
+       node = {
+         'PARENT': curr_node_at_level[level-1],
+         'CHILDREN': [],
+         **obj
+       }
+       curr_node_at_level[level-1]['CHILDREN'].append(node)
+       curr_node_at_level[level] = node
+
+     # now prune the graph, removing all children that are below a certain size
+     processing_nodes = Queue()
+     for kid in graph['CHILDREN']:
+       processing_nodes.put(kid)
+
+     while not processing_nodes.empty():
+       node = processing_nodes.get()
+       big_kids = [kid for kid in node['CHILDREN'] if kid['SIZE'] > min_size_threshold]
+       node['CHILDREN'] = big_kids
+       for kid in node['CHILDREN']:
+         processing_nodes.put(kid)
+
+     return graph
+
+   def _get_graph_leaves(self, graph):
+     """
+     Returns a list with all the leaves from a given graph.
+     The graph must be a dict with a key `CHILDREN`
+
+     Parameters
+     ----------
+     graph : dict
+       The graph of the object dependency
+
+     Returns
+     -------
+     list
+       All the leaves from a given graph
+     """
+     leaves = []
+     processing_nodes = Queue()
+     for kid in graph['CHILDREN']:
+       processing_nodes.put(kid)
+
+     while not processing_nodes.empty():
+       node = processing_nodes.get()
+       if len(node['CHILDREN']) == 0:
+         leaves.append(node)
+       else:
+         for kid in node['CHILDREN']:
+           processing_nodes.put(kid)
+
+     return leaves
+
+   def _get_top_k_biggest_leaves(self, lst_objects, k):
+     """
+     Sort a list of objects and return the biggest ones
+
+     Parameters
+     ----------
+     lst_objects : list
+       a list of objects that have a key `SIZE`
+     k : int
+       the max number of elements returned
+
+     Returns
+     -------
+     list
+       The top k biggest objects, sorted
+     """
+     sorted_objects = sorted(lst_objects, key=lambda d: d['SIZE'], reverse=True)
+     return sorted_objects[:k]
+
+   def _get_path_to_leaf(self, obj):
+     """
+     Returns the path from the graph's root to this node
+
+     Parameters
+     ----------
+     obj : dict
+       A node in the graph
+
+     Returns
+     -------
+     list
+       The path from the root to the current node
+     """
+     path = []
+     while obj is not None and 'NAME' in obj:
+       path.append({
+         'NAME': obj['NAME'],
+         'SIZE': obj['SIZE']
+       })
+       obj = obj['PARENT']
+
+     path.reverse()
+     return path
+
+   def get_obj_size_issues(self, lst_obj_size_result, topk=3, MB=True):
+     """
+     Returns the top k paths from the generated memory tree with the worst memory loads
+
+     Parameters
+     ----------
+     lst_obj_size_result : list
+       list resulting from a `get_obj_size` call.
+
+     topk : int, optional
+       Get biggest `topk` memory consumers. The default is 3.
+
+     MB : bool, optional
+       Return values in MB or bytes. Default MB
+
+
+     Returns
+     -------
+     list
+       The `topk` root-to-leaf paths, each a list of {'NAME': ..., 'SIZE': ...} dicts.
+
+
+     Example
+     -------
+
+     > _, lst_tree, _ = get_obj_size(orchestrator, return_tree=True)
+     > get_obj_size_issues(lst_tree, topk=2, MB=True)
+     [
+       [{"orchestrator" : 5.11}, {"monitor": 4.20}, {"box-32" : 4.02}, {"hearbeatsbuff" : 3.91}, {"_deque" : 3.89}],
+       [{"orchestrator" : 5.11}, {"bizman": 0.71}, {"plugins" : 0.69}, {"plugin-instance-1" : 0.69}, {"buffer" : 0.69}],
+     ]
+
+     """
+
+     graph = self._get_obj_size_graph(lst_obj_size_result)
+     leaves = self._get_graph_leaves(graph)
+     topk_leaves = self._get_top_k_biggest_leaves(leaves, topk)
+     result = [self._get_path_to_leaf(l) for l in topk_leaves]
+     if MB:
+       for path in result:
+         for obj in path:
+           obj['SIZE'] = float(f"{(obj['SIZE'] / 1_048_576):.02f}")
+
+     return result
+
+   def _obsolete_get_obj_size(self, obj, return_tree=False, excluded_obj_props=[], exclude_obj_props_like=[], as_list=True):
+     """
+     Recursively (DFS) finds the size of objects
+
+     obj: anything
+       this is the root object
+
+     return_tree:
+       returns the whole object tree
+
+     =======
+     returns:
+       the tree size
+
+     usage:
+
+       ...
+       objA = Class1()
+       objB = Class2(owner=objA)  # some attribute of objB holds objA
+
+       # simple usage
+       sizeObjB = log.get_obj_size(objB)  # this includes the size of objA
+
+       ...
+
+     Observation:
+       a naive multi-level dict implementation runs into memory exceptions
+       TODO: try a better algo
+
+     """
+     self._obj_tree = OrderedDict()
+     self._obj_level = 0
+     with self.managed_lock_resource('get_obj_size'):
+       try:
+         result = self._helper_get_obj_size(
+           obj=obj,
+           name='Base: {}'.format(obj.__class__.__name__),
+           excluded_obj_props=excluded_obj_props,
+           exclude_obj_props_like=exclude_obj_props_like,
+         )
+         if return_tree:
+           if as_list:
+             tree = [v for k, v in self._obj_tree.items()]
+           else:
+             tree = self._obj_tree
+           result = result, tree
+         self._obj_tree = None
+       except:
+         result = 0
+     return result
+
+
+   def _helper_get_obj_size(self, obj, name='unk', excluded_obj_props=[], exclude_obj_props_like=[]):
+     EXCLUDED_TYPES = (
+       str, bytes, bytearray, np.ndarray,
+     )
+     EXCLUDED_STRINGS = [
+       'lock', 'mappingproxy', 'Thread',
+     ]
+     size = 0
+     obj_id = id(obj)
+     if obj_id in self._obj_tree or obj_id == id(self._obj_tree):
+       return 0
+
+     size = sys.getsizeof(obj)
+
+     self._obj_level += 1
+     self._obj_tree[obj_id] = {
+       'NAME'  : name,
+       'SIZE'  : size,
+       'CLASS' : obj.__class__.__name__,
+       'LEVEL' : self._obj_level,
+     }
+     # Important: mark as seen *before* entering recursion to gracefully handle
+     # self-referential objects
+     if isinstance(obj, EXCLUDED_TYPES) or obj.__class__.__name__ in EXCLUDED_STRINGS:
+       # we do not want to go deeper in these objects... yet
+       pass
+     elif obj.__class__.__name__ == 'Tensor':
+       if str(obj.device) == 'cpu':
+         size += obj.nelement() * obj.element_size()
+     elif isinstance(obj, dict):
+       keys = list(obj.keys())
+       # add just the values
+       size += sum([self._helper_get_obj_size(obj=obj[k], name=str(k)) for k in keys])
+     elif isinstance(obj, (list, deque)) and len(obj) > 0 and isinstance(obj[0], (int, float)) and isinstance(obj[-1], (int, float)):
+       size += (len(obj) * ((sys.getsizeof(obj[0]) + sys.getsizeof(obj[-1])) / 2))
+     elif hasattr(obj, '__dict__'):  # this is an object
+       # we take all the properties of the object
+       keys = list(obj.__dict__.keys())
+       # add just the values
+       for obj_prop in keys:
+         excluded = obj_prop in excluded_obj_props
+         excluded = excluded or any([x in obj_prop for x in exclude_obj_props_like])
+         if not excluded:
+           size += self._helper_get_obj_size(
+             obj=obj.__dict__[obj_prop],
+             name=str(obj_prop),
+             excluded_obj_props=excluded_obj_props,
+           )
+     elif hasattr(obj, '__iter__'):
+       try:
+         temp_size = len(obj)
+         temp_list = list(obj)
+         size += sum([self._helper_get_obj_size(temp_list[i]) for i in range(temp_size)])
+       except Exception as e:
+         with open(os.path.join(self.get_data_folder(), 'obj_tree_error.txt'), 'wt') as fh:
+           for k, v in self._obj_tree.items():
+             fh.write('\n{}'.format(v))
+         print('Except:\n{}'.format(traceback.format_exc()))
+     if obj_id in self._obj_tree:
+       self._obj_tree[obj_id]['SIZE'] = size
+     self._obj_level -= 1
+     return size
+
+
+   def get_obj_size(
+     self,
+     obj,
+     name='<root>',
+     level=0,
+     top_consumers=40,
+     return_tree=False,
+     excluded_obj_props=[],
+     exclude_obj_props_like=[],
+   ):
+     """
+     Calculate the memory footprint of an object and its sub-objects, and generate info about the objects.
+
+     Parameters
+     ----------
+     obj : any
+       The object to calculate the memory footprint of.
+
+     name : str, optional
+       The name of the object, by default '<root>'.
+
+     level : int, optional
+       The nesting level of the object, by default 0.
+
+     top_consumers : int, optional
+       Number of top memory consumers to report (only objects at nesting level 2
+       or deeper are considered), by default 40.
+
+     Returns
+     -------
+     int
+       Memory size in bytes.
+
+     list of dict (only when `return_tree=True`)
+       A list of dictionaries with information about each object visited during the search.
+       Each dictionary includes the object's name, class name, nesting level, own size and total size.
+
+     list of dict (only when `return_tree=True`)
+       A list of the top `top_consumers` biggest objects at nesting level 2 or deeper.
+
+     Notes
+     -----
+     This function ignores objects of certain types for performance reasons (e.g., str, bytes, bytearray, np.ndarray),
+     and also objects of certain classes (e.g., 'lock', 'mappingproxy', 'Thread').
+     """
+     with self.managed_lock_resource('get_obj_size'):
+
+       EXCLUDED_TYPES = (
+         str, bytes, bytearray, np.ndarray,
+       )
+       EXCLUDED_STRINGS = [
+         'lock', 'mappingproxy', 'Thread',
+       ]
+
+       root_id = id(obj)
+
+       # Stack for DFS traversal of the object's attributes and
+       # post-order processing (children before parents)
+       stack = [(obj, name, level, root_id, False)]
+
+       # Info about each object, indexed by object id
+       objects_info = OrderedDict()
+
+       # Children objects for each object, indexed by object id
+       children = defaultdict(list)
+
+       total_mem = 0
+
+       while stack:
+         obj, name, level, obj_id, processed = stack.pop()
+
+         if processed:
+           # All children have been processed, compute the total size
+           total_size = objects_info[obj_id]['OBJ_SIZE'] + sum(objects_info[child]['SIZE'] for child in children[obj_id])
+           objects_info[obj_id]['SIZE'] = total_size
+         else:
+           class_name = obj.__class__.__name__
+
+           obj_size = 0
+           if class_name == 'Tensor':
+             if hasattr(obj, 'device') and str(obj.device) == 'cpu':
+               obj_size = obj.nelement() * obj.element_size()
+           else:
+             obj_size = sys.getsizeof(obj)
+
+           total_mem += obj_size
+
+           # Store the object's info
+           objects_info[obj_id] = {
+             'NAME': name,
+             'CLASS_NAME': obj.__class__.__name__,
+             'LEVEL': level,
+             'OBJ_SIZE': obj_size,
+             'SIZE': 0  # Will be updated later
+           }
+
+           # Add this node back to the stack for post-order processing
+           stack.append((obj, name, level, obj_id, True))
+
+           if class_name not in EXCLUDED_STRINGS and not isinstance(obj, EXCLUDED_TYPES):
+             new_objects = []
+
+             if hasattr(obj, '__dict__'):
+               prop_list = list(obj.__dict__.keys())
+               for attr_name in prop_list:
+                 excluded = attr_name in excluded_obj_props
+                 excluded = excluded or any([x in attr_name for x in exclude_obj_props_like])
+                 if not excluded:
+                   attr_value = obj.__dict__[attr_name]
+                   new_objects.append((attr_value, f'{name}.{attr_name}'))
+               #endfor
+             elif isinstance(obj, (list, tuple, set, deque)):
+               list_obj = list(obj)
+               obj_len = len(list_obj)
+               for i in range(obj_len):
+                 item = list_obj[i]
+                 new_objects.append((item, f'{name}[{i}]'))
+               #endfor
+             elif isinstance(obj, dict):
+               key_list = list(obj.keys())
+               for k in key_list:
+                 v = obj[k]
+                 new_objects.append((v, f'{name}["{k}"]'))
+               #endfor
+
+             for new_obj, new_name in new_objects:
+               new_id = id(new_obj)
+               if new_id not in objects_info:
+                 stack.append((new_obj, new_name, level + 1, new_id, False))
+                 children[obj_id].append(new_id)
+             #endfor
+           #endif type checking
+         #endif processed checking
+       #endwhile DFS
+
+       # Compute the total memory of the root object; the root is processed last,
+       # so at this point `total_size` also holds the root's total
+       root_object_total_memory = objects_info[root_id]['SIZE']
+
+       if total_size != root_object_total_memory:
+         self.P("WARNING: MEMORY CALCULATION INCONSISTENCY!", color='r')
+
+       if return_tree:
+         # List of objects info, skipping primitive str/int/float leaves
+         objects_info_list = [
+           x for x in objects_info.values() if x['CLASS_NAME'] not in ['str', 'int', 'float']]
+
+         # Keep only objects nested at least two levels deep and take the `top_consumers` biggest ones
+         top_5_objects = sorted(
+           [info for obj_id, info in objects_info.items() if info['LEVEL'] >= 2],
+           key=lambda x: x['SIZE'],
+           reverse=True
+         )[:top_consumers]
+     # endwith lock
+
+     if return_tree:
+       result = root_object_total_memory, objects_info_list, top_5_objects
+     else:
+       result = total_mem
+     return result
+
+
+   @staticmethod
+   def find_documentation(class_name, *args):
+     # setup the environment
+     old_stdout = sys.stdout
+     sys.stdout = TextIOWrapper(BytesIO(), sys.stdout.encoding)
+     # write to stdout or stdout.buffer
+     help(class_name)
+     # get output
+     sys.stdout.seek(0)  # jump to the start
+     out = sys.stdout.read()  # read output
+     # restore stdout
+     sys.stdout.close()
+     sys.stdout = old_stdout
+
+     out_splitted = out.split('\n')
+     filtered_doc = list(filter(lambda x: all([_str in x for _str in args]),
+                                out_splitted))
+
+     return filtered_doc
+
+   @staticmethod
+   def common_start(*args):
+     """ returns the longest common substring from the beginning of the passed `args` """
+
+     def _iter():
+       for t in zip(*args):
+         s = set(t)
+         if len(s) == 1:
+           yield list(s)[0]
+         else:
+           return
+
+     return ''.join(_iter())
+
+   @staticmethod
+   def distance_euclidean(np_x, np_y):
+     return np.sqrt(np.sum((np_x - np_y) ** 2, axis=1))
+
+   @staticmethod
+   def code_version(lst_dirs=['.'], lst_exclude=[]):
+     import re
+     import pandas as pd
+     from pandas.util import hash_pandas_object
+     from pathlib import Path
+
+     assert len(lst_dirs) > 0
+     assert all([os.path.isdir(x) for x in lst_dirs])
+     assert all([os.path.isdir(x) for x in lst_exclude])
+     dct_temp = {}
+     dct = {'FILE': [], 'VER': []}
+     for d in lst_dirs:
+       for orig_file in Path(d).rglob('**/*.py'):
+         try:
+           orig_file = str(orig_file)
+           if any([x in orig_file for x in lst_exclude]):
+             continue
+           # endif
+           file = orig_file
+           file_ver = '{}_ver'.format(re.sub(r'[^\w\s]', '', file))
+           for x in ['/', '\\']:
+             file = file.replace(x, '.')
+           file = file.replace('.py', '')
+           for key in ['__version__', '__VER__']:
+             try:
+               cmd = 'from {} import {} as {}'.format(file, key, file_ver)
+               exec(cmd, dct_temp)
+             except:
+               pass
+           # endfor
+
+           if file_ver in dct_temp:
+             dct['FILE'].append(orig_file)
+             dct['VER'].append(dct_temp[file_ver])
+           # endif
+         except Exception as e:
+           pass
+       # endfor
+     # endfor
+     df = pd.DataFrame(dct).sort_values('FILE').reset_index(drop=True)
+     return df, hex(hash_pandas_object(df).sum())
+
+   @staticmethod
+   def natural_sort(l):
+     import re
+     convert = lambda text: int(text) if text.isdigit() else text.lower()
+     alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
+     return sorted(l, key=alphanum_key)
+
+   @staticmethod
+   def hash_object(obj, size=None):
+     """
+     Uses md5 to get the hash of any pickleable object
+
+     Parameters
+     ----------
+     obj : any pickleable object, mandatory
+
+     Returns
+     -------
+     md5 hash : str
+
+     """
+     p = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
+     result = hashlib.md5(p).hexdigest()
+     return result if size is None else result[:size]
+
+
+   @staticmethod
+   def get_uid(size=8):
+     """
+     Uses uuid4 to generate a unique ID and extract part of the id
+     """
+     from uuid import uuid4
+     str_id = str(uuid4())
+     str_id = str_id.replace('-', '')
+     return str_id[:size]
+
+
+   @staticmethod
+   def shorten_name(s):
+     return _UtilsMixin.name_abbreviation(s)
+
+
+   @staticmethod
+   def name_abbreviation(s):
+     name_split = []
+     if '_' not in s:
+       # try to split by uppercase - for example VideoStream should become ["VIDEO", "STREAM"]
+       name_split = re.findall('[A-Z][a-z]*', s)
+       name_split = [x.upper() for x in name_split]
+     #endif
+
+     name_split = name_split or s.upper().split('_')
+     prefix = name_split[0][:2]
+     if len(name_split) < 2:
+       pass
+     elif len(name_split) < 3:
+       prefix += name_split[1][:2]
+     else:
+       lst = []
+       for i in range(1, len(name_split)):
+         if name_split[i].isdigit():
+           lst.append(name_split[i][:2])
+         else:
+           lst.append(name_split[i][:1])
+       #endfor
+       prefix += ''.join(lst)
+     #endif
+     return prefix
+
+   @staticmethod
+   def get_short_name(s):
+     return _UtilsMixin.name_abbreviation(s)
+
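Most of the helpers above are plain static methods with no Logger state, so they can be exercised directly on the class. A minimal sketch, assuming the wheel is installed so the module resolves at the path listed in RECORD (the printed values follow from the code above):

from naeural_client.logging.logger_mixins.utils_mixin import _UtilsMixin

print(_UtilsMixin.string_diff('abc', 'abd'))                 # 1 differing position
print(_UtilsMixin.flatten_2d_list([[1, 2], [3, 4]]))         # [1, 2, 3, 4]
print(_UtilsMixin.common_start('flatten_2d', 'flatten_3d'))  # 'flatten_'
print(_UtilsMixin.natural_sort(['x10', 'x2', 'x1']))         # ['x1', 'x2', 'x10']
print(_UtilsMixin.name_abbreviation('VideoStream'))          # 'VIST'
print(_UtilsMixin.name_abbreviation('NET_MON_01'))           # 'NEM01'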
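`get_obj_size` itself is not static: it relies on the Logger's `managed_lock_resource` context manager and its `P` print method, neither of which is defined in this mixin. A hedged harness sketch follows; `_SizeProbe` and the demo payload are illustrative assumptions, not part of the package. Passing `level=1` keeps the emitted tree compatible with `get_obj_size_issues`, whose graph builder expects levels to start at 1 (as they do in the legacy `_obsolete_get_obj_size`):

from contextlib import contextmanager
from naeural_client.logging.logger_mixins.utils_mixin import _UtilsMixin

class _SizeProbe(_UtilsMixin):  # hypothetical harness, not part of the wheel
  @contextmanager
  def managed_lock_resource(self, name):
    yield  # single-threaded demo: no real locking required

  def P(self, msg, color=None):
    print(msg)

probe = _SizeProbe()
payload = {'buffer': list(range(100_000)), 'name': 'demo'}
total, tree, top = probe.get_obj_size(payload, level=1, return_tree=True)
print(total)  # footprint in bytes of the dict, the list and its 100k ints
# note: get_obj_size_issues pops 'LEVEL' from the tree entries, so build a
# fresh tree before calling it a second time
print(probe.get_obj_size_issues(tree, topk=1, MB=True))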
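Finally, `hash_object` returns the same md5 fingerprint for identical pickleable inputs (handy as a cache key), while `get_uid` is random on every call; a small sketch under the same import assumption:

from naeural_client.logging.logger_mixins.utils_mixin import _UtilsMixin

cfg = {'MODEL': 'yolo', 'THRESHOLD': 0.5}
print(_UtilsMixin.hash_object(cfg))          # full 32-char md5 hex digest
print(_UtilsMixin.hash_object(cfg, size=8))  # truncated 8-char fingerprint
print(_UtilsMixin.get_uid())                 # random 8-char uuid4 fragment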