appier 1.31.4__py2.py3-none-any.whl → 1.32.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. appier/__init__.py +333 -52
  2. appier/amqp.py +29 -30
  3. appier/api.py +214 -212
  4. appier/asgi.py +54 -55
  5. appier/async_neo.py +46 -35
  6. appier/async_old.py +55 -42
  7. appier/asynchronous.py +7 -13
  8. appier/base.py +1762 -1429
  9. appier/bus.py +51 -52
  10. appier/cache.py +99 -84
  11. appier/common.py +9 -11
  12. appier/component.py +17 -19
  13. appier/compress.py +25 -28
  14. appier/config.py +96 -73
  15. appier/controller.py +9 -15
  16. appier/crypt.py +25 -21
  17. appier/data.py +73 -57
  18. appier/defines.py +191 -226
  19. appier/exceptions.py +103 -63
  20. appier/execution.py +94 -88
  21. appier/export.py +90 -88
  22. appier/extra.py +6 -13
  23. appier/extra_neo.py +8 -11
  24. appier/extra_old.py +18 -16
  25. appier/geo.py +57 -47
  26. appier/git.py +101 -90
  27. appier/graph.py +23 -24
  28. appier/http.py +520 -398
  29. appier/legacy.py +373 -180
  30. appier/log.py +90 -97
  31. appier/meta.py +42 -42
  32. appier/mock.py +32 -34
  33. appier/model.py +793 -681
  34. appier/model_a.py +208 -183
  35. appier/mongo.py +183 -107
  36. appier/observer.py +39 -31
  37. appier/part.py +23 -24
  38. appier/preferences.py +44 -47
  39. appier/queuing.py +78 -96
  40. appier/redisdb.py +40 -35
  41. appier/request.py +227 -175
  42. appier/scheduler.py +13 -18
  43. appier/serialize.py +37 -31
  44. appier/session.py +161 -147
  45. appier/settings.py +2 -11
  46. appier/smtp.py +53 -49
  47. appier/storage.py +39 -33
  48. appier/structures.py +50 -45
  49. appier/test/__init__.py +2 -11
  50. appier/test/base.py +111 -108
  51. appier/test/cache.py +28 -35
  52. appier/test/config.py +10 -19
  53. appier/test/crypt.py +3 -12
  54. appier/test/data.py +3 -12
  55. appier/test/exceptions.py +8 -17
  56. appier/test/export.py +16 -33
  57. appier/test/graph.py +27 -60
  58. appier/test/http.py +42 -54
  59. appier/test/legacy.py +20 -30
  60. appier/test/log.py +14 -35
  61. appier/test/mock.py +27 -123
  62. appier/test/model.py +79 -91
  63. appier/test/part.py +5 -14
  64. appier/test/preferences.py +5 -13
  65. appier/test/queuing.py +29 -37
  66. appier/test/request.py +61 -73
  67. appier/test/serialize.py +12 -23
  68. appier/test/session.py +10 -19
  69. appier/test/smtp.py +8 -14
  70. appier/test/structures.py +20 -24
  71. appier/test/typesf.py +14 -28
  72. appier/test/util.py +480 -438
  73. appier/typesf.py +251 -171
  74. appier/util.py +578 -407
  75. appier/validation.py +280 -143
  76. {appier-1.31.4.dist-info → appier-1.32.0.dist-info}/METADATA +6 -1
  77. appier-1.32.0.dist-info/RECORD +86 -0
  78. appier-1.31.4.dist-info/RECORD +0 -86
  79. {appier-1.31.4.dist-info → appier-1.32.0.dist-info}/LICENSE +0 -0
  80. {appier-1.31.4.dist-info → appier-1.32.0.dist-info}/WHEEL +0 -0
  81. {appier-1.31.4.dist-info → appier-1.32.0.dist-info}/top_level.txt +0 -0
appier/util.py CHANGED
@@ -2,7 +2,7 @@
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
4
  # Hive Appier Framework
5
- # Copyright (c) 2008-2022 Hive Solutions Lda.
5
+ # Copyright (c) 2008-2024 Hive Solutions Lda.
6
6
  #
7
7
  # This file is part of Hive Appier Framework.
8
8
  #
@@ -22,16 +22,7 @@
22
22
  __author__ = "João Magalhães <joamag@hive.pt>"
23
23
  """ The author(s) of the module """
24
24
 
25
- __version__ = "1.0.0"
26
- """ The version of the module """
27
-
28
- __revision__ = "$LastChangedRevision$"
29
- """ The revision number of the module """
30
-
31
- __date__ = "$LastChangedDate$"
32
- """ The last change date of the module """
33
-
34
- __copyright__ = "Copyright (c) 2008-2022 Hive Solutions Lda."
25
+ __copyright__ = "Copyright (c) 2008-2024 Hive Solutions Lda."
35
26
  """ The copyright for the module """
36
27
 
37
28
  __license__ = "Apache License, Version 2.0"
@@ -78,10 +69,10 @@ upper case letter regex that will provide a way of
78
69
  putting the underscore in the middle of the transition """
79
70
 
80
71
  SORT_MAP = {
81
- "1" : 1,
82
- "-1" : -1,
83
- "ascending" : 1,
84
- "descending" : -1,
72
+ "1": 1,
73
+ "-1": -1,
74
+ "ascending": 1,
75
+ "descending": -1,
85
76
  }
86
77
  """ The map associating the normalized (text) way of
87
78
  representing sorting with the current infra-structure
@@ -93,70 +84,78 @@ considered to be sequence based for python """
93
84
 
94
85
  defines = defines
95
86
 
87
+
96
88
  def to_limit(limit_s):
97
89
  limit = int(limit_s)
98
- if limit < 0: return 0
90
+ if limit < 0:
91
+ return 0
99
92
  return limit
100
93
 
94
+
101
95
  def to_find(find_s):
102
- if not find_s: return []
96
+ if not find_s:
97
+ return []
103
98
  find_t = type(find_s)
104
- if find_t == list: return find_s
99
+ if find_t == list:
100
+ return find_s
105
101
  return [find_s]
106
102
 
103
+
107
104
  def to_sort(sort_s):
108
105
  sort_l = []
109
106
  sorts = sort_s.split(",")
110
107
  for sort_i in sorts:
111
108
  values = sort_i.split(":", 1)
112
- if len(values) == 1: values.append("descending")
109
+ if len(values) == 1:
110
+ values.append("descending")
113
111
  name, direction = values
114
- if name == "default": return None
112
+ if name == "default":
113
+ return None
115
114
  values[1] = SORT_MAP.get(direction, 1)
116
115
  sort_l.append(tuple(values))
117
116
  return sort_l
118
117
 
118
+
119
119
  ALIAS = {
120
- "context" : "find_d",
121
- "filters" : "find_d",
122
- "filters[]" : "find_d",
123
- "filter_def" : "find_d",
124
- "filter_string" : "find_s",
125
- "filter_name" : "find_n",
126
- "filter_operator" : "find_o",
127
- "insensitive" : "find_i",
128
- "order" : "sort",
129
- "offset" : "skip",
130
- "start_record" : "skip",
131
- "number_records" : "limit"
120
+ "context": "find_d",
121
+ "filters": "find_d",
122
+ "filters[]": "find_d",
123
+ "filter_def": "find_d",
124
+ "filter_string": "find_s",
125
+ "filter_name": "find_n",
126
+ "filter_operator": "find_o",
127
+ "insensitive": "find_i",
128
+ "order": "sort",
129
+ "offset": "skip",
130
+ "start_record": "skip",
131
+ "number_records": "limit",
132
132
  }
133
133
  """ The map containing the various attribute alias
134
134
  between the normalized manned and the appier manner """
135
135
 
136
136
  FIND_TYPES = dict(
137
- skip = int,
138
- limit = to_limit,
139
- find_s = legacy.UNICODE,
140
- find_d = to_find,
141
- find_i = bool,
142
- find_t = legacy.UNICODE,
143
- find_n = legacy.UNICODE,
144
- find_o = legacy.UNICODE,
145
- sort = to_sort,
146
- meta = bool,
147
- fields = list
137
+ skip=int,
138
+ limit=to_limit,
139
+ find_s=legacy.UNICODE,
140
+ find_d=to_find,
141
+ find_i=bool,
142
+ find_t=legacy.UNICODE,
143
+ find_n=legacy.UNICODE,
144
+ find_o=legacy.UNICODE,
145
+ sort=to_sort,
146
+ meta=bool,
147
+ fields=list,
148
148
  )
149
149
  """ The map associating the various find fields with
150
150
  their respective types, note that in case a special
151
151
  conversion operation is required the associated value
152
152
  may represent a conversion function instead """
153
153
 
154
- FIND_DEFAULTS = dict(
155
- limit = 10
156
- )
154
+ FIND_DEFAULTS = dict(limit=10)
157
155
  """ The map that defines the various default values
158
156
  for a series of find related attributes """
159
157
 
158
+
160
159
  def is_iterable(object):
161
160
  """
162
161
  Verifies if the provided object (value) is iterable
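The hunk above only re-formats the small find/query helpers (to_limit, to_find, to_sort, ALIAS, FIND_TYPES, FIND_DEFAULTS); behaviour is unchanged. A minimal usage sketch of the sort parsing, with illustrative values:

```python
# to_sort() splits "name:direction" pairs and maps the direction through
# SORT_MAP, defaulting to "descending"; to_limit() clamps negative limits.
from appier import util

print(util.to_sort("name:ascending,age"))  # [('name', 1), ('age', -1)]
print(util.to_sort("default"))             # None, the "default" name short-circuits
print(util.to_limit("-5"))                 # 0
```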
@@ -173,6 +172,7 @@ def is_iterable(object):
173
172
 
174
173
  return isinstance(object, defines.ITERABLES)
175
174
 
175
+
176
176
  def is_mobile(user_agent):
177
177
  """
178
178
  Verifies if the provided user agent string represents a
@@ -188,13 +188,15 @@ def is_mobile(user_agent):
188
188
  mobile browser or a regular (desktop) one.
189
189
  """
190
190
 
191
- if not user_agent: return False
191
+ if not user_agent:
192
+ return False
192
193
  prefix = user_agent[:4]
193
194
  mobile = defines.MOBILE_REGEX.search(user_agent)
194
195
  mobile_prefix = defines.MOBILE_PREFIX_REGEX.search(prefix)
195
196
  is_mobile = True if mobile or mobile_prefix else False
196
197
  return is_mobile
197
198
 
199
+
198
200
  def is_tablet(user_agent):
199
201
  """
200
202
  Verifies if the provided user agent string represents a
@@ -210,13 +212,15 @@ def is_tablet(user_agent):
210
212
  tablet browser or a regular (desktop) one.
211
213
  """
212
214
 
213
- if not user_agent: return False
215
+ if not user_agent:
216
+ return False
214
217
  prefix = user_agent[:4]
215
218
  tablet = defines.TABLET_REGEX.search(user_agent)
216
219
  mobile_prefix = defines.MOBILE_PREFIX_REGEX.search(prefix)
217
220
  is_tablet = True if tablet or mobile_prefix else False
218
221
  return is_tablet
219
222
 
223
+
220
224
  def is_browser(user_agent):
221
225
  """
222
226
  Verifies if the provided user agent string represents a
@@ -232,11 +236,14 @@ def is_browser(user_agent):
232
236
  """
233
237
 
234
238
  info = browser_info(user_agent)
235
- if not info: return False
239
+ if not info:
240
+ return False
236
241
  interactive = info.get("interactive", False)
237
- if not interactive: return False
242
+ if not interactive:
243
+ return False
238
244
  return True
239
245
 
246
+
240
247
  def is_bot(user_agent):
241
248
  """
242
249
  Verifies if the provided user agent string represents a
@@ -251,12 +258,15 @@ def is_bot(user_agent):
251
258
  automated bot or not.
252
259
  """
253
260
 
254
- info = browser_info(user_agent = user_agent)
255
- if not info: return False
261
+ info = browser_info(user_agent=user_agent)
262
+ if not info:
263
+ return False
256
264
  bot = info.get("bot", False)
257
- if not bot: return False
265
+ if not bot:
266
+ return False
258
267
  return True
259
268
 
269
+
260
270
  def browser_info(user_agent):
261
271
  """
262
272
  Retrieves a dictionary containing information about the browser
@@ -272,7 +282,8 @@ def browser_info(user_agent):
272
282
  the provided user agent.
273
283
  """
274
284
 
275
- if not user_agent: return None
285
+ if not user_agent:
286
+ return None
276
287
 
277
288
  info = dict()
278
289
 
@@ -283,23 +294,29 @@ def browser_info(user_agent):
283
294
  interactive = browser_i.get("interactive", True)
284
295
  bot = browser_i.get("bot", False)
285
296
 
286
- if not sub_string in user_agent: continue
287
- if not version_search in user_agent: continue
297
+ if not sub_string in user_agent:
298
+ continue
299
+ if not version_search in user_agent:
300
+ continue
288
301
 
289
302
  version_i = user_agent.index(version_search) + len(version_search)
290
303
  version = user_agent[version_i:].split(" ", 1)[0].strip(" ;")
291
- try: version_f = float(".".join(version.split(".")[:2]))
292
- except ValueError: version_f = 0.0
293
- try: version_i = int(version_f)
294
- except ValueError: version_f = 0
304
+ try:
305
+ version_f = float(".".join(version.split(".")[:2]))
306
+ except ValueError:
307
+ version_f = 0.0
308
+ try:
309
+ version_i = int(version_f)
310
+ except ValueError:
311
+ version_f = 0
295
312
 
296
313
  info.update(
297
- name = identity,
298
- version = version,
299
- version_f = version_f,
300
- version_i = version_i,
301
- interactive = interactive,
302
- bot = bot
314
+ name=identity,
315
+ version=version,
316
+ version_f=version_f,
317
+ version_i=version_i,
318
+ interactive=interactive,
319
+ bot=bot,
303
320
  )
304
321
  break
305
322
 
@@ -307,14 +324,16 @@ def browser_info(user_agent):
307
324
  identity = os_i["identity"]
308
325
  sub_string = os_i.get("sub_string", identity)
309
326
 
310
- if not sub_string in user_agent: continue
327
+ if not sub_string in user_agent:
328
+ continue
311
329
 
312
- info.update(os = identity)
330
+ info.update(os=identity)
313
331
  break
314
332
 
315
333
  return info if info else None
316
334
 
317
- def email_parts(base, strip = True):
335
+
336
+ def email_parts(base, strip=True):
318
337
  """
319
338
  Unpacks the complete set of parts (name and email) from the
320
339
  provided generalized email string. The provided string may
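The user agent helpers above (is_mobile, is_bot, browser_info) are also only re-formatted. A hedged sketch of a typical call; the user agent string is illustrative and the resulting values depend on the BROWSER_INFO/OS_INFO tables in appier.defines, which are not part of this diff:

```python
from appier import util

ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
)

# returns a dict with the keys set in the hunk above: name, version,
# version_f, version_i, interactive, bot (plus os), or None if unknown
print(util.browser_info(ua))

# is_bot() and is_browser() simply inspect the "bot"/"interactive" flags
print(util.is_bot(ua), util.is_browser(ua))
```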
@@ -338,133 +357,170 @@ def email_parts(base, strip = True):
338
357
 
339
358
  base_t = type(base)
340
359
  if base_t in SEQUENCE_TYPES:
341
- return [email_parts(base, strip = strip) for base in base]
360
+ return [email_parts(base, strip=strip) for base in base]
342
361
 
343
- if not base: return (None, None)
344
- if strip: base = base.strip()
362
+ if not base:
363
+ return (None, None)
364
+ if strip:
365
+ base = base.strip()
345
366
 
346
367
  match = defines.EMAIL_REGEX.match(base)
347
- if not match: return (None, None)
368
+ if not match:
369
+ return (None, None)
348
370
 
349
371
  email = match.group("email_a") or match.group("email_b")
350
372
  name = match.group("name") or email
351
373
 
352
374
  return (name, email)
353
375
 
354
- def email_mime(base, encoding = "utf-8"):
355
- if legacy.PYTHON_3: encoding = None
376
+
377
+ def email_mime(base, encoding="utf-8"):
378
+ if legacy.PYTHON_3:
379
+ encoding = None
356
380
 
357
381
  base_t = type(base)
358
382
  if base_t in SEQUENCE_TYPES:
359
- return [value for value in (email_mime(item, encoding = encoding) for item in base) if value]
383
+ return [
384
+ value
385
+ for value in (email_mime(item, encoding=encoding) for item in base)
386
+ if value
387
+ ]
360
388
 
361
389
  name, email = email_parts(base)
362
- if not name or not email: return None
390
+ if not name or not email:
391
+ return None
363
392
 
364
- name = smtp.header(name, encoding = encoding)
393
+ name = smtp.header(name, encoding=encoding)
365
394
 
366
395
  return "%s <%s>" % (name, email)
367
396
 
397
+
368
398
  def email_name(base):
369
399
  base_t = type(base)
370
400
  if base_t in SEQUENCE_TYPES:
371
- return [value for value in (email_name(base) for base in base if email_name(base)) if value]
401
+ return [
402
+ value
403
+ for value in (email_name(base) for base in base if email_name(base))
404
+ if value
405
+ ]
372
406
  name, _email = email_parts(base)
373
407
  return name
374
408
 
409
+
375
410
  def email_base(base):
376
411
  base_t = type(base)
377
412
  if base_t in SEQUENCE_TYPES:
378
- return [value for value in (email_base(base) for base in base if email_base(base)) if value]
413
+ return [
414
+ value
415
+ for value in (email_base(base) for base in base if email_base(base))
416
+ if value
417
+ ]
379
418
  _name, email = email_parts(base)
380
419
  return email
381
420
 
382
- def date_to_timestamp(value, format = "%d/%m/%Y"):
383
- if not value: return None
384
- try: value = datetime.datetime.strptime(value, format)
385
- except Exception: return None
421
+
422
+ def date_to_timestamp(value, format="%d/%m/%Y"):
423
+ if not value:
424
+ return None
425
+ try:
426
+ value = datetime.datetime.strptime(value, format)
427
+ except Exception:
428
+ return None
386
429
  value = value.utctimetuple()
387
430
  return calendar.timegm(value)
388
431
 
389
- def obfuscate(value, display_l = 3, token = "*"):
432
+
433
+ def obfuscate(value, display_l=3, token="*"):
390
434
  value_l = len(value)
391
435
  display_l = min([value_l, display_l])
392
436
  obfuscated = value[:display_l] + ((value_l - display_l) * token)
393
437
  return obfuscated
394
438
 
395
- def import_pip(name, package = None, default = None):
439
+
440
+ def import_pip(name, package=None, default=None):
396
441
  package = package or name
397
442
  try:
398
443
  module = __import__(name)
399
444
  except ImportError:
400
- try: module = install_pip_s(package)
401
- except Exception: return default
402
- try: module = __import__(name)
403
- except ImportError: return default
445
+ try:
446
+ module = install_pip_s(package)
447
+ except Exception:
448
+ return default
449
+ try:
450
+ module = __import__(name)
451
+ except ImportError:
452
+ return default
404
453
  return module
405
454
 
406
- def ensure_pip(name, package = None, delayed = False):
455
+
456
+ def ensure_pip(name, package=None, delayed=False):
407
457
  package = package or name
408
458
  try:
409
459
  __import__(name)
410
460
  except ImportError:
411
- install_pip_s(package, delayed = delayed)
461
+ install_pip_s(package, delayed=delayed)
462
+
412
463
 
413
- def install_pip(package, delayed = False, isolated = True, user = None):
464
+ def install_pip(package, delayed=False, isolated=True, user=None):
414
465
  try:
415
466
  import pip
467
+
416
468
  pip_internal = pip
417
469
  finally:
418
470
  pass
419
471
  try:
420
472
  import pip._internal
473
+
421
474
  pip_internal = pip._internal
422
475
  except ImportError:
423
476
  pass
424
477
  try:
425
478
  import pip._internal.main
479
+
426
480
  pip_internal = pip._internal.main
427
481
  except ImportError:
428
482
  pass
429
- user = config.conf("PIP_USER", False, cast = bool)
483
+ user = config.conf("PIP_USER", False, cast=bool)
430
484
  args = ["install", package]
431
- if hasattr(pip_internal, "main"): pip_main = pip_internal.main
432
- elif hasattr(pip, "main"): pip_main = pip.main #@UndefinedVariable
433
- else: raise exceptions.OperationalError(message = "pip not found")
434
- if user: args.insert(1, "--user")
485
+ if hasattr(pip_internal, "main"):
486
+ pip_main = pip_internal.main
487
+ elif hasattr(pip, "main"):
488
+ pip_main = pip.main # @UndefinedVariable
489
+ else:
490
+ raise exceptions.OperationalError(message="pip not found")
491
+ if user:
492
+ args.insert(1, "--user")
435
493
  if delayed:
436
- process = multiprocessing.Process(
437
- target = pip_main,
438
- args = (args,)
439
- )
494
+ process = multiprocessing.Process(target=pip_main, args=(args,))
440
495
  process.start()
441
496
  result = 0
442
497
  elif isolated:
443
- process = multiprocessing.Process(
444
- target = pip_main,
445
- args = (args,)
446
- )
498
+ process = multiprocessing.Process(target=pip_main, args=(args,))
447
499
  process.start()
448
500
  process.join()
449
501
  result = process.exitcode
450
502
  else:
451
503
  result = pip_main(args)
452
- if result == 0: return
453
- raise exceptions.OperationalError(message = "pip error")
504
+ if result == 0:
505
+ return
506
+ raise exceptions.OperationalError(message="pip error")
454
507
 
455
- def install_pip_s(package, delayed = False):
508
+
509
+ def install_pip_s(package, delayed=False):
456
510
  try:
457
- install_pip(package, delayed = delayed, user = False)
511
+ install_pip(package, delayed=delayed, user=False)
458
512
  except exceptions.OperationalError:
459
- install_pip(package, delayed = delayed, user = True)
513
+ install_pip(package, delayed=delayed, user=True)
514
+
460
515
 
461
- def request_json(request = None, encoding = "utf-8"):
516
+ def request_json(request=None, encoding="utf-8"):
462
517
  # retrieves the proper request object, either the provided
463
518
  # request or the default base request object and then in
464
519
  # case the the JSON data is already in the request properties
465
520
  # it is used (cached value) otherwise continues with the parse
466
521
  request = request or common.base().get_request()
467
- if "_data_j" in request.properties: return request.properties["_data_j"]
522
+ if "_data_j" in request.properties:
523
+ return request.properties["_data_j"]
468
524
 
469
525
  # retrieves the current request data and tries to
470
526
  # "load" it as JSON data, in case it fails gracefully
@@ -472,7 +528,8 @@ def request_json(request = None, encoding = "utf-8"):
472
528
  data = request.data
473
529
  try:
474
530
  is_bytes = legacy.is_bytes(data)
475
- if is_bytes: data = data.decode(encoding)
531
+ if is_bytes:
532
+ data = data.decode(encoding)
476
533
  data_j = json.loads(data)
477
534
  except Exception:
478
535
  data_j = {}
@@ -482,6 +539,7 @@ def request_json(request = None, encoding = "utf-8"):
482
539
  # may be used as the parsed value (post information)
483
540
  return data_j
484
541
 
542
+
485
543
  def get_context(self):
486
544
  """
487
545
  Retrieves the "best" possible context object for the current
@@ -498,26 +556,22 @@ def get_context(self):
498
556
  if hasattr(self, "request"):
499
557
  request = self.request
500
558
  is_valid = hasattr(request, "is_mock") and not request.is_mock()
501
- if is_valid: return request
559
+ if is_valid:
560
+ return request
502
561
 
503
562
  # uses the global strategy to try to retrieve a request for the
504
563
  # current execution environment (not thread safe)
505
564
  request = common.base().get_request()
506
565
  is_valid = hasattr(request, "is_mock") and not request.is_mock()
507
- if is_valid: return request
566
+ if is_valid:
567
+ return request
508
568
 
509
569
  # fallback return value meaning that it was not possible to retrieve
510
570
  # any valid execution context for the current environment
511
571
  return None
512
572
 
513
- def get_object(
514
- object = None,
515
- alias = False,
516
- page = False,
517
- find = False,
518
- norm = True,
519
- **kwargs
520
- ):
573
+
574
+ def get_object(object=None, alias=False, page=False, find=False, norm=True, **kwargs):
521
575
  # retrieves the base request object that is going to be used in
522
576
  # the construction of the object
523
577
  request = common.base().get_request()
@@ -535,36 +589,48 @@ def get_object(
535
589
  # uses all the values referencing data in the request to try
536
590
  # to populate the object this way it may be constructed using
537
591
  # any of theses strategies (easier for the developer)
538
- for name, value in data_j.items(): object[name] = value
539
- for name, value in request.files_s.items(): object[name] = value
540
- for name, value in request.post_s.items(): object[name] = value
541
- for name, value in request.params_s.items(): object[name] = value
592
+ for name, value in data_j.items():
593
+ object[name] = value
594
+ for name, value in request.files_s.items():
595
+ object[name] = value
596
+ for name, value in request.post_s.items():
597
+ object[name] = value
598
+ for name, value in request.params_s.items():
599
+ object[name] = value
542
600
 
543
601
  # in case the alias flag is set tries to resolve the attribute
544
602
  # alias and in case the find types are set converts the find
545
603
  # based attributes using the currently defined mapping map
546
- if alias: resolve_alias(object)
547
- if page: page_types(object)
548
- if find: find_types(object)
549
- if find: find_defaults(object, kwargs)
604
+ if alias:
605
+ resolve_alias(object)
606
+ if page:
607
+ page_types(object)
608
+ if find:
609
+ find_types(object)
610
+ if find:
611
+ find_defaults(object, kwargs)
550
612
 
551
613
  # in case the normalization flag is set runs the normalization
552
614
  # of the provided object so that sequences are properly handled
553
615
  # as defined in the specification (this allows multiple references)
554
- if norm: norm_object(object)
616
+ if norm:
617
+ norm_object(object)
555
618
 
556
619
  # returns the constructed object to the caller method this object
557
620
  # should be a structured representation of the data in the request
558
621
  return object
559
622
 
623
+
560
624
  def resolve_alias(object):
561
625
  for name, value in legacy.eager(object.items()):
562
- if not name in ALIAS: continue
626
+ if not name in ALIAS:
627
+ continue
563
628
  _alias = ALIAS[name]
564
629
  object[_alias] = value
565
630
  del object[name]
566
631
 
567
- def page_types(object, size = 50):
632
+
633
+ def page_types(object, size=50):
568
634
  page = object.get("page", 1)
569
635
  size = object.get("size", size)
570
636
  sorter = object.get("sorter", None)
@@ -574,7 +640,9 @@ def page_types(object, size = 50):
574
640
  offset = page - 1
575
641
  object["skip"] = offset * size
576
642
  object["limit"] = size
577
- if sorter: object["sort"] = "%s:%s" % (sorter, direction)
643
+ if sorter:
644
+ object["sort"] = "%s:%s" % (sorter, direction)
645
+
578
646
 
579
647
  def find_types(object):
580
648
  for name, value in legacy.eager(object.items()):
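get_object(), resolve_alias() and page_types() above are likewise only re-formatted. A sketch of the two standalone helpers, using integer page/size values (an assumption; the HTTP layer may deliver strings):

```python
from appier import util

params = {"order": "name:ascending", "page": 2, "size": 10}
util.resolve_alias(params)  # ALIAS renames "order" to "sort"
util.page_types(params)     # derives skip/limit from page/size
print(params["sort"], params["skip"], params["limit"])
# name:ascending 10 10
```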
@@ -584,16 +652,21 @@ def find_types(object):
584
652
  find_type = FIND_TYPES[name]
585
653
  object[name] = find_type(value)
586
654
 
655
+
587
656
  def find_defaults(object, kwargs):
588
657
  for name, value in legacy.iteritems(kwargs):
589
- if name in object: continue
590
- if not name in FIND_TYPES: continue
658
+ if name in object:
659
+ continue
660
+ if not name in FIND_TYPES:
661
+ continue
591
662
  object[name] = value
592
663
 
593
664
  for name, value in legacy.iteritems(FIND_DEFAULTS):
594
- if name in object: continue
665
+ if name in object:
666
+ continue
595
667
  object[name] = value
596
668
 
669
+
597
670
  def norm_object(object):
598
671
  # iterates over all the key value association in the
599
672
  # object, trying to find the ones that refer sequences
@@ -602,7 +675,8 @@ def norm_object(object):
602
675
  # verifies if the current name references a sequence
603
676
  # and if that's not the case continues the loop trying
604
677
  # to find any other sequence based value
605
- if not name.endswith("[]"): continue
678
+ if not name.endswith("[]"):
679
+ continue
606
680
 
607
681
  # removes the current reference to the name as the value
608
682
  # is not in the valid structure and then normalizes the
@@ -613,7 +687,9 @@ def norm_object(object):
613
687
  # in case the current value is not valid (empty) the object
614
688
  # is set with an empty list for the current iteration as this
615
689
  # is considered to be the default value
616
- if not value: object[name] = []; continue
690
+ if not value:
691
+ object[name] = []
692
+ continue
617
693
 
618
694
  # retrieves the normalized and linearized list of leafs
619
695
  # for the current value and ten verifies the size of each
@@ -640,6 +716,7 @@ def norm_object(object):
640
716
  _name_l = _name.split(".")
641
717
  set_object(_object, _name_l, _value[index])
642
718
 
719
+
643
720
  def set_object(object, name_l, value):
644
721
  """
645
722
  Sets a composite value in an object, allowing for
@@ -670,7 +747,8 @@ def set_object(object, name_l, value):
670
747
  # in case the length of the current names list has reached
671
748
  # one this is the final iteration and so the value is set
672
749
  # at the current naming point
673
- if len(name_l) == 1: object[name] = value
750
+ if len(name_l) == 1:
751
+ object[name] = value
674
752
 
675
753
  # otherwise this is a "normal" step and so a new map must
676
754
  # be created/retrieved and the iteration step should be
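set_object() above (re-format only) is the helper that norm_object() uses to rebuild dotted form names into nested maps. A minimal sketch:

```python
from appier import util

data = {}
# the name list is the dotted path already split into its components
util.set_object(data, ["account", "name"], "john")
print(data)  # {'account': {'name': 'john'}}
```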
@@ -681,6 +759,7 @@ def set_object(object, name_l, value):
681
759
  object[name] = map
682
760
  set_object(map, name_l[1:], value)
683
761
 
762
+
684
763
  def leafs(object):
685
764
  """
686
765
  Retrieves a list containing a series of tuples that
@@ -726,14 +805,16 @@ def leafs(object):
726
805
  # (properly validated for sequence presence)
727
806
  else:
728
807
  value_t = type(value)
729
- if not value_t == list: value = [value]
808
+ if not value_t == list:
809
+ value = [value]
730
810
  leafs_l.append((name, value))
731
811
 
732
812
  # returns the list of leaf nodes that was "just" created
733
813
  # to the caller method so that it may be used there
734
814
  return leafs_l
735
815
 
736
- def gather_errors(lazy_dict, resolve = True):
816
+
817
+ def gather_errors(lazy_dict, resolve=True):
737
818
  """
738
819
  Function responsible for the iterative gathering of
739
820
  lazy evaluation errors, allowing for a complete gathering
@@ -758,7 +839,8 @@ def gather_errors(lazy_dict, resolve = True):
758
839
  # iterates over the complete set of keys in the lazy dictionary
759
840
  # to evaluate the values and check if there are errors associated
760
841
  for key in lazy_dict:
761
- try: _value = lazy_dict.__getitem__(key, resolve = resolve)
842
+ try:
843
+ _value = lazy_dict.__getitem__(key, resolve=resolve)
762
844
  except (exceptions.AppierException, exceptions.BaseInternalError) as exception:
763
845
  _errors = errors.get(key, [])
764
846
  _errors.append(exception.message)
@@ -768,7 +850,8 @@ def gather_errors(lazy_dict, resolve = True):
768
850
  # the caller method so that it may be used for error handling
769
851
  return errors
770
852
 
771
- def gen_token(limit = None, hash = hashlib.sha256):
853
+
854
+ def gen_token(limit=None, hash=hashlib.sha256):
772
855
  """
773
856
  Generates a random cryptographic ready token according
774
857
  to the framework specification, this is generated using
@@ -792,9 +875,11 @@ def gen_token(limit = None, hash = hashlib.sha256):
792
875
  token_s = str(uuid.uuid4())
793
876
  token_s = token_s.encode("utf-8")
794
877
  token = hash(token_s).hexdigest()
795
- if limit: token = token[:limit]
878
+ if limit:
879
+ token = token[:limit]
796
880
  return token
797
881
 
882
+
798
883
  def html_to_text(data):
799
884
  """
800
885
  Converts the provided HTML textual data into a plain text
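gen_token() above hashes a UUID4 value with the provided hash function (SHA-256 by default), so the full token is a 64 character hexadecimal digest and limit simply truncates it. A quick sketch:

```python
from appier import util

token = util.gen_token()
print(len(token))                     # 64, sha256 hex digest
print(len(util.gen_token(limit=16)))  # 16, truncated form
```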
@@ -828,14 +913,16 @@ def html_to_text(data):
828
913
  lines = data.splitlines(False)
829
914
  for line in lines:
830
915
  line = line.strip()
831
- if not line: continue
916
+ if not line:
917
+ continue
832
918
  valid.append(line)
833
919
 
834
920
  data = "\n".join(valid)
835
921
  data = data.replace("\n.", ".")
836
922
  return data
837
923
 
838
- def camel_to_underscore(camel, separator = "_", lower = True):
924
+
925
+ def camel_to_underscore(camel, separator="_", lower=True):
839
926
  """
840
927
  Converts the provided camel cased based value into
841
928
  a normalized underscore based string.
@@ -860,13 +947,16 @@ def camel_to_underscore(camel, separator = "_", lower = True):
860
947
  conversion of the provided camel cased one.
861
948
  """
862
949
 
863
- if not camel: return camel
950
+ if not camel:
951
+ return camel
864
952
  value = FIRST_CAP_REGEX.sub(r"\1" + separator + r"\2", camel)
865
953
  value = ALL_CAP_REGEX.sub(r"\1" + separator + r"\2", value)
866
- if lower: value = value.lower()
954
+ if lower:
955
+ value = value.lower()
867
956
  return value
868
957
 
869
- def camel_to_readable(camel, lower = False, capitalize = False):
958
+
959
+ def camel_to_readable(camel, lower=False, capitalize=False):
870
960
  """
871
961
  Converts the given camel cased oriented string value
872
962
  into a readable one meaning that the returned value
@@ -889,11 +979,13 @@ def camel_to_readable(camel, lower = False, capitalize = False):
889
979
  used to display a value to an end user.
890
980
  """
891
981
 
892
- if not camel: return camel
893
- underscore = camel_to_underscore(camel, lower = lower)
894
- return underscore_to_readable(underscore, capitalize = capitalize)
982
+ if not camel:
983
+ return camel
984
+ underscore = camel_to_underscore(camel, lower=lower)
985
+ return underscore_to_readable(underscore, capitalize=capitalize)
986
+
895
987
 
896
- def underscore_to_camel(underscore, lower = False):
988
+ def underscore_to_camel(underscore, lower=False):
897
989
  """
898
990
  Converts the provided underscore cased based value into
899
991
  a normalized camel cased string.
@@ -915,12 +1007,15 @@ def underscore_to_camel(underscore, lower = False):
915
1007
  conversion of the provided underscore cased one.
916
1008
  """
917
1009
 
918
- if not underscore: return underscore
919
- camel = underscore_to_readable(underscore, capitalize = True, separator = "")
920
- if not lower: return camel
1010
+ if not underscore:
1011
+ return underscore
1012
+ camel = underscore_to_readable(underscore, capitalize=True, separator="")
1013
+ if not lower:
1014
+ return camel
921
1015
  return camel[0].lower() + camel[1:]
922
1016
 
923
- def underscore_to_readable(underscore, capitalize = False, separator = " "):
1017
+
1018
+ def underscore_to_readable(underscore, capitalize=False, separator=" "):
924
1019
  """
925
1020
  Converts the given underscore oriented string value
926
1021
  into a readable one meaning that the returned value
@@ -943,13 +1038,17 @@ def underscore_to_readable(underscore, capitalize = False, separator = " "):
943
1038
  used to display a value to an end user.
944
1039
  """
945
1040
 
946
- if not underscore: return underscore
1041
+ if not underscore:
1042
+ return underscore
947
1043
  parts = underscore.split("_")
948
1044
  parts = [part for part in parts if part]
949
- if capitalize: parts = [part[0].upper() + part[1:] for part in parts]
950
- else: parts[0] = parts[0][0].upper() + parts[0][1:]
1045
+ if capitalize:
1046
+ parts = [part[0].upper() + part[1:] for part in parts]
1047
+ else:
1048
+ parts[0] = parts[0][0].upper() + parts[0][1:]
951
1049
  return separator.join(parts)
952
1050
 
1051
+
953
1052
  def quote(value, *args, **kwargs):
954
1053
  """
955
1054
  Quotes the passed value according to the defined
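The naming helpers above (camel_to_underscore, camel_to_readable, underscore_to_camel, underscore_to_readable) keep their regex based behaviour; the hunks only re-format them. Example conversions:

```python
from appier import util

print(util.camel_to_underscore("HelloWorld"))               # hello_world
print(util.underscore_to_camel("hello_world"))              # HelloWorld
print(util.underscore_to_camel("hello_world", lower=True))  # helloWorld
print(util.camel_to_readable("HelloWorld"))                 # Hello World
```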
@@ -968,9 +1067,11 @@ def quote(value, *args, **kwargs):
968
1067
  """
969
1068
 
970
1069
  is_unicode = isinstance(value, legacy.UNICODE)
971
- if is_unicode: value = value.encode("utf-8")
1070
+ if is_unicode:
1071
+ value = value.encode("utf-8")
972
1072
  return legacy.quote(value, *args, **kwargs)
973
1073
 
1074
+
974
1075
  def unquote(value, *args, **kwargs):
975
1076
  """
976
1077
  Unquotes the provided value according to the URL scheme
@@ -991,10 +1092,12 @@ def unquote(value, *args, **kwargs):
991
1092
 
992
1093
  value = legacy.unquote(value, *args, **kwargs)
993
1094
  is_bytes = isinstance(value, legacy.BYTES)
994
- if is_bytes: value = value.decode("utf-8")
1095
+ if is_bytes:
1096
+ value = value.decode("utf-8")
995
1097
  return value
996
1098
 
997
- def escape(value, char, escape = "\\"):
1099
+
1100
+ def escape(value, char, escape="\\"):
998
1101
  """
999
1102
  Escapes the provided string value according to the requested
1000
1103
  target character(s) and escape value. Meaning that all the characters
@@ -1012,13 +1115,15 @@ def escape(value, char, escape = "\\"):
1012
1115
  :return: The final string with the target character properly escaped.
1013
1116
  """
1014
1117
 
1015
- if not isinstance(char, (list, tuple)): char = (char,)
1118
+ if not isinstance(char, (list, tuple)):
1119
+ char = (char,)
1016
1120
  value = value.replace(escape, escape + escape)
1017
1121
  for _char in char:
1018
1122
  value = value.replace(_char, escape + _char)
1019
1123
  return value
1020
1124
 
1021
- def unescape(value, escape = "\\"):
1125
+
1126
+ def unescape(value, escape="\\"):
1022
1127
  """
1023
1128
  Unescapes the provided string value using the provided escape
1024
1129
  character as the reference for the unescape operation.
@@ -1044,7 +1149,8 @@ def unescape(value, escape = "\\"):
1044
1149
  result.append(char)
1045
1150
  return "".join(result)
1046
1151
 
1047
- def count_unescape(value, sub, escape = "\\"):
1152
+
1153
+ def count_unescape(value, sub, escape="\\"):
1048
1154
  """
1049
1155
  Runs the sub string count operation on an escaped string
1050
1156
  so that it takes into account the escaped values avoiding
@@ -1076,7 +1182,8 @@ def count_unescape(value, sub, escape = "\\"):
1076
1182
  count += 1
1077
1183
  return count
1078
1184
 
1079
- def split_unescape(value, delimiter = " ", max = -1, escape = "\\", unescape = True):
1185
+
1186
+ def split_unescape(value, delimiter=" ", max=-1, escape="\\", unescape=True):
1080
1187
  """
1081
1188
  Splits the provided string around the delimiter character that
1082
1189
  has been provided and allows proper escaping of it using the
@@ -1112,10 +1219,12 @@ def split_unescape(value, delimiter = " ", max = -1, escape = "\\", unescape = T
1112
1219
  for char in iterator:
1113
1220
  if char == escape:
1114
1221
  try:
1115
- if not unescape: current.append(escape)
1222
+ if not unescape:
1223
+ current.append(escape)
1116
1224
  current.append(next(iterator))
1117
1225
  except StopIteration:
1118
- if unescape: current.append(escape)
1226
+ if unescape:
1227
+ current.append(escape)
1119
1228
  elif char == delimiter and not count == max:
1120
1229
  result.append("".join(current))
1121
1230
  current = []
@@ -1125,6 +1234,7 @@ def split_unescape(value, delimiter = " ", max = -1, escape = "\\", unescape = T
1125
1234
  result.append("".join(current))
1126
1235
  return result
1127
1236
 
1237
+
1128
1238
  def call_safe(callable, *args, **kwargs):
1129
1239
  """
1130
1240
  Method used to call a callable object using a "safe" approach,
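The escaping helpers above are also untouched functionally: escape() doubles the escape character and prefixes every target character, unescape() reverses that, and split_unescape() splits on a delimiter unless it is escaped. A sketch:

```python
from appier import util

escaped = util.escape("a.b\\c", ".")
print(escaped)                 # a\.b\\c
print(util.unescape(escaped))  # a.b\c

print(util.split_unescape("one two\\ three"))  # ['one', 'two three']
```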
@@ -1153,21 +1263,24 @@ def call_safe(callable, *args, **kwargs):
1153
1263
  argspec = legacy.getargspec(callable)
1154
1264
  method_args = argspec[0]
1155
1265
  method_kwargs = argspec[2]
1156
- if method_kwargs: return callable(*args, **kwargs)
1266
+ if method_kwargs:
1267
+ return callable(*args, **kwargs)
1157
1268
 
1158
1269
  # iterates over the complete set of keyword based arguments to be
1159
1270
  # used in the call and validates them against the method specification
1160
1271
  # in case they do not exist in the specification deletes them from
1161
1272
  # the map of keyword based arguments (not going to be sent)
1162
1273
  for name in legacy.keys(kwargs):
1163
- if name in method_args: continue
1274
+ if name in method_args:
1275
+ continue
1164
1276
  del kwargs[name]
1165
1277
 
1166
1278
  # runs the callable with the "remaining" arguments and keyword arguments
1167
1279
  # returning the value to the caller method
1168
1280
  return callable(*args, **kwargs)
1169
1281
 
1170
- def base_name(name, suffix = "_controller"):
1282
+
1283
+ def base_name(name, suffix="_controller"):
1171
1284
  """
1172
1285
  Retrieves the base name of a class name that contains
1173
1286
  a suffix (eg: controller) the resulting value is the
@@ -1189,10 +1302,12 @@ def base_name(name, suffix = "_controller"):
1189
1302
 
1190
1303
  suffix_l = len(suffix)
1191
1304
  name = camel_to_underscore(name)
1192
- if name.endswith(suffix): name = name[:suffix_l * -1]
1305
+ if name.endswith(suffix):
1306
+ name = name[: suffix_l * -1]
1193
1307
  return name
1194
1308
 
1195
- def base_name_m(name, suffixes = ("_controller", "_part", "_app")):
1309
+
1310
+ def base_name_m(name, suffixes=("_controller", "_part", "_app")):
1196
1311
  """
1197
1312
  Multiple iteration version of the base name function that provides
1198
1313
  a simple strategy for the retrieval of a "base name" without the
@@ -1209,9 +1324,11 @@ def base_name_m(name, suffixes = ("_controller", "_part", "_app")):
1209
1324
  and without the complete set of provided suffixes.
1210
1325
  """
1211
1326
 
1212
- for suffix in suffixes: name = base_name(name, suffix = suffix)
1327
+ for suffix in suffixes:
1328
+ name = base_name(name, suffix=suffix)
1213
1329
  return name
1214
1330
 
1331
+
1215
1332
  def is_content_type(data, target):
1216
1333
  """
1217
1334
  Verifies if the any of the provided mime types (target) is
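base_name() and base_name_m() above (re-format only) strip class-name suffixes after converting to underscore notation, which is how controller and part names are derived. For example:

```python
from appier import util

print(util.base_name("AccountController"))        # account
print(util.base_name("CacheApp", suffix="_app"))  # cache
print(util.base_name_m("AccountController"))      # account, tries _controller, _part, _app
```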
@@ -1228,15 +1345,19 @@ def is_content_type(data, target):
1228
1345
  for the content type.
1229
1346
  """
1230
1347
 
1231
- if not isinstance(target, (list, tuple)): target = (target,)
1348
+ if not isinstance(target, (list, tuple)):
1349
+ target = (target,)
1232
1350
  mime, _extra = parse_content_type(data)
1233
1351
  for item in target:
1234
1352
  type, _sub_type = item.split("/")
1235
1353
  wildcard = type + "/*"
1236
- if item in mime: return True
1237
- if wildcard in mime: return True
1354
+ if item in mime:
1355
+ return True
1356
+ if wildcard in mime:
1357
+ return True
1238
1358
  return False
1239
1359
 
1360
+
1240
1361
  def parse_content_type(data):
1241
1362
  """
1242
1363
  Parses the provided content type string retrieving both the multiple
@@ -1259,7 +1380,8 @@ def parse_content_type(data):
1259
1380
 
1260
1381
  # in case no valid type has been sent returns the values
1261
1382
  # immediately to avoid further problems
1262
- if not data: return types, extra_m
1383
+ if not data:
1384
+ return types, extra_m
1263
1385
 
1264
1386
  # extracts the mime and the extra parts from the data string
1265
1387
  # they are the basis of the processing method
@@ -1271,7 +1393,8 @@ def parse_content_type(data):
1271
1393
 
1272
1394
  # runs a series of verifications on the base mime value and in
1273
1395
  # case it's not valid returns the default values immediately
1274
- if not "/" in mime: return types, extra_m
1396
+ if not "/" in mime:
1397
+ return types, extra_m
1275
1398
 
1276
1399
  # strips the complete set of valid extra values, note
1277
1400
  # that these values are going to be processed as key
@@ -1292,7 +1415,8 @@ def parse_content_type(data):
1292
1415
  # goes through all of the extra key to value items
1293
1416
  # and converts them into proper dictionary values
1294
1417
  for extra_item in extra:
1295
- if not "=" in extra_item: continue
1418
+ if not "=" in extra_item:
1419
+ continue
1296
1420
  extra_item = extra_item.strip()
1297
1421
  key, value = extra_item.split("=")
1298
1422
  extra_m[key] = value
@@ -1301,6 +1425,7 @@ def parse_content_type(data):
1301
1425
  # mime types for the content and the extra key to value items
1302
1426
  return types, extra_m
1303
1427
 
1428
+
1304
1429
  def parse_cookie(data):
1305
1430
  """
1306
1431
  Parses/interprets the provided cookie data string, returning a
@@ -1327,7 +1452,8 @@ def parse_cookie(data):
1327
1452
  # final cookie map (with the key to value associations)
1328
1453
  cookies = [cookie.strip() for cookie in data.split(";")]
1329
1454
  for cookie in cookies:
1330
- if not "=" in cookie: cookie += "="
1455
+ if not "=" in cookie:
1456
+ cookie += "="
1331
1457
  name, value = cookie.split("=", 1)
1332
1458
  cookie_m[name] = value
1333
1459
 
@@ -1335,6 +1461,7 @@ def parse_cookie(data):
1335
1461
  # proper and easy access is possible to the cookie
1336
1462
  return cookie_m
1337
1463
 
1464
+
1338
1465
  def parse_multipart(data, boundary):
1339
1466
  """
1340
1467
  Parses the provided data buffer as a set of multipart data
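parse_cookie() above splits the raw Cookie header on ";" and on the first "=" of each pair, keeping valueless entries with an empty string. A sketch with an illustrative header value:

```python
from appier import util

print(util.parse_cookie("sid=abc123; lang=en; secure"))
# {'sid': 'abc123', 'lang': 'en', 'secure': ''}
```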
@@ -1363,26 +1490,29 @@ def parse_multipart(data, boundary):
1363
1490
 
1364
1491
  boundary = str(boundary)
1365
1492
  boundary = boundary.strip()
1366
- boundary_base = "--" + boundary[9:].strip("\"")
1493
+ boundary_base = "--" + boundary[9:].strip('"')
1367
1494
  boundary_value = legacy.bytes(boundary_base + "\r\n")
1368
1495
  boundary_extra = legacy.bytes(boundary_base + "--" + "\r\n")
1369
1496
  boundary_extra_l = len(boundary_extra)
1370
1497
  parts = data.split(boundary_value)
1371
- parts[-1] = parts[-1][:boundary_extra_l * -1]
1498
+ parts[-1] = parts[-1][: boundary_extra_l * -1]
1372
1499
 
1373
1500
  # iterates over the complete set of parts in the multi part payload
1374
1501
  # to process them and add them to the appropriate dictionary and list
1375
1502
  for part in parts:
1376
1503
  # in case the current part is not valid or empty skips the
1377
1504
  # current cycle (nothing to be done)
1378
- if not part: continue
1505
+ if not part:
1506
+ continue
1379
1507
 
1380
1508
  # splits the current part around the beginning of part sequence
1381
1509
  # and retrieves the proper contents if they exist
1382
1510
  part_s = part.split(b"\r\n\r\n", 1)
1383
1511
  headers = part_s[0]
1384
- if len(part_s) > 1: contents = part_s[1]
1385
- else: contents = None
1512
+ if len(part_s) > 1:
1513
+ contents = part_s[1]
1514
+ else:
1515
+ contents = None
1386
1516
 
1387
1517
  # strips the current headers string and then splits it around
1388
1518
  # the various lines that define the various headers
@@ -1410,7 +1540,8 @@ def parse_multipart(data, boundary):
1410
1540
  # part and in case there's none it's not possible to process the
1411
1541
  # current part (this header is considered required)
1412
1542
  disposition = headers.get("content-disposition", None)
1413
- if not disposition: continue
1543
+ if not disposition:
1544
+ continue
1414
1545
 
1415
1546
  # creates the dictionary that will hold the various parts of the
1416
1547
  # content disposition header that are going to be extracted for
@@ -1421,8 +1552,10 @@ def parse_multipart(data, boundary):
1421
1552
  for value in parts_data:
1422
1553
  value_s = value.split(b"=", 1)
1423
1554
  key = legacy.str(value_s[0]).strip().lower()
1424
- if len(value_s) > 1: value = value_s[1].strip()
1425
- else: value = None
1555
+ if len(value_s) > 1:
1556
+ value = value_s[1].strip()
1557
+ else:
1558
+ value = None
1426
1559
  parts[key] = value
1427
1560
 
1428
1561
  # retrieves the various characteristics values from the headers
@@ -1430,13 +1563,14 @@ def parse_multipart(data, boundary):
1430
1563
  # values are going to be used to decide on whether the current
1431
1564
  # part is a file or a normal key value attribute
1432
1565
  content_type = headers.get("content-type", None)
1433
- name = parts.get("name", b"\"undefined\"").strip(b"\"")
1434
- filename = parts.get("filename", b"").strip(b"\"")
1566
+ name = parts.get("name", b'"undefined"').strip(b'"')
1567
+ filename = parts.get("filename", b"").strip(b'"')
1435
1568
 
1436
1569
  # decodes the various content disposition values into an unicode
1437
1570
  # based string so that may be latter be used safely inside the
1438
1571
  # application environment(as expected by the current structure)
1439
- if content_type: content_type = content_type.decode("utf-8")
1572
+ if content_type:
1573
+ content_type = content_type.decode("utf-8")
1440
1574
  name = name.decode("utf-8")
1441
1575
  filename = filename.decode("utf-8")
1442
1576
 
@@ -1448,8 +1582,10 @@ def parse_multipart(data, boundary):
1448
1582
  # verifies if the file name is included in the parts unpacked
1449
1583
  # from the content type in case it does this is considered to be
1450
1584
  # file part otherwise it's a normal key value part
1451
- if "filename" in parts: is_file = True
1452
- else: is_file = False
1585
+ if "filename" in parts:
1586
+ is_file = True
1587
+ else:
1588
+ is_file = False
1453
1589
 
1454
1590
  if is_file:
1455
1591
  target = files
@@ -1469,13 +1605,15 @@ def parse_multipart(data, boundary):
1469
1605
  sequence_o.append(value)
1470
1606
  ordered_m[name] = sequence_o
1471
1607
 
1472
- if exists: continue
1608
+ if exists:
1609
+ continue
1473
1610
 
1474
1611
  tuple_s = (name, sequence_o)
1475
1612
  ordered.append(tuple_s)
1476
1613
 
1477
1614
  return (post, files, ordered)
1478
1615
 
1616
+
1479
1617
  def decode_params(params):
1480
1618
  """
1481
1619
  Decodes the complete set of parameters defined in the
@@ -1502,14 +1640,17 @@ def decode_params(params):
1502
1640
  items = []
1503
1641
  for item in value:
1504
1642
  is_bytes = legacy.is_bytes(item)
1505
- if is_bytes: item = item.decode("utf-8")
1643
+ if is_bytes:
1644
+ item = item.decode("utf-8")
1506
1645
  items.append(item)
1507
1646
  is_bytes = legacy.is_bytes(key)
1508
- if is_bytes: key = key.decode("utf-8")
1647
+ if is_bytes:
1648
+ key = key.decode("utf-8")
1509
1649
  _params[key] = items
1510
1650
 
1511
1651
  return _params
1512
1652
 
1653
+
1513
1654
  def load_form(form):
1514
1655
  # creates the map that is going to hold the "structured"
1515
1656
  # version of the form with key value associations
@@ -1526,8 +1667,9 @@ def load_form(form):
1526
1667
  # in case the sequence is larger than one element sets it,
1527
1668
  # otherwise retrieves and sets the value as the first element
1528
1669
  value = form[name]
1529
- value = value[0] if isinstance(value, (list, tuple)) and\
1530
- len(value) == 1 else value
1670
+ value = (
1671
+ value[0] if isinstance(value, (list, tuple)) and len(value) == 1 else value
1672
+ )
1531
1673
 
1532
1674
  # splits the complete name into its various components
1533
1675
  # and retrieves both the final (last) element and the
@@ -1555,27 +1697,33 @@ def load_form(form):
1555
1697
  # linear version of the attribute names
1556
1698
  return form_s
1557
1699
 
1558
- def check_login(self, token = None, request = None):
1700
+
1701
+ def check_login(self, token=None, request=None):
1559
1702
  # tries to retrieve the request from the current context
1560
1703
  # in case it has not been passed through other manner
1561
1704
  request = request or (self.request if self else None)
1562
1705
 
1563
1706
  # retrieves the data type of the token and creates the
1564
1707
  # tokens sequence value taking into account its type
1565
- if isinstance(token, SEQUENCE_TYPES): tokens = token
1566
- else: tokens = (token,)
1708
+ if isinstance(token, SEQUENCE_TYPES):
1709
+ tokens = token
1710
+ else:
1711
+ tokens = (token,)
1567
1712
 
1568
1713
  # in case the username value is set in session and there's
1569
1714
  # no token to be validated returns valid and in case the checking
1570
1715
  # of the complete set of tokens is valid also returns valid
1571
- if check_user(self, request = request) and not token: return True
1572
- if check_tokens(self, tokens, request = request): return True
1716
+ if check_user(self, request=request) and not token:
1717
+ return True
1718
+ if check_tokens(self, tokens, request=request):
1719
+ return True
1573
1720
 
1574
1721
  # returns the default value as invalid because if all the
1575
1722
  # validation procedures have failed the check is invalid
1576
1723
  return False
1577
1724
 
1578
- def check_user(self, request = None):
1725
+
1726
+ def check_user(self, request=None):
1579
1727
  # tries to retrieve the reference to the current request
1580
1728
  # either from the provided arguments or from the current context
1581
1729
  request = request or (self.request if self else None)
@@ -1583,21 +1731,26 @@ def check_user(self, request = None):
1583
1731
  # runs the multiple verification strategies available an
1584
1732
  # in case at least one of them succeeds the user is considered
1585
1733
  # to be currently authenticated
1586
- if request and "username" in request.session: return True
1587
- if request and hasattr(request, "tokens_p"): return True
1734
+ if request and "username" in request.session:
1735
+ return True
1736
+ if request and hasattr(request, "tokens_p"):
1737
+ return True
1588
1738
 
1589
1739
  # by default the user is considered to be not authenticated, all
1590
1740
  # of the tests for authentication have failed
1591
1741
  return False
1592
1742
 
1593
- def check_token(self, token, tokens_m = None, request = None):
1743
+
1744
+ def check_token(self, token, tokens_m=None, request=None):
1594
1745
  # in case the provided token is invalid or empty the method
1595
1746
  # return immediately in success (simple validation)
1596
- if not token: return True
1747
+ if not token:
1748
+ return True
1597
1749
 
1598
1750
  # tries to retrieve the tokens map from the provided argument
1599
1751
  # defaulting to the session one in case none is provided
1600
- if tokens_m == None: tokens_m = get_tokens_m(self, request = request)
1752
+ if tokens_m == None:
1753
+ tokens_m = get_tokens_m(self, request=request)
1601
1754
 
1602
1755
  # splits the provided token string into its parts, note that
1603
1756
  # a namespace is defined around the dot character
@@ -1607,9 +1760,12 @@ def check_token(self, token, tokens_m = None, request = None):
1607
1760
  # of parts to validate the complete chain of values against
1608
1761
  # the map of token parts (namespace validation)
1609
1762
  for token_p in token_l:
1610
- if not isinstance(tokens_m, dict): return False
1611
- if "*" in tokens_m and tokens_m["*"] == True: return True
1612
- if not token_p in tokens_m: return False
1763
+ if not isinstance(tokens_m, dict):
1764
+ return False
1765
+ if "*" in tokens_m and tokens_m["*"] == True:
1766
+ return True
1767
+ if not token_p in tokens_m:
1768
+ return False
1613
1769
  tokens_m = tokens_m[token_p]
1614
1770
 
1615
1771
  # determines if the final tokens map value is a dictionary
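The ACL helpers above (check_token, check_tokens) walk dot separated token namespaces against a tokens map; to_tokens_m(), further down in this file, builds that map from the sequence form. A sketch that passes the map explicitly, so the self/request context is not needed and None is used in its place (an assumption for illustration):

```python
from appier import util

# ['admin.users', 'base'] expands to {'admin': {'users': True}, 'base': True}
tokens_m = util.to_tokens_m(["admin.users", "base"])

print(util.check_token(None, "admin.users", tokens_m=tokens_m))  # True
print(util.check_token(None, "admin.other", tokens_m=tokens_m))  # False
print(util.check_token(None, None, tokens_m=tokens_m))           # True, empty token
```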
@@ -1621,38 +1777,33 @@ def check_token(self, token, tokens_m = None, request = None):
1621
1777
  # the final validation result accordingly
1622
1778
  return True if result == True else False
1623
1779
 
1624
- def check_tokens(self, tokens, tokens_m = None, request = None):
1780
+
1781
+ def check_tokens(self, tokens, tokens_m=None, request=None):
1625
1782
  # iterates over the complete set of tokens that are going
1626
1783
  # to be validated against the current context and if any of
1627
1784
  # them fails an invalid result is returned otherwise a valid
1628
1785
  # result is returned (indicating that all is valid)
1629
1786
  for token in tokens:
1630
- if not check_token(
1631
- self,
1632
- token,
1633
- tokens_m = tokens_m,
1634
- request = request
1635
- ): return False
1787
+ if not check_token(self, token, tokens_m=tokens_m, request=request):
1788
+ return False
1636
1789
  return True
1637
1790
 
1638
- def ensure_login(self, token = None, context = None, request = None):
1791
+
1792
+ def ensure_login(self, token=None, context=None, request=None):
1639
1793
  request = request or (self.request if self else None)
1640
- is_auth = check_user(self, request = request)
1641
- if not is_auth: raise exceptions.AppierException(
1642
- message = "User not authenticated",
1643
- code = 403,
1644
- token = token,
1645
- context = context
1646
- )
1647
- if check_token(self, token, request = request): return
+ is_auth = check_user(self, request=request)
+ if not is_auth:
+ raise exceptions.AppierException(
+ message="User not authenticated", code=403, token=token, context=context
+ )
+ if check_token(self, token, request=request):
+ return
  raise exceptions.AppierException(
- message = "Not enough permissions",
- code = 403,
- token = token,
- context = context
+ message="Not enough permissions", code=403, token=token, context=context
  )
 
- def get_tokens_m(self, request = None, set = None):
+
+ def get_tokens_m(self, request=None, set=None):
  """
  Retrieves the map of tokens from the current session so that
  they can be used for proper ACL validation.
@@ -1676,7 +1827,8 @@ def get_tokens_m(self, request = None, set = None):
  # in case it has not been passed through other manner, if
  # no valid context is found returns invalid value immediately
  request = request or (self.request if self else None)
- if not request: return dict()
+ if not request:
+ return dict()
 
  # verifies if the set flag is set and if that's not the case
  # ensures proper default value taking into account if there's
@@ -1688,8 +1840,10 @@ def get_tokens_m(self, request = None, set = None):
  # current request an in case it's not available used the default
  # one (simple session access)
  try:
- if hasattr(request, "tokens_p"): tokens_m = request.tokens_p()
- else: tokens_m = request.session.get("tokens", {})
+ if hasattr(request, "tokens_p"):
+ tokens_m = request.tokens_p()
+ else:
+ tokens_m = request.session.get("tokens", {})
  except Exception:
  return dict()
 
@@ -1700,7 +1854,8 @@ def get_tokens_m(self, request = None, set = None):
 
  # if the tokens value is already a map then an immediate return
  # is going to be performed (it is a valid tokens map)
- if is_map: return tokens_m
+ if is_map:
+ return tokens_m
 
  # in case the value present in the tokens value is a sequence
  # it must be properly converted into the equivalent map value
@@ -1712,7 +1867,8 @@ def get_tokens_m(self, request = None, set = None):
  # in case the set flag is set the tokens map should
  # be set in the request session (may be dangerous)
  # and then returns the tokens map to the caller method
- if set: request.session["tokens"] = tokens_m
+ if set:
+ request.session["tokens"] = tokens_m
  return tokens_m
 
  # returns the "default" empty tokens map as it was not possible
@@ -1720,6 +1876,7 @@ def get_tokens_m(self, request = None, set = None):
  # current context and environment
  return dict()
 
+
  def to_tokens_m(tokens):
  # creates a new map to be used to store tokens map that is
  # going to be created from the list/sequence version
@@ -1736,19 +1893,23 @@ def to_tokens_m(tokens):
  for token_p in head:
  current = tokens_c.get(token_p, {})
  is_dict = isinstance(current, dict)
- if not is_dict: current = {"_" : current}
+ if not is_dict:
+ current = {"_": current}
  tokens_c[token_p] = current
  tokens_c = current
 
  leaf = tokens_c.get(tail, None)
- if leaf and isinstance(leaf, dict): leaf["_"] = True
- else: tokens_c[tail] = True
+ if leaf and isinstance(leaf, dict):
+ leaf["_"] = True
+ else:
+ tokens_c[tail] = True
 
  # returns the final map version of the token to the caller
  # method so that it may be used for structure verification
  return tokens_m
 
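For context, a minimal sketch of how the token conversion above is expected to behave, assuming `to_tokens_m` is imported from `appier.util` as defined in this module (the expected output is inferred from the hunk above):

    from appier import util

    # a plain token becomes a boolean leaf, a dotted token becomes a nested map
    tokens_m = util.to_tokens_m(["admin", "user.read"])
    # expected: {"admin": True, "user": {"read": True}}
    print(tokens_m)
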
- def dict_merge(first, second, override = True, recursive = False, callback = None):
+
+ def dict_merge(first, second, override=True, recursive=False, callback=None):
  """
  Merges two dictionaries, optionally using a deep (recursive)
  strategy to achieve the merge.
@@ -1781,7 +1942,8 @@ def dict_merge(first, second, override = True, recursive = False, callback = Non
  # in case no override exists then the order of the items is
  # exchanged so that the first overrides the second values
  # and not the exact opposite
- if not override: first, second = second, first
+ if not override:
+ first, second = second, first
 
  # in case the recursive flag is set, must iterate over all
  # of the first items to try to merge any possible dictionary
@@ -1814,12 +1976,10 @@ def dict_merge(first, second, override = True, recursive = False, callback = Non
  # if it represents a dictionary (smart merge) then both
  # values are going to be merged recursively
  elif isinstance(value, dict) and isinstance(other, dict):
- if not override: value, other = other, value
+ if not override:
+ value, other = other, value
  final[key] = dict_merge(
- value,
- other,
- override = override,
- recursive = recursive
+ value, other, override=override, recursive=recursive
  )
 
  # otherwise the previous value is simply replaced with the
@@ -1831,7 +1991,8 @@ def dict_merge(first, second, override = True, recursive = False, callback = Non
  # runs the final iteration cycles around the second dictionary
  # values to try to set the unique second values in the final
  for key, value in legacy.iteritems(second):
- if key in final: continue
+ if key in final:
+ continue
  final[key] = value
 
  # returns the final merged result to the caller method, this
@@ -1848,7 +2009,8 @@ def dict_merge(first, second, override = True, recursive = False, callback = Non
  final.update(second)
  return final
 
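A minimal usage sketch of `dict_merge`, assuming it is imported from `appier.util` as defined above; the sample dictionaries are illustrative only:

    from appier import util

    first = {"a": 1, "b": {"x": 1}}
    second = {"b": {"y": 2}, "c": 3}

    # shallow merge, colliding keys are simply overridden by the second map
    util.dict_merge(first, second)  # {"a": 1, "b": {"y": 2}, "c": 3}

    # deep merge, nested dictionaries are merged key by key
    util.dict_merge(first, second, recursive=True)  # {"a": 1, "b": {"x": 1, "y": 2}, "c": 3}
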
- def deprecated(message = "Function %s is now deprecated"):
+
+ def deprecated(message="Function %s is now deprecated"):
  """
  Decorator that marks a certain function or method as
  deprecated so that whenever such function is called
@@ -1864,17 +2026,12 @@ def deprecated(message = "Function %s is now deprecated"):
  """
 
  def decorator(function):
-
  name = function.__name__ if hasattr(function, "__name__") else None
 
  @functools.wraps(function)
  def interceptor(*args, **kwargs):
  warnings.simplefilter("always", DeprecationWarning)
- warnings.warn(
- message % name,
- category = DeprecationWarning,
- stacklevel = 2
- )
+ warnings.warn(message % name, category=DeprecationWarning, stacklevel=2)
  warnings.simplefilter("default", DeprecationWarning)
  return function(*args, **kwargs)
 
@@ -1882,6 +2039,7 @@ def deprecated(message = "Function %s is now deprecated"):
 
  return decorator
 
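A short sketch of the deprecation decorator in use (the `old_sum` function is hypothetical):

    import warnings

    from appier import util

    @util.deprecated(message="Function %s is superseded by new_sum")
    def old_sum(a, b):
        return a + b

    with warnings.catch_warnings(record=True) as caught:
        old_sum(1, 2)  # emits a DeprecationWarning mentioning "old_sum"
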
+
  def cached(function):
  """
  Decorator that marks a certain function as cached meaning that
@@ -1912,43 +2070,41 @@ def cached(function):
  # returning the value immediately if it's cached
  properties = context.properties if context else None
  exists = name in properties if properties else False
- if exists: return properties[name]
+ if exists:
+ return properties[name]
 
  # as no cache retrieval was possible executes the function
  # operation and caches the resulting value into the properties
  # map (in case it exists)
  value = function(self, *args, **kwargs)
- if not properties == None: properties[name] = value
+ if not properties == None:
+ properties[name] = value
  return value
 
  return _cached
 
- def private(function):
 
+ def private(function):
  @functools.wraps(function)
  def _private(self, *args, **kwargs):
  ensure = kwargs.get("ensure", True)
  request = kwargs.get("request", self.request)
- if ensure: ensure_login(self, request = request)
+ if ensure:
+ ensure_login(self, request=request)
  sanitize(function, kwargs)
  return function(self, *args, **kwargs)
 
  return _private
 
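The caching decorator above memoizes a method result in the properties map of its context (typically the current request). A self-contained sketch of the same idea, using a hypothetical `Holder` class to stand in for that context:

    import functools

    def cached_toy(function):
        # stores the computed value under the function name in the
        # owner's properties map, mirroring the strategy used above
        @functools.wraps(function)
        def _cached(self, *args, **kwargs):
            name = function.__name__
            properties = getattr(self, "properties", None)
            if properties is not None and name in properties:
                return properties[name]
            value = function(self, *args, **kwargs)
            if properties is not None:
                properties[name] = value
            return value

        return _cached

    class Holder(object):
        def __init__(self):
            self.properties = {}

        @cached_toy
        def heavy(self):
            print("computing")
            return 42

    holder = Holder()
    holder.heavy()  # prints "computing" and returns 42
    holder.heavy()  # returns 42 from the cache, no recomputation
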
- def ensure(token = None, context = None):
 
+ def ensure(token=None, context=None):
  def decorator(function):
-
  @functools.wraps(function)
  def interceptor(self, *args, **kwargs):
  ensure = kwargs.get("ensure", True)
  request = kwargs.get("request", self.request)
- if ensure: ensure_login(
- self,
- token = token,
- context = context,
- request = request
- )
+ if ensure:
+ ensure_login(self, token=token, context=context, request=request)
  sanitize(function, kwargs)
  return function(self, *args, **kwargs)
 
@@ -1956,8 +2112,8 @@ def ensure(token = None, context = None):
 
  return decorator
 
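A usage sketch for the access-control decorators, assuming `private`, `ensure` and `route` are re-exported at the `appier` package level as is the framework's usual convention; the controller, route and token names are illustrative:

    import appier

    class AdminController(appier.Controller):

        @appier.route("/admin", "GET")
        @appier.ensure(token="admin")
        def dashboard(self):
            return "restricted area"

        @appier.route("/me", "GET")
        @appier.private
        def me(self):
            return "logged in"
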
- def delayed(function):
 
+ def delayed(function):
  @functools.wraps(function)
  def _delayed(self, *args, **kwargs):
  _args = [self] + list(args)
@@ -1965,55 +2121,50 @@ def delayed(function):
 
  return _delayed
 
- def route(
- url,
- method = "GET",
- asynchronous = False,
- json = False,
- opts = None,
- priority = 1
- ):
 
+ def route(url, method="GET", asynchronous=False, json=False, opts=None, priority=1):
  def decorator(function, *args, **kwargs):
- if is_detached(function): delay(function, *args, **kwargs)
- else: common.base().App.add_route(
- method,
- url,
- function,
- asynchronous = asynchronous,
- json = json,
- opts = opts,
- priority = priority
- )
+ if is_detached(function):
+ delay(function, *args, **kwargs)
+ else:
+ common.base().App.add_route(
+ method,
+ url,
+ function,
+ asynchronous=asynchronous,
+ json=json,
+ opts=opts,
+ priority=priority,
+ )
  return function
 
  def delay(function, *args, **kwargs):
  global CREATION_COUNTER
  route = (url, method, asynchronous, json, opts, priority)
- if not hasattr(function, "_routes"): function._routes = []
+ if not hasattr(function, "_routes"):
+ function._routes = []
  function._routes.append(route)
  function.creation_counter = CREATION_COUNTER
  CREATION_COUNTER += 1
 
  return decorator
 
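The `route` decorator is the main entry point for URL registration; a minimal application sketch (the `HelloApp` name is illustrative):

    import appier

    class HelloApp(appier.App):

        @appier.route("/hello", "GET")
        def hello(self):
            return "Hello World"

    HelloApp().serve()
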
- def error_handler(code, scope = None, json = False, opts = None, priority = 1):
 
+ def error_handler(code, scope=None, json=False, opts=None, priority=1):
  def decorator(function, *args, **kwargs):
- if is_detached(function): delay(function, *args, **kwargs)
- else: common.base().App.add_error(
- code,
- function,
- json = json,
- opts = opts,
- priority = priority
- )
+ if is_detached(function):
+ delay(function, *args, **kwargs)
+ else:
+ common.base().App.add_error(
+ code, function, json=json, opts=opts, priority=priority
+ )
  return function
 
  def delay(function, *args, **kwargs):
  global CREATION_COUNTER
  error = (code, scope, json, opts, priority)
- if not hasattr(function, "_errors"): function._errors = []
+ if not hasattr(function, "_errors"):
+ function._errors = []
  function._errors.append(error)
  function.creation_counter = CREATION_COUNTER
  CREATION_COUNTER += 1
@@ -2021,23 +2172,22 @@ def error_handler(code, scope = None, json = False, opts = None, priority = 1):
 
  return decorator
 
- def exception_handler(exception, scope = None, json = False, opts = None, priority = 1):
 
+ def exception_handler(exception, scope=None, json=False, opts=None, priority=1):
  def decorator(function, *args, **kwargs):
- if is_detached(function): delay(function, *args, **kwargs)
- else: common.base().App.add_exception(
- exception,
- function,
- json = json,
- opts = opts,
- priority = priority
- )
+ if is_detached(function):
+ delay(function, *args, **kwargs)
+ else:
+ common.base().App.add_exception(
+ exception, function, json=json, opts=opts, priority=priority
+ )
  return function
 
  def delay(function, *args, **kwargs):
  global CREATION_COUNTER
  _exception = (exception, scope, json, opts, priority)
- if not hasattr(function, "_exceptions"): function._exceptions = []
+ if not hasattr(function, "_exceptions"):
+ function._exceptions = []
  function._exceptions.append(_exception)
  function.creation_counter = CREATION_COUNTER
  CREATION_COUNTER += 1
@@ -2045,22 +2195,22 @@ def exception_handler(exception, scope = None, json = False, opts = None, priori
 
  return decorator
 
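A sketch of the error and exception handlers in use; the handler names and the single `error` argument are assumptions for illustration, not guaranteed by this diff:

    import appier

    class HelloApp(appier.App):

        @appier.error_handler(404)
        def not_found(self, error):
            return "nothing here"

        @appier.exception_handler(appier.OperationalError)
        def on_operational(self, error):
            return "temporary failure"
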
- def before_request(scope = "all", opts = None, priority = 1):
 
+ def before_request(scope="all", opts=None, priority=1):
  def decorator(function, *args, **kwargs):
- if is_detached(function): delay(function, *args, **kwargs)
- else: common.base().App.add_custom(
- "before_request",
- function,
- opts = opts,
- priority = priority
- )
+ if is_detached(function):
+ delay(function, *args, **kwargs)
+ else:
+ common.base().App.add_custom(
+ "before_request", function, opts=opts, priority=priority
+ )
  return function
 
  def delay(function, *args, **kwargs):
  global CREATION_COUNTER
  _custom = ("before_request", opts, priority)
- if not hasattr(function, "_customs"): function._customs = []
+ if not hasattr(function, "_customs"):
+ function._customs = []
  function._customs.append(_custom)
  function.creation_counter = CREATION_COUNTER
  CREATION_COUNTER += 1
@@ -2068,22 +2218,22 @@ def before_request(scope = "all", opts = None, priority = 1):
 
  return decorator
 
- def after_request(scope = "all", opts = None, priority = 1):
 
+ def after_request(scope="all", opts=None, priority=1):
  def decorator(function, *args, **kwargs):
- if is_detached(function): delay(function, *args, **kwargs)
- else: common.base().App.add_custom(
- "after_request",
- function,
- opts = opts,
- priority = priority
- )
+ if is_detached(function):
+ delay(function, *args, **kwargs)
+ else:
+ common.base().App.add_custom(
+ "after_request", function, opts=opts, priority=priority
+ )
  return function
 
  def delay(function, *args, **kwargs):
  global CREATION_COUNTER
  _custom = ("after_request", opts, priority)
- if not hasattr(function, "_customs"): function._customs = []
+ if not hasattr(function, "_customs"):
+ function._customs = []
  function._customs.append(_custom)
  function.creation_counter = CREATION_COUNTER
  CREATION_COUNTER += 1
@@ -2091,6 +2241,7 @@ def after_request(scope = "all", opts = None, priority = 1):
 
  return decorator
 
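A sketch of the request lifecycle hooks; note that both decorators are factories and must be called, even with no arguments (the logging bodies are illustrative assumptions):

    import appier

    class HelloApp(appier.App):

        @appier.before_request()
        def log_request(self):
            self.logger.debug("request received")

        @appier.after_request()
        def log_done(self):
            self.logger.debug("request handled")
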
2244
+
2094
2245
  def is_detached(function):
2095
2246
  """
2096
2247
  Verifies if the provided function value is considered to be
@@ -2114,112 +2265,119 @@ def is_detached(function):
2114
2265
  # verifies if the provided value is a valid function type
2115
2266
  # an in case it's not it's considered to not be a detached
2116
2267
  is_function = isinstance(function, types.FunctionType)
2117
- if not is_function: return False
2268
+ if not is_function:
2269
+ return False
2118
2270
 
2119
2271
  # retrieves the function's specification (should include arguments)
2120
2272
  # and then verifies that they are valid and that at least one valid
2121
2273
  # argument exists for the specification (as required by methods)
2122
2274
  spec = legacy.getargspec(function)
2123
- if not spec: return False
2124
- if not spec.args: return False
2275
+ if not spec:
2276
+ return False
2277
+ if not spec.args:
2278
+ return False
2125
2279
 
2126
2280
  # verifies that the name of the first argument of the function is the
2127
2281
  # the instance one, if that's the case this should be a detached method
2128
2282
  # that is currently being identified as a function
2129
2283
  return spec.args[0] == "self"
2130
2284
 
2285
+
2131
2286
  def sanitize(function, kwargs):
2132
2287
  removal = []
2133
2288
  method_a = legacy.getargspec(function)[0]
2134
2289
  for name in kwargs:
2135
- if name in method_a: continue
2290
+ if name in method_a:
2291
+ continue
2136
2292
  removal.append(name)
2137
- for name in removal: del kwargs[name]
2293
+ for name in removal:
2294
+ del kwargs[name]
2295
+
2138
2296
 
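A quick illustration of what `is_detached` considers a detached method, using two throwaway functions:

    from appier import util

    # a "detached" method: defined at module level, but written to be
    # bound to a class later (its first argument is named self)
    def handler(self):
        return "ok"

    def plain(value):
        return value

    util.is_detached(handler)  # True
    util.is_detached(plain)  # False
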
- def verify(condition, message = None, code = None, exception = None, **kwargs):
- if condition: return
+ def verify(condition, message=None, code=None, exception=None, **kwargs):
+ if condition:
+ return
  exception = exception or exceptions.AssertionError
  kwargs = dict(kwargs)
- if not message == None: kwargs["message"] = message
- if not code == None: kwargs["code"] = code
+ if not message == None:
+ kwargs["message"] = message
+ if not code == None:
+ kwargs["code"] = code
  raise exception(**kwargs)
 
- def verify_equal(first, second, message = None, code = None, exception = None, **kwargs):
+
+ def verify_equal(first, second, message=None, code=None, exception=None, **kwargs):
  message = message or "Expected %s got %s" % (repr(second), repr(first))
  return verify(
- first == second,
- message = message,
- code = code,
- exception = exception,
- **kwargs
+ first == second, message=message, code=code, exception=exception, **kwargs
  )
 
- def verify_not_equal(first, second, message = None, code = None, exception = None, **kwargs):
+
+ def verify_not_equal(first, second, message=None, code=None, exception=None, **kwargs):
  message = message or "Expected %s not equal to %s" % (repr(first), repr(second))
  return verify(
- not first == second,
- message = message,
- code = code,
- exception = exception,
- **kwargs
+ not first == second, message=message, code=code, exception=exception, **kwargs
  )
 
- def verify_type(value, types, null = True, message = None, code = None, exception = None, **kwargs):
+
+ def verify_type(
+ value, types, null=True, message=None, code=None, exception=None, **kwargs
+ ):
  message = message or "Expected %s to have type %s" % (repr(value), repr(types))
  return verify(
  (null and value == None) or isinstance(value, types),
- message = message,
- code = code,
- exception = exception,
+ message=message,
+ code=code,
+ exception=exception,
  **kwargs
  )
 
- def verify_many(sequence, message = None, code = None, exception = None, **kwargs):
+
+ def verify_many(sequence, message=None, code=None, exception=None, **kwargs):
  for condition in sequence:
- verify(
- condition,
- message = message,
- code = code,
- exception = exception,
- **kwargs
- )
+ verify(condition, message=message, code=code, exception=exception, **kwargs)
 
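A usage sketch of the verification helpers, assuming they are imported from `appier.util`; the values are illustrative:

    from appier import exceptions, util

    value = 3

    # passes silently, the condition holds
    util.verify(value > 0)

    # fails, raising an appier AssertionError carrying the message and code
    try:
        util.verify_equal(value, 5, message="value mismatch", code=400)
    except exceptions.AssertionError as error:
        print(error)
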
- def execute(args, command = None, path = None, shell = None, encoding = None):
- if shell == None: shell = os.name == "nt"
- if not encoding: encoding = sys.getfilesystemencoding()
- if command: args = command.split(" ")
+
+ def execute(args, command=None, path=None, shell=None, encoding=None):
+ if shell == None:
+ shell = os.name == "nt"
+ if not encoding:
+ encoding = sys.getfilesystemencoding()
+ if command:
+ args = command.split(" ")
  process = subprocess.Popen(
- args,
- stdout = subprocess.PIPE,
- stderr = subprocess.PIPE,
- shell = shell,
- cwd = path
+ args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=path
  )
  code = process.wait()
  stdout = process.stdout.read()
  stderr = process.stderr.read()
  stdout = stdout.decode(encoding)
  stderr = stderr.decode(encoding)
- return dict(
- stdout = stdout,
- stderr = stderr,
- code = code
- )
+ return dict(stdout=stdout, stderr=stderr, code=code)
+
 
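A minimal sketch of `execute`, which runs a sub-process and returns its captured output and exit code (the command is illustrative and its output stream may vary by platform):

    from appier import util

    result = util.execute(["python", "--version"])
    print(result["code"])  # process exit status
    print(result["stdout"])  # captured standard output
    print(result["stderr"])  # captured standard error
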
 @contextlib.contextmanager
- def ctx_locale(name = "", force = False):
+ def ctx_locale(name="", force=False):
  saved = locale.setlocale(locale.LC_ALL)
- if saved == name and not force: yield saved; return
- try: yield locale.setlocale(locale.LC_ALL, name)
- finally: locale.setlocale(locale.LC_ALL, saved)
+ if saved == name and not force:
+ yield saved
+ return
+ try:
+ yield locale.setlocale(locale.LC_ALL, name)
+ finally:
+ locale.setlocale(locale.LC_ALL, saved)
+
 
 @contextlib.contextmanager
- def ctx_request(app = None):
+ def ctx_request(app=None):
  app = app or common.base().get_app()
  _request = app._request
  app._request = app._mock
- try: yield True
- finally: app._request = _request
+ try:
+ yield True
+ finally:
+ app._request = _request
+
 
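A usage sketch of the locale context manager, which temporarily switches the process locale and restores the previous one on exit:

    import locale

    from appier import util

    with util.ctx_locale("C"):
        # inside the block the process locale is "C"
        print(locale.setlocale(locale.LC_ALL))
    # outside the block the previous locale is restored
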
 class FileTuple(tuple):
  """
@@ -2238,39 +2396,45 @@ class FileTuple(tuple):
  self._position = 0
 
  @classmethod
- def from_data(cls, data, name = None, mime = None):
+ def from_data(cls, data, name=None, mime=None):
  file_tuple = cls((name, mime, data))
  return file_tuple
 
  @classmethod
- def from_file(cls, file, name = None, mime = None):
+ def from_file(cls, file, name=None, mime=None):
  data = file.read()
- file_tuple = cls.from_data(data, name = name, mime = mime)
+ file_tuple = cls.from_data(data, name=name, mime=mime)
  return file_tuple
 
  @classmethod
- def from_path(cls, path, name = None, mime = None, guess = True):
+ def from_path(cls, path, name=None, mime=None, guess=True):
  mime = cls.guess(path) if mime == None and guess else mime
  file = open(path, "rb")
- try: file_tuple = cls.from_file(file, name = name, mime = mime)
- finally: file.close()
+ try:
+ file_tuple = cls.from_file(file, name=name, mime=mime)
+ finally:
+ file.close()
  return file_tuple
 
  @classmethod
  def guess(cls, name):
- mime = mimetypes.guess_type(name, strict = False)[0]
- if mime: return mime
+ mime = mimetypes.guess_type(name, strict=False)[0]
+ if mime:
+ return mime
  return None
 
- def read(self, count = None):
+ def read(self, count=None):
  data, data_l = self[2], len(self[2])
- if not count and self._position == 0: data, offset = data, data_l
- elif not count: data, offset = data[self._position:], data_l - self._position
- else: data, offset = data[self._position:self._position + count], count
+ if not count and self._position == 0:
+ data, offset = data, data_l
+ elif not count:
+ data, offset = data[self._position :], data_l - self._position
+ else:
+ data, offset = data[self._position : self._position + count], count
  self._position += offset
  return data
 
- def seek(self, offset, whence = os.SEEK_SET):
+ def seek(self, offset, whence=os.SEEK_SET):
  if whence == os.SEEK_SET:
  self._position = offset
  if whence == os.SEEK_CUR:
@@ -2281,14 +2445,17 @@ class FileTuple(tuple):
  def tell(self):
  return self._position
 
- def save(self, path, close = True):
+ def save(self, path, close=True):
  contents = self[2]
- if legacy.is_string(path): file = open(path, "wb")
- else: file = path
+ if legacy.is_string(path):
+ file = open(path, "wb")
+ else:
+ file = path
  try:
  file.write(contents)
  finally:
- if close: file.close()
+ if close:
+ file.close()
 
  def seekable(self):
  return True
@@ -2305,6 +2472,7 @@ class FileTuple(tuple):
  def data(self):
  return self[2]
 
+
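A sketch of `FileTuple` in use, wrapping in-memory bytes in the `(name, mime, data)` structure and reading them back through the file-like API (the payload is illustrative):

    from appier import util

    report = util.FileTuple.from_data(
        b"hello world", name="report.txt", mime="text/plain"
    )

    report.read(5)  # b"hello"
    report.tell()  # 5
    report.seek(0)
    report.read()  # b"hello world"
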
 class BaseThread(threading.Thread):
  """
  The top level thread class that is meant to encapsulate
@@ -2314,24 +2482,27 @@ class BaseThread(threading.Thread):
  a main thread to continue with execution logic.
  """
 
- def __init__(self, owner = None, daemon = False, *args, **kwargs):
+ def __init__(self, owner=None, daemon=False, *args, **kwargs):
  threading.Thread.__init__(self, *args, **kwargs)
  self.owner = owner
  self.daemon = daemon
 
  def run(self):
  threading.Thread.run(self)
- if not self.owner: return
+ if not self.owner:
+ return
  self.owner.start()
  self.owner = None
 
- class JSONEncoder(json.JSONEncoder):
 
+ class JSONEncoder(json.JSONEncoder):
  def __init__(self, *args, **kwargs):
  self.permissive = kwargs.pop("permissive", True)
  json.JSONEncoder.__init__(self, *args, **kwargs)
 
  def default(self, obj, **kwargs):
- if hasattr(obj, "json_v"): return obj.json_v()
- if self.permissive: return str(obj)
+ if hasattr(obj, "json_v"):
+ return obj.json_v()
+ if self.permissive:
+ return str(obj)
  return json.JSONEncoder.default(self, obj, **kwargs)
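A sketch of the permissive JSON encoder, which falls back to `str()` for values the standard encoder cannot serialize (the UUID payload is illustrative):

    import json
    import uuid

    from appier import util

    payload = {"id": uuid.uuid4(), "name": "example"}
    print(json.dumps(payload, cls=util.JSONEncoder))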