appier 1.34.6__py2.py3-none-any.whl → 1.34.8__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
appier/util.py CHANGED
@@ -1,2503 +1,2517 @@
1
- #!/usr/bin/python
2
- # -*- coding: utf-8 -*-
3
-
4
- # Hive Appier Framework
5
- # Copyright (c) 2008-2024 Hive Solutions Lda.
6
- #
7
- # This file is part of Hive Appier Framework.
8
- #
9
- # Hive Appier Framework is free software: you can redistribute it and/or modify
10
- # it under the terms of the Apache License as published by the Apache
11
- # Foundation, either version 2.0 of the License, or (at your option) any
12
- # later version.
13
- #
14
- # Hive Appier Framework is distributed in the hope that it will be useful,
15
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
16
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17
- # Apache License for more details.
18
- #
19
- # You should have received a copy of the Apache License along with
20
- # Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
21
-
22
- __author__ = "João Magalhães <joamag@hive.pt>"
23
- """ The author(s) of the module """
24
-
25
- __copyright__ = "Copyright (c) 2008-2024 Hive Solutions Lda."
26
- """ The copyright for the module """
27
-
28
- __license__ = "Apache License, Version 2.0"
29
- """ The license for the module """
30
-
31
- import os
32
- import re
33
- import sys
34
- import json
35
- import copy
36
- import uuid
37
- import types
38
- import locale
39
- import hashlib
40
- import calendar
41
- import datetime
42
- import warnings
43
- import functools
44
- import threading
45
- import mimetypes
46
- import contextlib
47
- import subprocess
48
- import multiprocessing
49
-
50
- from . import smtp
51
- from . import config
52
- from . import legacy
53
- from . import common
54
- from . import defines
55
- from . import exceptions
56
-
57
CREATION_COUNTER = 0
""" The global static creation counter value that
will be used to create an order in the declaration
of attributes for a set of classes """

FIRST_CAP_REGEX = re.compile(r"(.)([A-Z][a-z]+)")
""" Regular expression that ensures that the first
token of each camel string is properly capitalized """

ALL_CAP_REGEX = re.compile(r"([a-z0-9])([A-Z])")
""" The generalized transition from lower case to
upper case letter regex that will provide a way of
putting the underscore in the middle of the transition """

SORT_MAP = {"1": 1, "-1": -1, "ascending": 1, "descending": -1}
""" The map associating the normalized (text) way of
representing sorting with the current infra-structure
number way of representing the same information """

SEQUENCE_TYPES = (list, tuple)
""" The sequence defining the various types that are
considered to be sequence based for python """

# re-exports the defines module at this (module) level so
# that consumers of util may access it directly
defines = defines
81
-
82
-
83
def to_limit(limit_s):
    """
    Converts the provided limit string into a proper
    (non negative) integer limit value, negative values
    are coerced into zero.
    """

    value = int(limit_s)
    return 0 if value < 0 else value
88
-
89
-
90
def to_find(find_s):
    """
    Normalizes the provided find value into a sequence of
    find definitions, empty values resolve into an empty
    list and non list values are wrapped in a list.
    """

    if not find_s:
        return []
    return find_s if type(find_s) == list else [find_s]
97
-
98
-
99
def to_sort(sort_s):
    """
    Parses a comma separated sort definition string (eg:
    "name:ascending") into a list of (name, direction) tuples
    where direction is the numeric representation, the special
    "default" name short-circuits parsing returning an invalid
    value instead.
    """

    result = []
    for part in sort_s.split(","):
        pair = part.split(":", 1)
        if len(pair) == 1:
            pair.append("descending")
        field, order = pair
        if field == "default":
            return None
        result.append((field, SORT_MAP.get(order, 1)))
    return result
112
-
113
-
114
ALIAS = {
    "context": "find_d",
    "filters": "find_d",
    "filters[]": "find_d",
    "filter_def": "find_d",
    "filter_string": "find_s",
    "filter_name": "find_n",
    "filter_operator": "find_o",
    "insensitive": "find_i",
    "order": "sort",
    "offset": "skip",
    "start_record": "skip",
    "number_records": "limit",
}
""" The map containing the various attribute alias
between the normalized manner and the appier manner """

FIND_TYPES = dict(
    skip=int,
    limit=to_limit,
    find_s=legacy.UNICODE,
    find_d=to_find,
    find_i=bool,
    find_t=legacy.UNICODE,
    find_n=legacy.UNICODE,
    find_o=legacy.UNICODE,
    sort=to_sort,
    meta=bool,
    fields=list,
)
""" The map associating the various find fields with
their respective types, note that in case a special
conversion operation is required the associated value
may represent a conversion function instead """

FIND_DEFAULTS = dict(limit=10)
""" The map that defines the various default values
for a series of find related attributes """
152
-
153
-
154
def is_iterable(object):
    """
    Checks if the given value is of one of the types that the
    current infra-structure considers to be iterable (sequence
    based) data types.

    :type object: Object
    :param object: The value that is going to be tested for
    iterability.
    :rtype: bool
    :return: If the provided value is considered to be an
    iterable (sequence type) one.
    """

    return isinstance(object, defines.ITERABLES)
169
-
170
-
171
def is_mobile(user_agent):
    """
    Tells if the provided user agent string belongs to a mobile
    browser, matching both the complete string and its four
    character prefix against the mobile regular expressions.

    :type user_agent: String
    :param user_agent: The user agent string that is going to
    be tested for mobile presence.
    :rtype: bool
    :return: If the user agent is considered to represent a
    mobile browser.
    """

    if not user_agent:
        return False
    matched = defines.MOBILE_REGEX.search(user_agent)
    matched_prefix = defines.MOBILE_PREFIX_REGEX.search(user_agent[:4])
    return bool(matched or matched_prefix)
193
-
194
-
195
def is_tablet(user_agent):
    """
    Tells if the provided user agent string belongs to a tablet
    browser, matching the complete string against the tablet
    regular expression and the four character prefix against
    the mobile prefix one.

    :type user_agent: String
    :param user_agent: The user agent string that is going to
    be tested for tablet presence.
    :rtype: bool
    :return: If the user agent is considered to represent a
    tablet browser.
    """

    if not user_agent:
        return False
    matched = defines.TABLET_REGEX.search(user_agent)
    matched_prefix = defines.MOBILE_PREFIX_REGEX.search(user_agent[:4])
    return bool(matched or matched_prefix)
217
-
218
-
219
def is_browser(user_agent):
    """
    Tells if the provided user agent string represents an
    interactive browser, using the browser information map
    built from the user agent for the decision.

    :type user_agent: String
    :param user_agent: The user agent string to be verified
    for (interactive) browser presence.
    :rtype: bool
    :return: If the user agent represents an interactive browser.
    """

    info = browser_info(user_agent)
    if not info:
        return False
    return bool(info.get("interactive", False))
240
-
241
-
242
def is_bot(user_agent):
    """
    Tells if the provided user agent string represents an
    automated (bot) agent, using the browser information map
    built from the user agent for the decision.

    :type user_agent: String
    :param user_agent: The user agent string to be verified
    for bot presence.
    :rtype: bool
    :return: If the user agent represents an automated bot.
    """

    info = browser_info(user_agent=user_agent)
    if not info:
        return False
    return bool(info.get("bot", False))
263
-
264
-
265
def browser_info(user_agent):
    """
    Retrieves a dictionary containing information about the browser
    and the operative system associated with the provided user agent.

    The retrieval of the information depends on the kind of user
    agent string provided, as coverage is limited.

    :type user_agent: String
    :param user_agent: The HTTP based user agent string to be processed.
    :rtype: Dictionary
    :return: The dictionary/map containing the information processed from
    the provided user agent, or an invalid value when nothing matches.
    """

    # an empty or invalid user agent carries no information
    if not user_agent:
        return None

    info = dict()

    # tries to match one of the known browsers, the first entry
    # whose sub string and version marker are both present in the
    # user agent is the one selected
    for browser_i in defines.BROWSER_INFO:
        identity = browser_i["identity"]
        sub_string = browser_i.get("sub_string", identity)
        version_search = browser_i.get("version_search", sub_string + "/")
        interactive = browser_i.get("interactive", True)
        bot = browser_i.get("bot", False)

        if not sub_string in user_agent:
            continue
        if not version_search in user_agent:
            continue

        # extracts the raw version string that follows the version
        # marker and normalizes it into float and integer variants
        version_i = user_agent.index(version_search) + len(version_search)
        version = user_agent[version_i:].split(" ", 1)[0].strip(" ;")
        try:
            version_f = float(".".join(version.split(".")[:2]))
        except ValueError:
            version_f = 0.0
        try:
            version_i = int(version_f)
        except ValueError:
            # fixed: the fallback must reset the integer version,
            # the previous code wrongly re-assigned the float one
            # (leaving version_i with the string index value)
            version_i = 0

        info.update(
            name=identity,
            version=version,
            version_f=version_f,
            version_i=version_i,
            interactive=interactive,
            bot=bot,
        )
        break

    # tries to determine the operative system by simple sub
    # string matching against the user agent
    for os_i in defines.OS_INFO:
        identity = os_i["identity"]
        sub_string = os_i.get("sub_string", identity)

        if not sub_string in user_agent:
            continue

        info.update(os=identity)
        break

    return info if info else None
329
-
330
-
331
def email_parts(base, strip=True):
    """
    Splits a generalized email value into its name and email
    components, the value may be a plain email or the more
    complex form (eg: Name <email>).

    Sequences are handled transparently, producing a list with
    the parsed tuple of each of the contained items.

    :type base: String/List
    :param base: The email value or sequence of email values
    that are going to be parsed.
    :type strip: bool
    :param strip: If extra space characters should be removed
    from the value before the parsing takes place.
    :rtype: Tuple/List
    :return: The resulting (name, email) tuple or the list of
    such tuples for sequence based input.
    """

    if type(base) in SEQUENCE_TYPES:
        return [email_parts(item, strip=strip) for item in base]

    if not base:
        return (None, None)
    if strip:
        base = base.strip()

    match = defines.EMAIL_REGEX.match(base)
    if not match:
        return (None, None)

    email = match.group("email_a") or match.group("email_b")
    name = match.group("name") or email

    return (name, email)
370
-
371
-
372
def email_mime(base, encoding="utf-8"):
    """
    Converts the provided email value (or sequence of values)
    into the MIME compliant "Name <email>" representation, with
    the name part properly header encoded, invalid values are
    filtered out (sequence) or resolve to an invalid value.
    """

    if legacy.PYTHON_3:
        encoding = None

    if type(base) in SEQUENCE_TYPES:
        mimes = (email_mime(item, encoding=encoding) for item in base)
        return [mime for mime in mimes if mime]

    name, email = email_parts(base)
    if not name or not email:
        return None

    return "%s <%s>" % (smtp.header(name, encoding=encoding), email)
391
-
392
-
393
def email_name(base):
    """
    Extracts the name part of the provided email value, in case
    a sequence is provided a list with the name of each of the
    valid items is returned instead.

    :type base: String/List
    :param base: The email value or sequence of email values
    that are going to be parsed.
    :rtype: String/List
    :return: The name associated with the email or the list of
    names for sequence based input.
    """

    if type(base) in SEQUENCE_TYPES:
        # evaluates each item a single time (the previous
        # implementation parsed every item twice) and filters
        # out the invalid (falsy) results, consistent with the
        # strategy used in email_mime
        names = (email_name(item) for item in base)
        return [name for name in names if name]
    name, _email = email_parts(base)
    return name
403
-
404
-
405
def email_base(base):
    """
    Extracts the (base) email address part of the provided email
    value, in case a sequence is provided a list with the email
    of each of the valid items is returned instead.

    :type base: String/List
    :param base: The email value or sequence of email values
    that are going to be parsed.
    :rtype: String/List
    :return: The email address associated with the value or the
    list of addresses for sequence based input.
    """

    if type(base) in SEQUENCE_TYPES:
        # evaluates each item a single time (the previous
        # implementation parsed every item twice) and filters
        # out the invalid (falsy) results, consistent with the
        # strategy used in email_mime
        emails = (email_base(item) for item in base)
        return [email for email in emails if email]
    _name, email = email_parts(base)
    return email
415
-
416
-
417
def date_to_timestamp(value, format="%d/%m/%Y"):
    """
    Parses the provided date string according to the requested
    format and converts it into a UTC based timestamp, empty
    or un-parsable values resolve to an invalid value.

    :type value: String
    :param value: The date string that is going to be parsed
    and converted into a timestamp.
    :type format: String
    :param format: The strptime compatible format to be used
    in the parsing of the date string.
    :rtype: int
    :return: The UTC based timestamp or an invalid value.
    """

    if not value:
        return None
    try:
        date_time = datetime.datetime.strptime(value, format)
    except Exception:
        return None
    return calendar.timegm(date_time.utctimetuple())
426
-
427
-
428
def obfuscate(value, display_l=3, token="*"):
    """
    Obfuscates the provided string value keeping only the first
    display_l characters visible, every remaining character is
    replaced with the token character.
    """

    total = len(value)
    visible = min(total, display_l)
    return value[:visible] + token * (total - visible)
433
-
434
-
435
def import_pip(name, package=None, default=None):
    """
    Imports the module with the provided name, falling back to a
    PIP based installation of the associated package in case the
    initial import fails, the default value is returned when the
    module cannot be resolved at all.
    """

    package = package or name
    try:
        return __import__(name)
    except ImportError:
        pass
    try:
        install_pip_s(package)
    except Exception:
        return default
    try:
        return __import__(name)
    except ImportError:
        return default
449
-
450
-
451
def ensure_pip(name, package=None, delayed=False):
    """
    Makes sure that the module with the provided name is
    importable, running a PIP based installation of the
    associated package otherwise.
    """

    package = package or name
    try:
        __import__(name)
    except ImportError:
        install_pip_s(package, delayed=delayed)
457
-
458
-
459
def install_pip(package, delayed=False, isolated=True, user=None):
    """
    Installs the provided PIP package, optionally in a delayed
    (background, non blocking) or isolated (child process) fashion,
    raising an operational error in case the installation fails
    or in case no pip entry point can be found.

    :type package: String
    :param package: The name of the PIP package to be installed.
    :type delayed: bool
    :param delayed: If the installation should run in a background
    process without waiting for its completion.
    :type isolated: bool
    :param isolated: If the installation should run in a separate
    (child) process, waiting for its completion.
    :type user: bool
    :param user: Effectively ignored, the user mode flag is read
    from the PIP_USER configuration value instead (see below).
    """

    # resolves the best possible entry point for pip, its internal
    # structure changed across versions so multiple locations are
    # tried in sequence (newest ones last take precedence)
    try:
        import pip

        pip_internal = pip
    finally:
        pass
    try:
        import pip._internal

        pip_internal = pip._internal
    except ImportError:
        pass
    try:
        import pip._internal.main

        pip_internal = pip._internal.main
    except ImportError:
        pass
    # NOTE(review): the provided user argument is overridden here
    # by the PIP_USER configuration value — confirm this is intended
    user = config.conf("PIP_USER", False, cast=bool)
    args = ["install", package]
    if hasattr(pip_internal, "main"):
        pip_main = pip_internal.main
    elif hasattr(pip, "main"):
        pip_main = pip.main  # @UndefinedVariable
    else:
        raise exceptions.OperationalError(message="pip not found")
    if user:
        args.insert(1, "--user")
    if delayed:
        # fire and forget execution, the exit code is assumed to
        # be a success as the process completion is not awaited
        process = multiprocessing.Process(target=pip_main, args=(args,))
        process.start()
        result = 0
    elif isolated:
        # runs the installation in a child process, waiting for
        # its completion and using its exit code as the result
        process = multiprocessing.Process(target=pip_main, args=(args,))
        process.start()
        process.join()
        result = process.exitcode
    else:
        # runs the installation in the current process, which may
        # have side effects in the current interpreter state
        result = pip_main(args)
    if result == 0:
        return
    raise exceptions.OperationalError(message="pip error, exit code (%d)" % result)
502
-
503
-
504
def install_pip_s(package, delayed=False):
    """
    Safe version of the PIP install operation, first tries a
    system level installation and, upon operational failure,
    retries the installation in (per) user mode.
    """

    try:
        install_pip(package, delayed=delayed, user=False)
    except exceptions.OperationalError:
        install_pip(package, delayed=delayed, user=True)
509
-
510
-
511
def request_json(request=None, encoding="utf-8"):
    """
    Retrieves the JSON payload of the provided request (or of
    the current global request when none is given), caching the
    parsed value in the request properties, parsing failures
    gracefully resolve to an empty dictionary.
    """

    # resolves the target request object and re-uses any
    # previously parsed (cached) payload when available
    request = request or common.base().get_request()
    if "_data_j" in request.properties:
        return request.properties["_data_j"]

    # parses the raw request data as JSON, decoding it first
    # when provided as bytes, any failure is gracefully handled
    # by falling back to an empty dictionary
    data = request.data
    try:
        if legacy.is_bytes(data):
            data = data.decode(encoding)
        data_j = json.loads(data)
    except Exception:
        data_j = {}

    # caches the parsed payload in the request and returns it
    # to the caller (post information)
    request.properties["_data_j"] = data_j
    return data_j
536
-
537
-
538
def get_context(self):
    """
    Resolves the "best" possible context object (typically an
    attached request) for the current execution life-cycle.

    The instance level request is tried first and then the
    globally registered one, an invalid value is returned when
    no valid context can be resolved.
    """

    # the instance level request has priority, it must not be
    # a mock object to be considered a valid context
    request = getattr(self, "request", None)
    if hasattr(request, "is_mock") and not request.is_mock():
        return request

    # falls back to the globally registered request (not thread
    # safe), subject to the same mock validation
    request = common.base().get_request()
    if hasattr(request, "is_mock") and not request.is_mock():
        return request

    # no valid execution context could be resolved for the
    # current environment
    return None
567
-
568
-
569
def get_object(object=None, alias=False, page=False, find=False, norm=True, **kwargs):
    """
    Builds a structured object from the multiple data sources of
    the current request (JSON payload, files, post and parameters),
    optionally running alias resolution, pagination conversion,
    find conversion and sequence normalization over the result.
    """

    # the global request is the source of all the data that is
    # going to populate the resulting object
    request = common.base().get_request()

    # a shallow copy of the provided object is used as the
    # starting point, or an empty map (form validation)
    object = copy.copy(object) if object else {}

    # gathers the JSON payload of the request, gracefully
    # handled (may resolve to an empty map)
    data_j = request_json()

    # merges every data source of the request into the object,
    # allowing multiple population strategies (easier for the
    # developer), later sources override earlier ones
    for source in (data_j, request.files_s, request.post_s, request.params_s):
        for name, value in source.items():
            object[name] = value

    # runs the requested optional processing stages over the
    # object (alias, pagination and find conversion)
    if alias:
        resolve_alias(object)
    if page:
        page_types(object)
    if find:
        find_types(object)
        find_defaults(object, kwargs)

    # normalizes sequence based values ("name[]" keys) when
    # requested, as defined in the specification
    if norm:
        norm_object(object)

    # the resulting object is a structured representation of
    # the data present in the request
    return object
617
-
618
-
619
def resolve_alias(object):
    """
    Renames the keys of the provided object that are known
    aliases into their canonical (appier) names, the object
    is changed in-place.
    """

    for name, value in legacy.eager(object.items()):
        canonical = ALIAS.get(name, None)
        if canonical is None:
            continue
        object[canonical] = value
        del object[name]
626
-
627
-
628
def page_types(object, size=50):
    """
    Converts the page oriented attributes of the object (page,
    size, sorter and direction) into the skip, limit and sort
    attributes used by the persistence layer, the object is
    changed in-place.
    """

    page = int(object.get("page", 1))
    size = int(object.get("size", size))
    sorter = object.get("sorter", None)
    direction = object.get("direction", "descending")
    object["skip"] = (page - 1) * size
    object["limit"] = size
    if sorter:
        object["sort"] = "%s:%s" % (sorter, direction)
640
-
641
-
642
def find_types(object):
    """
    Coerces the find oriented attributes of the object into
    their expected types, removing every attribute that is not
    part of the find specification, the object is changed
    in-place.
    """

    for name, value in legacy.eager(object.items()):
        converter = FIND_TYPES.get(name, None)
        if converter is None:
            del object[name]
            continue
        object[name] = converter(value)
649
-
650
-
651
def find_defaults(object, kwargs):
    """
    Completes the object with default find values, first from
    the provided keyword arguments (restricted to known find
    attributes) and then from the global find defaults, existing
    values are never overwritten, the object is changed in-place.
    """

    for name, value in legacy.iteritems(kwargs):
        if name in object:
            continue
        if not name in FIND_TYPES:
            continue
        object[name] = value

    for name, value in legacy.iteritems(FIND_DEFAULTS):
        if not name in object:
            object[name] = value
663
-
664
-
665
def norm_object(object):
    """
    Normalizes the sequence based values of the provided object,
    meaning that keys ending with the "[]" suffix are converted
    into proper lists of dictionaries built from their leaf
    values, the object is changed in-place (this allows multiple
    references as defined in the specification).

    :type object: Dictionary
    :param object: The object that is going to have its sequence
    based values normalized (in-place operation).
    """

    # iterates over a snapshot of the key value associations as
    # the object is mutated during the loop (consistent with the
    # other in-place mutators of this module), trying to find
    # the names that refer sequences so that they are normalized
    for name, value in legacy.eager(object.items()):
        # ignores any name that does not carry the sequence
        # suffix as it requires no normalization
        if not name.endswith("[]"):
            continue

        # removes the raw (suffixed) reference as it's not in
        # the valid structure and computes the normalized name
        # by dropping the extra sequence indication suffix
        del object[name]
        name = name[:-2]

        # an empty value is normalized into an empty sequence,
        # considered to be the default value
        if not value:
            object[name] = []
            continue

        # retrieves the normalized and linearized list of leafs
        # for the current value and uses the size of the first
        # leaf value sequence to pre-generate the complete set
        # of dictionaries that will populate the sequence
        # (local renamed from "list" to avoid shadowing the
        # builtin list type)
        leafs_l = leafs(value)
        first = leafs_l[0] if leafs_l else (None, [])
        _fqn, values = first
        size = len(values)
        items = [dict() for _index in range(size)]

        # sets the list of generated dictionaries in the object
        # under the newly normalized name
        object[name] = items

        # gathers each of the leaf key value pairs into the
        # various objects contained in the sequence (the
        # normalization process itself)
        for _name, _value in leafs_l:
            for index in range(size):
                _object = items[index]
                _name_l = _name.split(".")
                set_object(_object, _name_l, _value[index])
713
-
714
-
715
def set_object(object, name_l, value):
    """
    Sets a composite value in an object, allowing for
    dynamic setting of arbitrarily deep key values.

    This method is useful for situations where one wants
    to set a value at a randomly defined depth inside
    an object without the burden of creating the complete
    chain of inner dictionaries.

    :type object: Dictionary
    :param object: The target object that is going to be
    changed and set with the target value.
    :type name_l: List
    :param name_l: The list of names that define the fully
    qualified name to be used in the setting of the value,
    for example path.to.end would be a three element list
    containing each of the partial names.
    :type value: Object
    :param value: The value that is going to be set in the
    defined target of the object.
    """

    # the first name in the list is the key to be handled
    # in the current recursion step
    name = name_l[0]

    # when a single name remains this is the final step and
    # the value is set directly at the current naming point
    if len(name_l) == 1:
        object[name] = value

    # otherwise ensures that the intermediate dictionary
    # exists and recurses into it with the remaining names
    # (local renamed from "map" to avoid shadowing the
    # builtin map function)
    else:
        inner = object.get(name, {})
        object[name] = inner
        set_object(inner, name_l[1:], value)
756
-
757
-
758
def leafs(object):
    """
    Computes the list of leaf node tuples for the provided
    object structure, where a leaf is any value that is not a
    dictionary, intermediate dictionaries are considered to be
    trunks and are traversed recursively.

    This is a recursive function that takes some memory for
    the construction of the list, and so should be used with
    the proper care to avoid bottlenecks.

    :type object: Dictionary
    :param object: The object for which the leafs list
    structure is meant to be retrieved.
    :rtype: List
    :return: The list of (fully qualified name, values) tuples
    for each of the leaf nodes of the provided object.
    """

    result = []

    # walks every key value association of the object, trunks
    # (dictionary values) are expanded recursively with their
    # names prefixed by the current key, any other value is a
    # leaf whose value is coerced into a sequence for uniform
    # downstream handling
    for name, value in object.items():
        if type(value) == dict:
            result.extend(
                (name + "." + inner_name, inner_value)
                for inner_name, inner_value in leafs(value)
            )
        else:
            if not type(value) == list:
                value = [value]
            result.append((name, value))

    return result
810
-
811
-
812
def gather_errors(lazy_dict, resolve=True):
    """
    Iteratively gathers the complete set of lazy evaluation
    errors contained in the provided lazy dictionary, instead
    of stopping at the first evaluation failure.

    :type lazy_dict: LazyDict
    :param lazy_dict: The lazy dictionary that is going to be
    percolated and evaluated sequentially.
    :type resolve: bool
    :param resolve: If the lazy dictionary values should be evaluated
    even if they have already been eager loaded, by unsetting this value
    there's a risk of not gathering all of the errors.
    :rtype: Dictionary
    :return: The dictionary of error message sequences indexed
    by parameter name.
    """

    # evaluates every key of the lazy dictionary, collecting
    # the message of each raised (known) exception under the
    # associated key name
    errors = dict()
    for key in lazy_dict:
        try:
            lazy_dict.__getitem__(key, resolve=resolve)
        except (exceptions.AppierException, exceptions.BaseInternalError) as exception:
            errors.setdefault(key, []).append(exception.message)
    return errors
847
-
848
-
849
def gen_token(limit=None, hash=hashlib.sha256):
    """
    Generates a random cryptographic ready token by hashing a
    truly random UUID based seed with the provided hash digest
    strategy (SHA256 by default), the result is returned as an
    hexadecimal based string according to the standard.

    :type limit: int
    :param limit: The maximum number of characters allowed
    for the token to be generated.
    :type hash: Function
    :param hash: The hashing method that is going to be used
    for the hash of the generated token, this should be compliant
    with the base python hashing infra-structure.
    :rtype: String
    :return: The hexadecimal based string value.
    """

    seed = str(uuid.uuid4()).encode("utf-8")
    token = hash(seed).hexdigest()
    return token[:limit] if limit else token
876
-
877
-
878
def html_to_text(data):
    """
    Converts the provided HTML textual data into an approximate
    plain text representation using a series of heuristics, the
    conversion should not be considered completely reliable.

    The current implementation is not memory or processor efficient
    and should be used carefully to avoid performance problems.

    :type data: String
    :param data: The HTML string of text that is going to be used for
    the conversion into the plain text representation.
    :rtype: String
    :return: The approximate plain text representation to the provided
    HTML contents.
    """

    # normalizes spacing/newline characters and converts a couple
    # of known HTML entities into their textual counterparts
    data = data.strip().replace("\n", "\r")
    data = data.replace("&copy;", "Copyright").replace("&middot;", "-")

    # extracts the body section of the document (when present)
    # and strips every remaining markup tag from it
    matches = re.findall(defines.BODY_REGEX, data)
    data = matches[0] if matches else ""
    data = defines.TAG_REGEX.sub("", data)

    # re-builds the text keeping only the non empty (stripped)
    # lines and collapsing newline followed by dot sequences
    stripped = (line.strip() for line in data.splitlines(False))
    data = "\n".join(line for line in stripped if line)
    return data.replace("\n.", ".")
918
-
919
-
920
def camel_to_underscore(camel, separator="_", lower=True):
    """
    Converts the provided camel cased value into its underscore
    (or custom separator) based equivalent, optionally keeping
    the original letter casing.

    This is useful as most of the python string standards
    are compliant with the underscore strategy.

    :type camel: String
    :param camel: The camel cased string that is going to be
    converted into an underscore based string.
    :type separator: String
    :param separator: The separator token that is going to
    be used in the camel to underscore conversion.
    :type lower: bool
    :param lower: If the letter casing should be changed while
    converting the value from camel to underscore.
    :rtype: String
    :return: The underscore based string resulting from the
    conversion of the provided camel cased one.
    """

    if not camel:
        return camel
    replacement = r"\1" + separator + r"\2"
    result = FIRST_CAP_REGEX.sub(replacement, camel)
    result = ALL_CAP_REGEX.sub(replacement, result)
    return result.lower() if lower else result
952
-
953
-
954
def camel_to_readable(camel, lower=False, capitalize=False):
    """
    Converts the given camel cased string value into a human
    readable one, meaning a set of strings separated by spaces,
    useful for instance to display class names to end users.

    :type camel: String
    :param camel: The camel case string value that is going
    to be used in the conversion into a readable string.
    :type lower: bool
    :param lower: If the camel based value should be lower
    cased before the conversion to readable.
    :type capitalize: bool
    :param capitalize: If all of the words should be capitalized
    or if instead only the first one should.
    :rtype: String
    :return: The final human readable string that may be
    used to display a value to an end user.
    """

    if not camel:
        return camel
    return underscore_to_readable(
        camel_to_underscore(camel, lower=lower), capitalize=capitalize
    )
981
-
982
-
983
def underscore_to_camel(underscore, lower=False):
    """
    Normalizes an underscore separated value into its camel
    cased equivalent, optionally producing a lower camel case
    version (first letter lower cased).

    :type underscore: String
    :param underscore: The underscore separated value to be
    converted into a camel cased string.
    :type lower: bool
    :param lower: If the first letter of the result should be
    lower cased (lower camel case).
    :rtype: String
    :return: The camel cased version of the provided value.
    """

    if not underscore:
        return underscore
    camel = underscore_to_readable(underscore, capitalize=True, separator="")
    if lower:
        camel = camel[0].lower() + camel[1:]
    return camel
1011
-
1012
-
1013
def underscore_to_readable(underscore, capitalize=False, separator=" "):
    """
    Converts the given underscore oriented string value
    into a readable one meaning that the returned value
    is a set of strings separated by spaces.

    This method may be used to convert class attributes into
    something that is readable by an end user.

    :type underscore: String
    :param underscore: The underscore string value that is going
    to be used in the conversion into a readable string.
    :type capitalize: bool
    :param capitalize: If all of the words should be capitalized
    or if instead only the first one should.
    :type separator: String
    :param separator: The separator to be used to join the multiple
    parts of the resulting readable tokens.
    :rtype: String
    :return: The final human readable string that may be
    used to display a value to an end user.
    """

    if not underscore:
        return underscore
    parts = underscore.split("_")
    parts = [part for part in parts if part]
    # in case the value was composed exclusively of underscore
    # characters (eg: "_") there are no parts left to be joined,
    # returns an empty string instead of raising an index error
    if not parts:
        return ""
    if capitalize:
        parts = [part[0].upper() + part[1:] for part in parts]
    else:
        parts[0] = parts[0][0].upper() + parts[0][1:]
    return separator.join(parts)
1045
-
1046
-
1047
def quote(value, *args, **kwargs):
    """
    Quotes the given value for safe URL usage, making sure
    that unicode strings are first encoded into the UTF-8
    encoding as defined by the standard.

    Should be used instead of calling the equivalent function
    in the URL library directly.

    :type value: String
    :param value: The string value to be quoted/escaped
    according to the URL escaping scheme.
    :rtype: String
    :return: The quoted value, safe for direct usage in URLs.
    """

    if isinstance(value, legacy.UNICODE):
        value = value.encode("utf-8")
    return legacy.quote(value, *args, **kwargs)
1068
-
1069
-
1070
def unquote(value, *args, **kwargs):
    """
    Unquotes the given URL escaped value, producing an unicode
    string by decoding the intermediary UTF-8 based result.

    Should be used instead of calling the equivalent function
    in the URL library directly.

    :type value: String
    :param value: The string value to be unquoted according to
    the URL escaping scheme.
    :rtype: String
    :return: The unquoted value as an unicode string that
    represents the same underlying value.
    """

    result = legacy.unquote(value, *args, **kwargs)
    if isinstance(result, legacy.BYTES):
        result = result.decode("utf-8")
    return result
1093
-
1094
-
1095
def escape(value, char, escape="\\"):
    """
    Escapes the requested target character(s) inside the given
    string value, replacing every occurrence of the escape
    character and of each target with its escaped sequence.

    :type value: String
    :param value: The string whose target characters are going
    to be escaped using the escape character.
    :type char: String/List/Tuple
    :param char: The target character, or sequence of characters,
    to be escaped in the value.
    :type escape: String
    :param escape: The character used for the escaping operation
    (typically the backslash character).
    :rtype: String
    :return: The resulting string with all targets escaped.
    """

    targets = char if isinstance(char, (list, tuple)) else (char,)
    result = value.replace(escape, escape * 2)
    for target in targets:
        result = result.replace(target, escape + target)
    return result
1119
-
1120
-
1121
def unescape(value, escape="\\"):
    """
    Reverts an escaping operation on the provided string,
    collapsing every escape sequence into the character that
    follows the escape marker.

    This is considered an expensive (character by character)
    operation and should be used with care.

    :type value: String
    :param value: The escaped string value to be normalized.
    :rtype: String
    :return: The resulting unescaped string value.
    """

    chars = []
    stream = iter(value)
    for current in stream:
        if not current == escape:
            chars.append(current)
            continue
        # the escape marker consumes the next character, a
        # trailing (dangling) escape is kept verbatim
        try:
            chars.append(next(stream))
        except StopIteration:
            chars.append(escape)
    return "".join(chars)
1146
-
1147
-
1148
def count_unescape(value, sub, escape="\\"):
    """
    Counts the occurrences of a (single character) sub string
    in the provided value, skipping over escaped characters so
    that they do not participate in the counting.

    :type value: String
    :param value: The base string on which the occurrences of
    the sub string are going to be counted.
    :type sub: String
    :param sub: The single character sub string to be counted.
    :type escape: String
    :param escape: The "special" escape character that allows
    the sub string to also be present as a literal.
    :rtype: int
    :return: The number of (non escaped) occurrences of the
    sub string in the provided value.
    """

    total = 0
    stream = iter(value)
    for current in stream:
        if current == escape:
            # consumes (and discards) the escaped character,
            # tolerating a dangling escape at the very end
            next(stream, None)
        elif current == sub:
            total += 1
    return total
1179
-
1180
-
1181
def split_unescape(value, delimiter=" ", max=-1, escape="\\", unescape=True):
    """
    Splits the provided string around the delimiter character,
    honoring escape sequences so that escaped delimiters are
    kept inside the resulting tokens.

    This is considered an expensive operation when compared with
    the plain split one and should be used with care.

    :type value: String
    :param value: The string value to be split around the
    delimiter taking the escaping into account.
    :type delimiter: String
    :param delimiter: The character around which the splitting
    is going to be performed.
    :type max: int
    :param max: The maximum number of split operations to be
    performed (negative for unlimited).
    :type escape: String
    :param escape: The "special" escape character that allows
    the delimiter to be present inside tokens.
    :type unescape: bool
    :param unescape: If the resulting tokens should come already
    unescaped (normalized).
    :rtype: List
    :return: The list of tokens split around the delimiter and
    respecting the escape sequences.
    """

    tokens = []
    buffer = []
    splits = 0
    stream = iter(value)
    for current in stream:
        if current == escape:
            # when not unescaping the escape marker itself is
            # preserved in the output buffer
            if not unescape:
                buffer.append(escape)
            try:
                buffer.append(next(stream))
            except StopIteration:
                # a dangling escape at the end of the value is
                # kept verbatim (already emitted when unescape
                # is not requested)
                if unescape:
                    buffer.append(escape)
        elif current == delimiter and not splits == max:
            tokens.append("".join(buffer))
            buffer = []
            splits += 1
        else:
            buffer.append(current)
    tokens.append("".join(buffer))
    return tokens
1231
-
1232
-
1233
def call_safe(callable, *args, **kwargs):
    """
    Calls the provided callable in a "safe" way, discarding
    from the keyword arguments every name that is not part of
    the callable's declared parameters.

    In case the callable declares a wildcard kwargs parameter
    no filtering is required and the call is run immediately.

    :type callable: Callable
    :callable callable: The callable to be validated against
    its keyword arguments and then invoked.
    :rtype: object
    :return: The value resulting from the safe invocation of
    the provided callable, this may assume any data type.
    """

    # inspects the callable signature, a wildcard kwargs
    # declaration makes every keyword argument acceptable
    argspec = legacy.getargspec(callable)
    allowed = argspec[0]
    has_kwargs = argspec[2]
    if has_kwargs:
        return callable(*args, **kwargs)

    # removes from the keyword arguments every name that is
    # not declared by the callable (would raise a type error)
    for name in legacy.keys(kwargs):
        if name in allowed:
            continue
        del kwargs[name]

    # invokes the callable with the filtered set of keyword
    # arguments, returning the resulting value
    return callable(*args, **kwargs)
1276
-
1277
-
1278
def base_name(name, suffix="_controller"):
    """
    Retrieves the base name of a class name that contains
    a suffix (eg: controller) the resulting value is the
    underscore version of the name without the suffix.

    This method provides an easy way to expose class names
    in external environments.

    :type name: String
    :param name: The name from which the base name will be
    extracted and treated.
    :type suffix: String
    :param suffix: The optional suffix value that if sent will
    be removed from the last part of the name string.
    :rtype: String
    :return: The resulting base name for the provided name, treated
    and with the suffix removed (in case it exists).
    """

    name = camel_to_underscore(name)
    # only strips the suffix when it's a non empty value, an empty
    # suffix would otherwise always match (endswith is true) and the
    # zero length negative slice would discard the complete name
    if suffix and name.endswith(suffix):
        name = name[: -len(suffix)]
    return name
1303
-
1304
-
1305
def base_name_m(name, suffixes=("_controller", "_part", "_app")):
    """
    Multiple iteration version of the base name function, removing
    the complete set of provided suffixes from the value in order
    to obtain the final "base name".

    :type name: String
    :param name: The name from which the base name will be
    extracted and treated, with multiple value strategy.
    :type suffixes: List/Tuple
    :param suffixes: The complete set of suffixes that are going
    to be removed from the provided value creating the base name.
    :rtype: String
    :return: The resulting base name for the provided name, treated
    and without the complete set of provided suffixes.
    """

    for candidate in suffixes:
        name = base_name(name, suffix=candidate)
    return name
1325
-
1326
-
1327
def is_content_type(data, target):
    """
    Verifies if any of the provided mime types (target) matches
    the given content type string, wildcard sub types (eg: text/*)
    are taken into account on matching.

    :type data: String
    :param data: The content type string to be parsed and matched
    against the target mime type values.
    :type target: Tuple/String
    :param target: The mime type value, or sequence of values, to
    be verified against the parsed content type mime strings.
    :rtype: bool
    :return: If any of the provided mime types is considered valid
    for the content type.
    """

    targets = target if isinstance(target, (list, tuple)) else (target,)
    mime, _extra = parse_content_type(data)
    for candidate in targets:
        kind, _sub = candidate.split("/")
        if candidate in mime:
            return True
        if kind + "/*" in mime:
            return True
    return False
1354
-
1355
-
1356
def parse_content_type(data):
    """
    Parses the provided content type string retrieving both the multiple
    mime types associated with the resource and the extra key to value
    items associated with the string in case they are defined (it's optional).

    :type data: String
    :param data: The content type data that is going to be parsed to
    obtain the structure of values for the content type string, this must
    be a plain unicode string and not a binary string.
    :rtype: Tuple
    :return: The sequence of mime types of the the content and the multiple
    extra values associated with the content type (eg: charset, boundary, etc.)
    """

    # creates the list of final normalized mime types and the
    # dictionary to store the extra values.
    types = []
    extra_m = dict()

    # in case no valid type has been sent returns the values
    # immediately to avoid further problems
    if not data:
        return types, extra_m

    # extracts the mime and the extra parts from the data string
    # they are the basis of the processing method
    data = data.strip(";")
    parts = data.split(";")
    mime = parts[0]
    extra = parts[1:]
    mime = mime.strip()

    # runs a series of verifications on the base mime value and in
    # case it's not valid returns the default values immediately
    if not "/" in mime:
        return types, extra_m

    # strips the complete set of extra values, discarding the empty
    # ones, note that these values are going to be processed as key
    # to value items (previously the filter wrongly tested the
    # complete sequence instead of each individual value)
    extra = [value.strip() for value in extra if value.strip()]

    # splits the complete mime type into its type and sub
    # type components (first step of normalization)
    type, sub_type = mime.split("/", 1)
    sub_types = sub_type.split("+")

    # iterates over the complete set of sub types to
    # create the full mime type for each of them and
    # add the new full items to the types list (normalization)
    for sub_type in sub_types:
        types.append(type + "/" + sub_type)

    # goes through all of the extra key to value items
    # and converts them into proper dictionary values,
    # splitting only on the first equals sign so that
    # values containing the character (eg: some boundary
    # values) are preserved intact
    for extra_item in extra:
        if not "=" in extra_item:
            continue
        key, value = extra_item.split("=", 1)
        extra_m[key] = value

    # returns the final tuple containing both the normalized
    # mime types for the content and the extra key to value items
    return types, extra_m
1422
-
1423
-
1424
def parse_cookie(data):
    """
    Parses/interprets the provided cookie data string, producing
    a map that associates each cookie name with its value.

    Cookies that carry no key to value association are stored
    with an empty string as their value (unset).

    :type data: String
    :param data: The serialized cookie data that is going to be
    parsed into the final cookie dictionary/map.
    :rtype: Dictionary
    :return: The map associating the names with the values for
    the various parts of the provided cookie string.
    """

    cookie_m = dict()

    # each cookie is separated by a semicolon, a missing equals
    # sign is normalized by appending one (empty value)
    for chunk in data.split(";"):
        chunk = chunk.strip()
        if not "=" in chunk:
            chunk += "="
        name, value = chunk.split("=", 1)
        cookie_m[name] = value

    return cookie_m
1458
-
1459
-
1460
def parse_multipart(data, boundary):
    """
    Parses the provided data buffer as a set of multipart data
    the content type is not verified inside this method.

    The function returns a tuple containing both a map of "basic"
    form parameters, a map containing the set of file tuples and
    a sequence containing the name and values tuples in order.

    :type data: bytes
    :param data: The byte string containing the complete set of data
    that is going to be processed as multipart.
    :type boundary: String
    :param boundary: The string containing the basic boundary header
    value, should be provided from the caller function.
    :rtype: Tuple
    :return: A tuple containing both the map of post attributes,
    the map of file attributes and a list with the various name and
    value tuples (to be able to access ordered values).
    """

    ordered = []
    ordered_m = dict()
    post = dict()
    files = dict()

    # normalizes the boundary and builds the byte based delimiter
    # sequences used to split the payload into its multiple parts,
    # NOTE(review): the [9:] slice assumes the value comes prefixed
    # with "boundary=" — confirm against the caller
    boundary = str(boundary)
    boundary = boundary.strip()
    boundary_base = "--" + boundary[9:].strip('"')
    boundary_value = legacy.bytes(boundary_base + "\r\n")
    boundary_extra = legacy.bytes(boundary_base + "--" + "\r\n")
    boundary_extra_l = len(boundary_extra)
    parts = data.split(boundary_value)
    # removes the closing boundary marker from the last part
    parts[-1] = parts[-1][: boundary_extra_l * -1]

    # iterates over the complete set of parts in the multi part payload
    # to process them and add them to the appropriate dictionary and list
    for part in parts:
        # in case the current part is not valid or empty skips the
        # current cycle (nothing to be done)
        if not part:
            continue

        # splits the current part around the beginning of part sequence
        # and retrieves the proper contents if they exist
        part_s = part.split(b"\r\n\r\n", 1)
        headers = part_s[0]
        if len(part_s) > 1:
            contents = part_s[1]
        else:
            contents = None

        # strips the current headers string and then splits it around
        # the various lines that define the various headers
        headers_data = headers.strip()
        headers_lines = headers_data.split(b"\r\n")

        # creates the initial headers map of the headers that contains
        # the association between the byte based key and the data value
        # then retrieves the tuple of values and resets the map as it's
        # going to be changed and normalized with the new values
        headers = dict([line.split(b":", 1) for line in headers_lines])
        headers_t = legacy.eager(headers.items())
        headers.clear()

        # runs the normalization process using the header tuples, this
        # should create a map of headers with the key as a normal string
        # and the values encoded as byte based strings (contain data)
        # note that the header keys are also lower cased
        for key, value in headers_t:
            key = legacy.str(key).lower()
            value = value.strip()
            headers[key] = value

        # tries to retrieve the content disposition header for the current
        # part and in case there's none it's not possible to process the
        # current part (this header is considered required)
        disposition = headers.get("content-disposition", None)
        if not disposition:
            continue

        # creates the dictionary that will hold the various parts of the
        # content disposition header that are going to be extracted for
        # latter processing, this is required to make some decisions on
        # the type of part that is currently being processed, note that
        # this (deliberately) shadows the outer parts sequence
        parts = dict()
        parts_data = disposition.split(b";")
        for value in parts_data:
            value_s = value.split(b"=", 1)
            key = legacy.str(value_s[0]).strip().lower()
            if len(value_s) > 1:
                value = value_s[1].strip()
            else:
                value = None
            parts[key] = value

        # retrieves the various characteristics values from the headers
        # and from the content disposition of the current part, these
        # values are going to be used to decide on whether the current
        # part is a file or a normal key value attribute
        content_type = headers.get("content-type", None)
        name = parts.get("name", b'"undefined"').strip(b'"')
        filename = parts.get("filename", b"").strip(b'"')

        # decodes the various content disposition values into an unicode
        # based string so that may be latter be used safely inside the
        # application environment(as expected by the current structure)
        if content_type:
            content_type = content_type.decode("utf-8")
        name = name.decode("utf-8")
        filename = filename.decode("utf-8")

        # in case the currently discovered contents are valid they
        # must be stripped from the last two bytes (trailing CRLF)
        # so that the real value is retrieved from the provided contents
        contents = contents if contents == None else contents[:-2]

        # verifies if the file name is included in the parts unpacked
        # from the content type in case it does this is considered to be
        # file part otherwise it's a normal key value part
        if "filename" in parts:
            is_file = True
        else:
            is_file = False

        # builds the final value for the part, either a file tuple
        # (stored in the files map) or a plain decoded string value
        # (stored in the post map)
        if is_file:
            target = files
            file_tuple = (filename, content_type, contents)
            value = FileTuple(file_tuple)
        else:
            target = post
            value = contents if contents == None else contents.decode("utf-8")

        # checks if the name was already seen so that the ordered
        # sequence only registers each name once (first occurrence)
        exists = name in ordered_m

        # appends the value to the per name sequence in the proper
        # target map (post or files)
        sequence = target.get(name, [])
        sequence.append(value)
        target[name] = sequence

        # mirrors the same value into the ordered map sequence, shared
        # by both kinds of parts (keeps global ordering information)
        sequence_o = ordered_m.get(name, [])
        sequence_o.append(value)
        ordered_m[name] = sequence_o

        if exists:
            continue

        # first occurrence of the name, registers the (name, sequence)
        # tuple in the ordered list, the sequence is shared (mutated
        # in place) for the repeated occurrences
        tuple_s = (name, sequence_o)
        ordered.append(tuple_s)

    return (post, files, ordered)
1610
-
1611
-
1612
def decode_params(params):
    """
    Decodes the complete set of parameters defined in the
    provided map so that all of keys and values are created
    as unicode strings instead of UTF-8 based strings.

    This method's execution is mandatory on the retrieval of
    the parameters from the sent data.

    :type params: Dictionary
    :param params: The map containing the encoded set of values
    that are going to be decoded from the UTF-8 form.
    :rtype: Dictionary
    :return: The decoded map meaning that all the keys and values
    are in the unicode form instead of the string form.
    """

    # builds a new map so that the original (encoded) one is
    # not mutated during the decoding process
    decoded = dict()

    for key, value in params.items():
        values = []
        for item in value:
            if legacy.is_bytes(item):
                item = item.decode("utf-8")
            values.append(item)
        if legacy.is_bytes(key):
            key = key.decode("utf-8")
        decoded[key] = values

    return decoded
1647
-
1648
-
1649
def load_form(form):
    """
    Builds the "structured" version of the provided linear form,
    nesting dot separated attribute names into chained maps and
    unwrapping single element value sequences into plain values.

    :type form: Dictionary
    :param form: The linear form with name to values associations.
    :rtype: Dictionary
    :return: The structured (nested) version of the form.
    """

    form_s = dict()

    for name in form:
        # retrieves the value (sequence) for the current name and
        # unwraps it in case it contains exactly one element
        value = form[name]
        if isinstance(value, (list, tuple)) and len(value) == 1:
            value = value[0]

        # splits the complete name into its components, the last
        # one is the leaf where the value is going to be stored
        names = name.split(".")
        leaf = names[-1]

        # walks (and lazily creates) the chain of intermediate
        # dictionaries for the partial name components
        node = form_s
        for partial in names[:-1]:
            node = node.setdefault(partial, {})

        # stores the value in the leaf position of the structure
        node[leaf] = value

    return form_s
1694
-
1695
-
1696
def check_login(self, token=None, request=None):
    # resolves the request, falling back to the one present in
    # the current context when none has been provided
    request = request or (self.request if self else None)

    # normalizes the token into a sequence of tokens so that the
    # validation may be performed in an uniform way
    tokens = token if isinstance(token, SEQUENCE_TYPES) else (token,)

    # an authenticated user with no required token is considered
    # valid, otherwise the complete set of tokens must validate
    if check_user(self, request=request) and not token:
        return True
    return True if check_tokens(self, tokens, request=request) else False
1719
-
1720
-
1721
def check_user(self, request=None):
    # resolves the request from the arguments or from the
    # current context, no request implies no authentication
    request = request or (self.request if self else None)
    if not request:
        return False

    # an user is considered authenticated when the session carries
    # a username or when a tokens provider is attached to the request
    if "username" in request.session:
        return True
    return hasattr(request, "tokens_p")
1737
-
1738
-
1739
def check_token(self, token, tokens_m=None, request=None):
    # an empty/invalid token is considered immediately valid
    # (simple validation short-circuit)
    if not token:
        return True

    # defaults the tokens map to the session based one in case
    # no explicit value has been provided
    if tokens_m == None:
        tokens_m = get_tokens_m(self, request=request)

    # walks the dot separated namespace of the token against the
    # (nested) map of tokens, supporting wildcard ("*") matching
    node = tokens_m
    for part in token.split("."):
        if not isinstance(node, dict):
            return False
        if node.get("*") == True:
            return True
        if not part in node:
            return False
        node = node[part]

    # the final node may either be a plain value or a map whose
    # special "_" key carries the validation result
    result = node.get("_", False) if isinstance(node, dict) else node
    return result == True
1774
-
1775
-
1776
def check_tokens(self, tokens, tokens_m=None, request=None):
    # the sequence is valid only when every single token passes
    # the individual validation (short-circuits on first failure)
    return all(
        check_token(self, token, tokens_m=tokens_m, request=request)
        for token in tokens
    )
1785
-
1786
-
1787
def ensure_login(self, token=None, context=None, request=None):
    # resolves the request from the current context when required
    request = request or (self.request if self else None)

    # an unauthenticated user is rejected immediately with the
    # proper forbidden (403) exception
    if not check_user(self, request=request):
        raise exceptions.AppierException(
            message="User not authenticated", code=403, token=token, context=context
        )

    # rejects also when the (optional) required token does not
    # validate against the current context
    if not check_token(self, token, request=request):
        raise exceptions.AppierException(
            message="Not enough permissions", code=403, token=token, context=context
        )
1799
-
1800
-
1801
def get_tokens_m(self, request=None, set=None):
    """
    Retrieves the map of tokens from the current session so that
    they can be used for proper ACL validation.

    In case the current session contains a sequence based representation
    of the tokens they are converted to their equivalent map value.

    :type request: Request
    :param request: The request that is going to be used to access
    the session information, if any.
    :type set: bool
    :param set: If the possibly converted tokens list should be persisted
    into the current session, sparing some CPU cycles on next execution,
    in case no value is provided a default value is applied taking into
    account the current execution context.
    :rtype: Dictionary
    :return: The map of tokens to be used for ACL validation.
    """

    # tries to retrieve the request from the current context
    # in case it has not been passed through other manner, if
    # no valid context is found returns invalid value immediately
    request = request or (self.request if self else None)
    if not request:
        return dict()

    # verifies if the set flag is set and if that's not the case
    # ensures proper default value taking into account if there's
    # a token "provider method" defined or not (avoids persisting
    # provider generated tokens back into the session)
    if set == None:
        set = False if hasattr(request, "tokens_p") else True

    # tries to retrieve the "provider method" for the tokens under
    # the current request and in case it's not available uses the
    # default one (simple session access), any failure in this
    # retrieval results in an empty (no permissions) map
    try:
        if hasattr(request, "tokens_p"):
            tokens_m = request.tokens_p()
        else:
            tokens_m = request.session.get("tokens", {})
    except Exception:
        return dict()

    # verifies if the resulting value is either a map or a sequence,
    # going to be used for decisions on normalization
    is_map = isinstance(tokens_m, dict)
    is_sequence = isinstance(tokens_m, (list, tuple))

    # if the tokens value is already a map then an immediate return
    # is going to be performed (it is a valid tokens map)
    if is_map:
        return tokens_m

    # in case the value present in the tokens value is a sequence
    # it must be properly converted into the equivalent map value
    if is_sequence:
        # converts the tokens sequence into a map version of it
        # so that proper structured verification is possible
        tokens_m = to_tokens_m(tokens_m)

        # in case the set flag is set the tokens map should
        # be set in the request session (may be dangerous)
        # and then returns the tokens map to the caller method
        if set:
            request.session["tokens"] = tokens_m
        return tokens_m

    # returns the "default" empty tokens map as it was not possible
    # to retrieve any information regarding tokens from the
    # current context and environment
    return dict()
1873
-
1874
-
1875
def to_tokens_m(tokens):
    """
    Converts a sequence of (dot separated) ACL token strings into
    the equivalent nested map representation, where intermediate
    namespace values become dictionaries and the special "_" key
    marks a namespace that is itself granted.
    """

    tokens_m = dict()

    for token in tokens:
        # walks the namespace parts of the token (all but the last
        # one), creating/normalizing intermediate dictionaries
        parts = token.split(".")
        namespace, last = parts[:-1], parts[-1]
        node = tokens_m
        for part in namespace:
            child = node.get(part, {})
            # a previously granted leaf is promoted to a namespace
            # dictionary, preserving the grant under the "_" key
            if not isinstance(child, dict):
                child = {"_": child}
            node[part] = child
            node = child

        # marks the final part as granted, either by setting the
        # "_" key on an existing namespace or by storing True
        existing = node.get(last, None)
        if existing and isinstance(existing, dict):
            existing["_"] = True
        else:
            node[last] = True

    return tokens_m
1905
-
1906
-
1907
def dict_merge(first, second, override=True, recursive=False, callback=None):
    """
    Merges two dictionaries, optionally using a deep (recursive)
    strategy to achieve the merge.

    The default "way" of the merge is from the second to the first
    and overriding the values of the first dictionary.

    :type first: Dictionary
    :param first: The target dictionary of the merge operation and
    that will have its contents overridden if requested.
    :type second: Dictionary
    :param second: The base dictionary of the merge that will be
    "copied" into the first one.
    :type override: bool
    :param override: If the contents of the first dictionary should
    be overridden (overwritten) in case of "collision".
    :type recursive: bool
    :param recursive: If the merge operation should be performed using
    a deep and recursive approach for dictionary types.
    :type callback: Function
    :param callback: Optional function to be called in case there's
    a conflicting value for the same key with both the first and second
    values to be merged, allowing control over merge operations, this
    is only used in case of a recursive approach.
    :rtype: Dictionary
    :return: The resulting dictionary (new instance) from the merge
    operation of the second dictionary into the first.
    """

    # in case no override exists then the order of the items is
    # exchanged so that the first overrides the second values
    # and not the exact opposite
    if not override:
        first, second = second, first

    # in case the recursive flag is set, must iterate over all
    # of the first items to try to merge any possible dictionary
    # value using a recursive strategy
    if recursive:
        # creates the dictionary that is going to store the final
        # merged value resulting from both dictionaries
        final = dict()

        # runs the main iteration cycles around the first dictionary
        # trying to find possible conflicts that would required a
        # smarter merge strategy
        for key, value in legacy.iteritems(first):
            # in case the current key is not present in the second
            # dictionary (there's no conflict) and so a simple set
            # strategy should be applied
            if not key in second:
                final[key] = value
                continue

            # grabs the other (second) value that is going to be used
            # as the basis for the merge operation
            other = second[key]

            # in case a callback is defined calls it to determine the
            # final merged value from both the original and the other
            # NOTE(review): when override is False the first/second swap
            # above means `value` comes from the original second dict —
            # confirm the callback argument order is intended in that case
            if callback:
                final[key] = callback(value, other)

            # if it represents a dictionary (smart merge) then both
            # values are going to be merged recursively
            elif isinstance(value, dict) and isinstance(other, dict):
                if not override:
                    value, other = other, value
                final[key] = dict_merge(
                    value, other, override=override, recursive=recursive
                )

            # otherwise the previous value is simply replaced with the
            # the other value, (fallback operation) this is considered
            # to be a non smart merge operation
            else:
                final[key] = other

        # runs the final iteration cycles around the second dictionary
        # values to try to set the unique second values in the final
        for key, value in legacy.iteritems(second):
            if key in final:
                continue
            final[key] = value

        # returns the final merged result to the caller method, this
        # result should contain all of its dictionary values properly
        # merged within both the first and second values
        return final

    # otherwise (uses a simple strategy) and creates a new dictionary
    # for the first value, then updates it with the second set of
    # dictionary values, returning then the newly created dictionary
    # to the caller method (basic update strategy)
    else:
        final = dict(first)
        final.update(second)
        return final
2006
-
2007
-
2008
def deprecated(message="Function %s is now deprecated"):
    """
    Decorator that marks a certain function or method as
    deprecated so that whenever such function is called
    an output messaged warns the developer about the
    deprecation (incentive).

    :type message: String
    :param message: The message template to be used in the
    output operation of the error.
    :rtype: Decorator
    :return: The decorator that should be used to wrap a
    function and mark it as deprecated (send warning).
    """

    def decorator(function):
        # resolves the best possible name for the function, falling
        # back to its string representation (the previous code could
        # leave the name as None and print "Function None ...")
        name = getattr(function, "__name__", None) or str(function)

        @functools.wraps(function)
        def interceptor(*args, **kwargs):
            # uses catch_warnings() so that the process wide warnings
            # filter configuration is restored after the warning is
            # emitted, the previous simplefilter("default", ...) call
            # clobbered any user configured warning filters
            with warnings.catch_warnings():
                warnings.simplefilter("always", DeprecationWarning)
                warnings.warn(
                    message % name, category=DeprecationWarning, stacklevel=2
                )
            return function(*args, **kwargs)

        return interceptor

    return decorator
2036
-
2037
-
2038
def cached(function):
    """
    Decorator that caches the function's return value in the
    properties map of the current execution context (typically
    the request object), so that repeated calls within the same
    context re-use the previously computed value.

    The life-cycle of the context is critical to avoid issues
    with invalid cache invalidation.

    :rtype: Decorator
    :return: The decorator that should be used to wrap a function
    marking it as ready to cache it's return value on current context.
    """

    name = function.__name__

    @functools.wraps(function)
    def _cached(self, *args, **kwargs):
        # obtains the current execution context (most of the times
        # a request object) and its properties (cache) map
        context = get_context(self)
        properties = context.properties if context else None

        # a previously cached value (keyed by the function name) is
        # returned immediately, skipping the computation
        if properties and name in properties:
            return properties[name]

        # computes the value and, when a properties map is available,
        # stores it there for re-use by subsequent calls
        value = function(self, *args, **kwargs)
        if not properties == None:
            properties[name] = value
        return value

    return _cached
2080
-
2081
-
2082
def private(function):
    """
    Decorator that requires an authenticated session before the
    wrapped method is executed (unless the ensure flag is unset).
    """

    @functools.wraps(function)
    def _private(self, *args, **kwargs):
        # unless explicitly disabled by the caller, a valid login
        # must exist before the function runs
        should_ensure = kwargs.get("ensure", True)
        request = kwargs.get("request", self.request)
        if should_ensure:
            ensure_login(self, request=request)

        # strips keyword arguments not accepted by the target
        # function and performs the "real" call
        sanitize(function, kwargs)
        return function(self, *args, **kwargs)

    return _private
2093
-
2094
-
2095
def ensure(token=None, context=None):
    """
    Decorator factory that requires an authenticated session
    holding the provided ACL token (and context) before the
    wrapped method is executed.
    """

    def decorator(function):
        @functools.wraps(function)
        def interceptor(self, *args, **kwargs):
            # unless explicitly disabled, validates both the login
            # state and the required ACL token
            should_ensure = kwargs.get("ensure", True)
            request = kwargs.get("request", self.request)
            if should_ensure:
                ensure_login(self, token=token, context=context, request=request)

            # strips unsupported keyword arguments and runs the
            # decorated function
            sanitize(function, kwargs)
            return function(self, *args, **kwargs)

        return interceptor

    return decorator
2109
-
2110
-
2111
def delayed(function):
    """
    Decorator that defers the execution of the wrapped method,
    scheduling it in the owner application's delay queue.
    """

    @functools.wraps(function)
    def _delayed(self, *args, **kwargs):
        # prepends the instance to the positional arguments and
        # hands the call over to the owner for deferred execution
        complete_args = [self]
        complete_args.extend(args)
        return self.owner.delay(function, complete_args, kwargs)

    return _delayed
2118
-
2119
-
2120
def route(url, method="GET", asynchronous=False, json=False, opts=None, priority=1):
    """
    Decorator factory that registers the wrapped function as a
    route handler for the provided URL and method.
    """

    def delay(function, *args, **kwargs):
        # stores the route information in the function itself, so
        # that registration may happen later (when the owning class
        # is assembled), keeping a global ordering counter
        global CREATION_COUNTER
        if not hasattr(function, "_routes"):
            function._routes = []
        function._routes.append((url, method, asynchronous, json, opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1

    def decorator(function, *args, **kwargs):
        # detached methods cannot be registered immediately, so the
        # registration is delayed, otherwise the route is added to
        # the application class right away
        if is_detached(function):
            delay(function, *args, **kwargs)
        else:
            common.base().App.add_route(
                method,
                url,
                function,
                asynchronous=asynchronous,
                json=json,
                opts=opts,
                priority=priority,
            )
        return function

    return decorator
2146
-
2147
-
2148
def error_handler(code, scope=None, json=False, opts=None, priority=1):
    """
    Decorator factory that registers the wrapped function as the
    handler for the provided (HTTP) error code.
    """

    def delay(function, *args, **kwargs):
        # attaches the error handler information to the function for
        # later registration, maintaining the global creation order
        global CREATION_COUNTER
        if not hasattr(function, "_errors"):
            function._errors = []
        function._errors.append((code, scope, json, opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    def decorator(function, *args, **kwargs):
        # detached methods get delayed registration, plain functions
        # are registered immediately in the application class
        if is_detached(function):
            delay(function, *args, **kwargs)
        else:
            common.base().App.add_error(
                code, function, json=json, opts=opts, priority=priority
            )
        return function

    return decorator
2169
-
2170
-
2171
def exception_handler(exception, scope=None, json=False, opts=None, priority=1):
    """
    Decorator factory that registers the wrapped function as the
    handler for the provided exception class.
    """

    def delay(function, *args, **kwargs):
        # attaches the exception handler information to the function
        # for later registration, keeping the global creation order
        global CREATION_COUNTER
        if not hasattr(function, "_exceptions"):
            function._exceptions = []
        function._exceptions.append((exception, scope, json, opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    def decorator(function, *args, **kwargs):
        # detached methods get delayed registration, plain functions
        # are registered immediately in the application class
        if is_detached(function):
            delay(function, *args, **kwargs)
        else:
            common.base().App.add_exception(
                exception, function, json=json, opts=opts, priority=priority
            )
        return function

    return decorator
2192
-
2193
-
2194
def before_request(scope="all", opts=None, priority=1):
    """
    Decorator factory that registers the wrapped function to be
    executed before each request handling workflow.
    """

    def delay(function, *args, **kwargs):
        # stores the custom handler information in the function for
        # later registration, keeping the global creation order
        global CREATION_COUNTER
        if not hasattr(function, "_customs"):
            function._customs = []
        function._customs.append(("before_request", opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    def decorator(function, *args, **kwargs):
        # detached methods get delayed registration, plain functions
        # are registered immediately in the application class
        if is_detached(function):
            delay(function, *args, **kwargs)
        else:
            common.base().App.add_custom(
                "before_request", function, opts=opts, priority=priority
            )
        return function

    return decorator
2215
-
2216
-
2217
def after_request(scope="all", opts=None, priority=1):
    """
    Decorator factory that registers the wrapped function to be
    executed after each request handling workflow.
    """

    def delay(function, *args, **kwargs):
        # stores the custom handler information in the function for
        # later registration, keeping the global creation order
        global CREATION_COUNTER
        if not hasattr(function, "_customs"):
            function._customs = []
        function._customs.append(("after_request", opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    def decorator(function, *args, **kwargs):
        # detached methods get delayed registration, plain functions
        # are registered immediately in the application class
        if is_detached(function):
            delay(function, *args, **kwargs)
        else:
            common.base().App.add_custom(
                "after_request", function, opts=opts, priority=priority
            )
        return function

    return decorator
2238
-
2239
-
2240
def is_detached(function):
    """
    Verifies if the provided function value is considered to be
    a detached method from a class, this is valid for situations
    where the type of the value is a function and there's a reference
    to the parent class of definition.

    This method is not completely safe as it relies on the fact that
    by convention the first argument of a "future" method is the "self"
    one, meaning that a "normal function" would be detected as a
    method if the first argument of it is named self.

    :type function: Function
    :param function: The function value that is going to be evaluated
    for the presence of a detached method.
    :rtype: bool
    :return: If the provided function value refers a detached method
    of a certain class.
    """

    # only plain function objects may represent detached methods,
    # any other value is rejected immediately
    if not isinstance(function, types.FunctionType):
        return False

    # obtains the argument specification of the function, requiring
    # a valid specification with at least one positional argument
    # (as any method would have)
    spec = legacy.getargspec(function)
    if not spec or not spec.args:
        return False

    # by convention a (detached) method's first argument is named
    # "self", and that's the heuristic used for the final decision
    return spec.args[0] == "self"
2279
-
2280
-
2281
def sanitize(function, kwargs):
    """
    Removes (in-place) from the provided keyword arguments map
    every name that is not accepted by the target function.
    """

    # computes the names accepted by the function's signature and
    # deletes every keyword argument outside that set, note that a
    # snapshot list is used to allow in-place removal
    allowed = legacy.getargspec(function)[0]
    extra = [name for name in kwargs if not name in allowed]
    for name in extra:
        del kwargs[name]
2290
-
2291
-
2292
def verify(condition, message=None, code=None, exception=None, **kwargs):
    """
    Ensures that the provided condition is valid, raising the
    requested exception class (defaults to AssertionError) with
    the given message, code and extra keyword arguments otherwise.
    """

    # a valid condition requires no action at all
    if condition:
        return

    # builds the keyword arguments for the exception construction,
    # only setting message and code when explicitly provided
    kwargs = dict(kwargs)
    if not message == None:
        kwargs["message"] = message
    if not code == None:
        kwargs["code"] = code

    # raises the requested (or default) exception to signal the
    # assertion failure to the caller
    raise (exception or exceptions.AssertionError)(**kwargs)
2302
-
2303
-
2304
def verify_equal(first, second, message=None, code=None, exception=None, **kwargs):
    """
    Ensures that the two provided values are equal, raising the
    requested exception with a descriptive message otherwise.
    """

    if not message:
        message = "Expected %s got %s" % (repr(second), repr(first))
    return verify(
        first == second, message=message, code=code, exception=exception, **kwargs
    )
2309
-
2310
-
2311
def verify_not_equal(first, second, message=None, code=None, exception=None, **kwargs):
    """
    Ensures that the two provided values are different, raising
    the requested exception with a descriptive message otherwise.
    """

    if not message:
        message = "Expected %s not equal to %s" % (repr(first), repr(second))
    return verify(
        not first == second, message=message, code=code, exception=exception, **kwargs
    )
2316
-
2317
-
2318
def verify_type(
    value, types, null=True, message=None, code=None, exception=None, **kwargs
):
    """
    Ensures that the provided value is an instance of the given
    type (or types), optionally accepting None values, raising
    the requested exception otherwise.
    """

    if not message:
        message = "Expected %s to have type %s" % (repr(value), repr(types))
    # the value is valid when it matches the expected type(s) or
    # when it's None and None values are allowed
    is_valid = isinstance(value, types) or (null and value == None)
    return verify(is_valid, message=message, code=code, exception=exception, **kwargs)
2329
-
2330
-
2331
def verify_many(sequence, message=None, code=None, exception=None, **kwargs):
    """
    Runs the verification process for every condition in the
    provided sequence, failing fast on the first invalid one.
    """

    for condition in sequence:
        verify(condition, message=message, code=code, exception=exception, **kwargs)
2334
-
2335
-
2336
def execute(args, command=None, path=None, shell=None, encoding=None):
    """
    Executes an external command, waiting for its completion and
    returning a dictionary with its decoded standard output,
    standard error and return code.

    :type args: List
    :param args: Sequence with the program name and its arguments.
    :type command: String
    :param command: Optional complete command line that, when given,
    overrides args by (naively) splitting on single spaces.
    :type path: String
    :param path: Working directory for the child process.
    :type shell: bool
    :param shell: If the command should run through the shell,
    defaults to True on Windows and False elsewhere.
    :type encoding: String
    :param encoding: Encoding used to decode the child's output,
    defaults to the file system encoding.
    :rtype: Dictionary
    :return: Map with the "stdout", "stderr" and "code" values of
    the executed process.
    """

    if shell == None:
        shell = os.name == "nt"
    if not encoding:
        encoding = sys.getfilesystemencoding()
    if command:
        args = command.split(" ")
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=path
    )
    # uses communicate() instead of the previous wait() + read()
    # sequence, avoiding the classic deadlock that occurs when the
    # child fills one of the pipe buffers while the parent is still
    # blocked waiting for process termination
    stdout, stderr = process.communicate()
    code = process.returncode
    stdout = stdout.decode(encoding)
    stderr = stderr.decode(encoding)
    return dict(stdout=stdout, stderr=stderr, code=code)
2352
-
2353
-
2354
@contextlib.contextmanager
def ctx_locale(name="", force=False):
    """
    Context manager that temporarily switches the process locale
    to the provided one, restoring the previous locale on exit,
    no change is performed when the requested locale is already
    set (unless force is requested).
    """

    # captures the currently active locale so it may be restored
    # once the context is exited
    previous = locale.setlocale(locale.LC_ALL)

    # when the requested locale is already in place (and no force
    # was requested) there's nothing to change nor restore
    if previous == name and not force:
        yield previous
        return

    # applies the requested locale, guaranteeing restoration of the
    # previous one even when the body raises an exception
    try:
        yield locale.setlocale(locale.LC_ALL, name)
    finally:
        locale.setlocale(locale.LC_ALL, previous)
2364
-
2365
-
2366
@contextlib.contextmanager
def ctx_request(app=None):
    """
    Context manager that swaps the application's current request
    with its mock request for the duration of the context,
    restoring the original request afterwards.
    """

    # defaults to the globally registered application and saves the
    # current request so it may be restored at exit time
    app = app or common.base().get_app()
    previous = app._request
    app._request = app._mock
    try:
        yield True
    finally:
        app._request = previous
2375
-
2376
-
2377
class FileTuple(tuple):
    """
    Tuple class (inherits from tuple) that represents
    the name, content type and (data) contents of a file
    in the context of the appier infra-structure.

    This class shares many of the signature with the
    typical python file interface, allowing most of
    the operation to be performed (eg: read, seek,
    tell, etc.).
    """

    def __init__(self, *args, **kwargs):
        # the tuple contents are set by __new__ (tuples are immutable),
        # the previous implementation called tuple.__init__ without the
        # self reference (a latent bug), fixed here by passing only self
        tuple.__init__(self)
        # current read cursor for the file-like interface
        self._position = 0

    @classmethod
    def from_data(cls, data, name=None, mime=None):
        # builds the (name, mime, data) tuple from the raw contents
        return cls((name, mime, data))

    @classmethod
    def from_file(cls, file, name=None, mime=None):
        # reads the complete contents of the file object and delegates
        # the construction to from_data
        return cls.from_data(file.read(), name=name, mime=mime)

    @classmethod
    def from_path(cls, path, name=None, mime=None, guess=True):
        # tries to guess the MIME type from the path when requested and
        # none was explicitly provided
        mime = cls.guess(path) if mime == None and guess else mime
        file = open(path, "rb")
        try:
            file_tuple = cls.from_file(file, name=name, mime=mime)
        finally:
            file.close()
        return file_tuple

    @classmethod
    def guess(cls, name):
        # guesses the MIME type from the file name, returning None when
        # no reliable guessing is possible
        mime = mimetypes.guess_type(name, strict=False)[0]
        if mime:
            return mime
        return None

    def read(self, count=None):
        # reads up to count bytes from the current position (or the
        # complete remainder when count is None), advancing the cursor
        # by the number of bytes effectively returned; the previous
        # implementation returned the complete data for read(0) and
        # could advance the cursor beyond the end of the data
        data = self[2]
        if count == None:
            chunk = data[self._position :]
        else:
            chunk = data[self._position : self._position + count]
        self._position += len(chunk)
        return chunk

    def seek(self, offset, whence=os.SEEK_SET):
        # re-positions the read cursor following the typical file
        # object semantics, returning the new absolute position
        # (previously nothing was returned)
        if whence == os.SEEK_SET:
            self._position = offset
        elif whence == os.SEEK_CUR:
            self._position += offset
        elif whence == os.SEEK_END:
            self._position = len(self[2]) + offset
        return self._position

    def tell(self):
        # returns the current (absolute) cursor position
        return self._position

    def save(self, path, close=True):
        # stores the complete contents in the provided target, which
        # may be either a file path or an already open file object
        contents = self[2]
        if legacy.is_string(path):
            file = open(path, "wb")
        else:
            file = path
        try:
            file.write(contents)
        finally:
            if close:
                file.close()

    def seekable(self):
        return True

    @property
    def name(self):
        return self[0]

    @property
    def mime(self):
        return self[1]

    @property
    def data(self):
        return self[2]
2469
-
2470
-
2471
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.

    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """

    def __init__(self, owner=None, daemon=False, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # owner object to be started once the thread runs
        self.owner = owner
        self.daemon = daemon

    def run(self):
        # runs the "normal" thread behavior first, then starts the
        # owner object (when one exists) and drops the reference so
        # the owner is not kept alive by this thread
        threading.Thread.run(self)
        owner = self.owner
        if not owner:
            return
        owner.start()
        self.owner = None
2491
-
2492
-
2493
class JSONEncoder(json.JSONEncoder):
    """
    Custom JSON encoder that serializes objects implementing the
    json_v() protocol through that method and that, in permissive
    mode (the default), falls back to the string representation
    of any other non serializable object.
    """

    def __init__(self, *args, **kwargs):
        # extracts the custom permissive flag before delegating the
        # remaining arguments to the base encoder constructor
        self.permissive = kwargs.pop("permissive", True)
        json.JSONEncoder.__init__(self, *args, **kwargs)

    def default(self, obj, **kwargs):
        # objects implementing the json_v() protocol control their
        # own JSON serializable representation
        if hasattr(obj, "json_v"):
            return obj.json_v()

        # in permissive mode any unknown object is serialized via
        # its string representation, otherwise defers to the base
        # implementation (which raises TypeError)
        if self.permissive:
            return str(obj)
        return json.JSONEncoder.default(self, obj, **kwargs)
1
+ #!/usr/bin/python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ # Hive Appier Framework
5
+ # Copyright (c) 2008-2024 Hive Solutions Lda.
6
+ #
7
+ # This file is part of Hive Appier Framework.
8
+ #
9
+ # Hive Appier Framework is free software: you can redistribute it and/or modify
10
+ # it under the terms of the Apache License as published by the Apache
11
+ # Foundation, either version 2.0 of the License, or (at your option) any
12
+ # later version.
13
+ #
14
+ # Hive Appier Framework is distributed in the hope that it will be useful,
15
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
16
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17
+ # Apache License for more details.
18
+ #
19
+ # You should have received a copy of the Apache License along with
20
+ # Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
21
+
22
+ """appier.util
23
+
24
+ General-purpose utilities used across the Appier code-base.
25
+ Offers helpers for I/O, date handling, MIME types, hashing and
26
+ thread-safe counters. Includes convenience wrappers for subprocess,
27
+ virtualenv and dynamic module import. Widely depended on by other
28
+ modules; keep changes here backward-compatible.
29
+ """
30
+
31
+ __author__ = "João Magalhães <joamag@hive.pt>"
32
+ """ The author(s) of the module """
33
+
34
+ __copyright__ = "Copyright (c) 2008-2024 Hive Solutions Lda."
35
+ """ The copyright for the module """
36
+
37
+ __license__ = "Apache License, Version 2.0"
38
+ """ The license for the module """
39
+
40
+ import os
41
+ import re
42
+ import sys
43
+ import json
44
+ import copy
45
+ import uuid
46
+ import types
47
+ import locale
48
+ import hashlib
49
+ import calendar
50
+ import datetime
51
+ import warnings
52
+ import functools
53
+ import threading
54
+ import mimetypes
55
+ import contextlib
56
+ import subprocess
57
+ import multiprocessing
58
+
59
+ from . import smtp
60
+ from . import config
61
+ from . import legacy
62
+ from . import common
63
+ from . import defines
64
+ from . import exceptions
65
+
66
+ CREATION_COUNTER = 0
67
+ """ The global static creation counter value that
68
+ will be used to create an order in the declaration
69
+ of attributes for a set of classes """
70
+
71
+ FIRST_CAP_REGEX = re.compile(r"(.)([A-Z][a-z]+)")
72
+ """ Regular expression that ensures that the first
73
+ token of each camel string is properly capitalized """
74
+
75
+ ALL_CAP_REGEX = re.compile(r"([a-z0-9])([A-Z])")
76
+ """ The generalized transition from lower case to
77
+ upper case letter regex that will provide a way of
78
+ putting the underscore in the middle of the transition """
79
+
80
+ SORT_MAP = {"1": 1, "-1": -1, "ascending": 1, "descending": -1}
81
+ """ The map associating the normalized (text) way of
82
+ representing sorting with the current infra-structure
83
+ number way of representing the same information """
84
+
85
+ SEQUENCE_TYPES = (list, tuple)
86
+ """ The sequence defining the various types that are
87
+ considered to be sequence based for python """
88
+
89
+ defines = defines
90
+
91
+
92
def to_limit(limit_s):
    """
    Converts the provided (string) limit value into a valid
    integer limit, with negative values clamped to zero.
    """

    return max(int(limit_s), 0)
97
+
98
+
99
def to_find(find_s):
    """
    Normalizes the provided find value into a list of find
    definitions, empty values map to an empty list and single
    (non list) values are wrapped in a one element list.
    """

    if not find_s:
        return []
    # note that an exact list type check is used (on purpose), so
    # any other sequence type is wrapped as a single value
    return find_s if type(find_s) == list else [find_s]
106
+
107
+
108
def to_sort(sort_s):
    """
    Parses the provided sort string (eg: "name:ascending,age")
    into a list of (name, direction) tuples, where direction is
    the numeric representation of the order, returning None when
    the special "default" name is present.
    """

    sort_l = []
    for item in sort_s.split(","):
        pair = item.split(":", 1)
        # a missing direction defaults to descending order
        if len(pair) == 1:
            pair.append("descending")
        name, direction = pair
        # the special "default" name disables explicit sorting
        if name == "default":
            return None
        sort_l.append((name, SORT_MAP.get(direction, 1)))
    return sort_l
121
+
122
+
123
+ ALIAS = {
124
+ "context": "find_d",
125
+ "filters": "find_d",
126
+ "filters[]": "find_d",
127
+ "filter_def": "find_d",
128
+ "filter_string": "find_s",
129
+ "filter_name": "find_n",
130
+ "filter_operator": "find_o",
131
+ "insensitive": "find_i",
132
+ "order": "sort",
133
+ "offset": "skip",
134
+ "start_record": "skip",
135
+ "number_records": "limit",
136
+ }
137
+ """ The map containing the various attribute alias
138
+ between the normalized manned and the appier manner """
139
+
140
+ FIND_TYPES = dict(
141
+ skip=int,
142
+ limit=to_limit,
143
+ find_s=legacy.UNICODE,
144
+ find_d=to_find,
145
+ find_i=bool,
146
+ find_t=legacy.UNICODE,
147
+ find_n=legacy.UNICODE,
148
+ find_o=legacy.UNICODE,
149
+ sort=to_sort,
150
+ meta=bool,
151
+ fields=list,
152
+ )
153
+ """ The map associating the various find fields with
154
+ their respective types, note that in case a special
155
+ conversion operation is required the associated value
156
+ may represent a conversion function instead """
157
+
158
+ FIND_DEFAULTS = dict(limit=10)
159
+ """ The map that defines the various default values
160
+ for a series of find related attributes """
161
+
162
+
163
def is_iterable(object):
    """
    Verifies if the provided object (value) is iterable
    meaning that the type of it is listed in a list of
    sequence based data types.

    :type object: Object
    :param object: The value that is going to be tested
    for iterable type.
    :rtype: bool
    :return: If the provided object represents an iterable
    object meaning that it belongs to sequence type.
    """

    # delegates the decision to the registered set of iterable
    # (sequence based) types
    iterable_types = defines.ITERABLES
    return isinstance(object, iterable_types)
178
+
179
+
180
def is_mobile(user_agent):
    """
    Verifies if the provided user agent string represents a
    mobile agent, for that a series of regular expressions
    are matched against the user agent string.

    :type user_agent: String
    :param user_agent: The string containing the user agent
    value that is going to be verified against a series of
    regular expressions for mobile verification.
    :rtype: bool
    :return: If the provided user agent string represents a
    mobile browser or a regular (desktop) one.
    """

    if not user_agent:
        return False

    # matches both the complete agent string and its (four
    # character) prefix against the mobile expressions, any
    # match is considered a mobile agent
    matched = defines.MOBILE_REGEX.search(user_agent)
    if not matched:
        matched = defines.MOBILE_PREFIX_REGEX.search(user_agent[:4])
    return True if matched else False
202
+
203
+
204
def is_tablet(user_agent):
    """
    Verifies if the provided user agent string represents a
    tablet agent, for that a series of regular expressions
    are matched against the user agent string.

    :type user_agent: String
    :param user_agent: The string containing the user agent
    value that is going to be verified against a series of
    regular expressions for tablet verification.
    :rtype: bool
    :return: If the provided user agent string represents a
    tablet browser or a regular (desktop) one.
    """

    if not user_agent:
        return False

    # matches the complete agent string against the tablet
    # expression and the (four character) prefix against the
    # mobile prefix one, any match implies a tablet agent
    matched = defines.TABLET_REGEX.search(user_agent)
    if not matched:
        matched = defines.MOBILE_PREFIX_REGEX.search(user_agent[:4])
    return True if matched else False
226
+
227
+
228
def is_browser(user_agent):
    """
    Determines if the provided user agent string represents an
    interactive browser, delegating the agent parsing to the
    ``browser_info`` function and checking its interactive flag.

    :type user_agent: String
    :param user_agent: The HTTP user agent string to be checked
    for interactive browser presence.
    :rtype: bool
    :return: Whether the user agent represents an interactive
    browser.
    """

    info = browser_info(user_agent)
    return bool(info and info.get("interactive", False))
249
+
250
+
251
def is_bot(user_agent):
    """
    Determines if the provided user agent string represents an
    automated (bot) agent, delegating the agent parsing to the
    ``browser_info`` function and checking its bot flag.

    :type user_agent: String
    :param user_agent: The HTTP user agent string to be checked
    for bot presence.
    :rtype: bool
    :return: Whether the user agent represents an automated bot.
    """

    info = browser_info(user_agent=user_agent)
    return bool(info and info.get("bot", False))
272
+
273
+
274
def browser_info(user_agent):
    """
    Retrieves a dictionary containing information about the browser
    and the operative system associated with the provided user agent.

    The retrieval of the information depends on the kind of user
    agent string provided, as coverage is limited.

    :type user_agent: String
    :param user_agent: The HTTP based user agent string to be processed.
    :rtype: Dictionary
    :return: The dictionary/map containing the information processed from
    the provided user agent, or None when nothing could be extracted.
    """

    if not user_agent:
        return None

    info = dict()

    for browser_i in defines.BROWSER_INFO:
        identity = browser_i["identity"]
        sub_string = browser_i.get("sub_string", identity)
        version_search = browser_i.get("version_search", sub_string + "/")
        interactive = browser_i.get("interactive", True)
        bot = browser_i.get("bot", False)

        if not sub_string in user_agent:
            continue
        if not version_search in user_agent:
            continue

        # extracts the raw version string that follows the version
        # search token, stripping typical trailing separators
        version_i = user_agent.index(version_search) + len(version_search)
        version = user_agent[version_i:].split(" ", 1)[0].strip(" ;")
        try:
            version_f = float(".".join(version.split(".")[:2]))
        except ValueError:
            version_f = 0.0
        try:
            version_i = int(version_f)
        except (ValueError, OverflowError):
            # fix: the fallback must target the integer version value
            # (previously this branch wrongly reset version_f, leaving
            # version_i bound to the string index computed above),
            # OverflowError is also handled for infinite float values
            version_i = 0

        info.update(
            name=identity,
            version=version,
            version_f=version_f,
            version_i=version_i,
            interactive=interactive,
            bot=bot,
        )
        break

    for os_i in defines.OS_INFO:
        identity = os_i["identity"]
        sub_string = os_i.get("sub_string", identity)

        if not sub_string in user_agent:
            continue

        info.update(os=identity)
        break

    return info if info else None
338
+
339
+
340
def email_parts(base, strip=True):
    """
    Unpacks the name and email parts from a generalized email
    string, supporting both the plain form (``email``) and the
    complex one (``Name <email>``).

    When a sequence of strings is provided a list with one tuple
    per element is returned instead.

    :type base: String/List
    :param base: The email string (or sequence of them) to be parsed.
    :type strip: bool
    :param strip: If extra whitespace should be removed from the
    value before parsing takes place.
    :rtype: Tuple/List
    :return: A (name, email) tuple, or a list of such tuples for
    sequence based input, with (None, None) for unparsable values.
    """

    if type(base) in SEQUENCE_TYPES:
        return [email_parts(item, strip=strip) for item in base]

    if not base:
        return (None, None)
    value = base.strip() if strip else base

    match = defines.EMAIL_REGEX.match(value)
    if not match:
        return (None, None)

    email = match.group("email_a") or match.group("email_b")
    name = match.group("name") or email
    return (name, email)
379
+
380
+
381
def email_mime(base, encoding="utf-8"):
    """
    Builds the MIME compliant ``Name <email>`` representation for
    the provided email string (or sequence of them), encoding the
    name part through the SMTP header helper; returns None (or
    filters out) values that cannot be parsed.
    """

    # under Python 3 the header encoding is delegated downstream,
    # so no explicit encoding value is used
    if legacy.PYTHON_3:
        encoding = None

    if type(base) in SEQUENCE_TYPES:
        results = (email_mime(item, encoding=encoding) for item in base)
        return [result for result in results if result]

    name, email = email_parts(base)
    if not (name and email):
        return None

    encoded_name = smtp.header(name, encoding=encoding)
    return "%s <%s>" % (encoded_name, email)
400
+
401
+
402
def email_name(base):
    """
    Extracts the name part from the provided email string, or the
    (truthy) name parts from a sequence of such strings.
    """

    if type(base) in SEQUENCE_TYPES:
        names = (email_name(item) for item in base)
        return [name for name in names if name]
    name, _email = email_parts(base)
    return name
412
+
413
+
414
def email_base(base):
    """
    Extracts the email address part from the provided email string,
    or the (truthy) email parts from a sequence of such strings.
    """

    if type(base) in SEQUENCE_TYPES:
        emails = (email_base(item) for item in base)
        return [email for email in emails if email]
    _name, email = email_parts(base)
    return email
424
+
425
+
426
def date_to_timestamp(value, format="%d/%m/%Y"):
    """
    Converts a date string in the given strptime format into a UTC
    based integer timestamp, returning None for empty values or for
    values that cannot be parsed with the provided format.
    """

    if not value:
        return None
    try:
        parsed = datetime.datetime.strptime(value, format)
    except Exception:
        return None
    return calendar.timegm(parsed.utctimetuple())
435
+
436
+
437
def obfuscate(value, display_l=3, token="*"):
    """
    Obfuscates the provided string keeping only the first
    ``display_l`` characters visible and masking the remaining
    ones with the given token character.
    """

    visible = min(len(value), display_l)
    hidden = len(value) - visible
    return value[:visible] + token * hidden
442
+
443
+
444
def import_pip(name, package=None, default=None):
    """
    Imports the module with the given name, attempting a pip based
    install of the associated package when the first import fails,
    returning the default value when no import is possible.
    """

    package = package or name
    try:
        return __import__(name)
    except ImportError:
        pass
    try:
        install_pip_s(package)
    except Exception:
        return default
    try:
        return __import__(name)
    except ImportError:
        return default
458
+
459
+
460
def ensure_pip(name, package=None, delayed=False):
    """
    Ensures that the module with the given name is importable,
    installing the associated pip package when it is not.
    """

    target = package or name
    try:
        __import__(name)
    except ImportError:
        install_pip_s(target, delayed=delayed)
466
+
467
+
468
def install_pip(package, delayed=False, isolated=True, user=None):
    """
    Installs the provided pip package using the best available pip
    entry point, optionally running the install in background
    (delayed) or in an isolated child process.

    :type package: String
    :param package: The name of the pip package to be installed.
    :type delayed: bool
    :param delayed: If the install should run in a background process
    (fire and forget, result not awaited).
    :type isolated: bool
    :param isolated: If the install should run on a child process,
    avoiding state pollution of the current process.
    :type user: bool
    :param user: If the install should target the user site packages,
    when not provided falls back to the PIP_USER configuration.
    """

    try:
        import pip

        pip_internal = pip
    finally:
        pass
    try:
        import pip._internal

        pip_internal = pip._internal
    except ImportError:
        pass
    try:
        import pip._internal.main

        pip_internal = pip._internal.main
    except ImportError:
        pass

    # fix: only fall back to the configuration value when no explicit
    # user flag has been provided, otherwise the user parameter (relied
    # upon by install_pip_s for its retry strategy) would be ignored
    if user is None:
        user = config.conf("PIP_USER", False, cast=bool)

    args = ["install", package]
    if hasattr(pip_internal, "main"):
        pip_main = pip_internal.main
    elif hasattr(pip, "main"):
        pip_main = pip.main  # @UndefinedVariable
    else:
        raise exceptions.OperationalError(message="pip not found")
    if user:
        args.insert(1, "--user")
    if delayed:
        # background strategy, the process is started but never joined
        # and the operation is optimistically considered successful
        process = multiprocessing.Process(target=pip_main, args=(args,))
        process.start()
        result = 0
    elif isolated:
        process = multiprocessing.Process(target=pip_main, args=(args,))
        process.start()
        process.join()
        result = process.exitcode
    else:
        result = pip_main(args)
    if result == 0:
        return
    raise exceptions.OperationalError(message="pip error, exit code (%d)" % result)
511
+
512
+
513
def install_pip_s(package, delayed=False):
    """
    Installs the requested pip package using a safe strategy: a
    regular (system wide) install is attempted first and, on
    operational failure, a user level install is tried instead.

    :type package: String
    :param package: The name of the pip package to be installed.
    :type delayed: bool
    :param delayed: If the install should run in background mode.
    """

    try:
        install_pip(package, delayed=delayed, user=False)
    except exceptions.OperationalError:
        install_pip(package, delayed=delayed, user=True)
518
+
519
+
520
def request_json(request=None, encoding="utf-8"):
    """
    Retrieves the parsed JSON payload for the provided request (or
    the current global one when none is given), caching the parsed
    value in the request properties so that subsequent calls avoid
    re-parsing; an empty map is returned on parse failure.
    """

    # resolves the effective request and short-circuits with the
    # cached value whenever the payload was already parsed
    request = request or common.base().get_request()
    if "_data_j" in request.properties:
        return request.properties["_data_j"]

    # decodes (when needed) and parses the raw request data, any
    # failure is gracefully handled by falling back to an empty map
    payload = request.data
    try:
        if legacy.is_bytes(payload):
            payload = payload.decode(encoding)
        data_j = json.loads(payload)
    except Exception:
        data_j = {}

    # stores the parsed value for future retrievals and returns it
    request.properties["_data_j"] = data_j
    return data_j
545
+
546
+
547
def get_context(self):
    """
    Retrieves the "best" possible context object for the current
    execution life-cycle, typically an "attached" (non mock) request
    object; multiple retrieval strategies are attempted in sequence
    and None is returned when all of them fail.
    """

    def _usable(candidate):
        # a context is usable when it complies with the request
        # interface and is not a mock (placeholder) request
        return hasattr(candidate, "is_mock") and not candidate.is_mock()

    # first strategy: the request attached to the current instance
    # (typically a property), validated for compliance
    if hasattr(self, "request"):
        candidate = self.request
        if _usable(candidate):
            return candidate

    # second strategy: the globally registered request for the
    # current execution environment (not thread safe)
    candidate = common.base().get_request()
    if _usable(candidate):
        return candidate

    # no valid execution context could be retrieved
    return None
576
+
577
+
578
def get_object(object=None, alias=False, page=False, find=False, norm=True, **kwargs):
    """
    Builds a structured object from the data available in the current
    request (JSON payload, files, post and query parameters), layered
    on top of an optional base object.

    :type object: Dictionary
    :param object: The (optional) base object to be copied and then
    populated with the request provided values.
    :type alias: bool
    :param alias: If attribute alias resolution should be performed.
    :type page: bool
    :param page: If paging oriented attributes (skip, limit, sort)
    should be computed from the page/size/sorter values.
    :type find: bool
    :param find: If find type coercion and default value population
    should be applied to the object.
    :type norm: bool
    :param norm: If sequence based ("name[]") values should be
    normalized into lists of dictionaries.
    :rtype: Dictionary
    :return: The final structured object representing the data in
    the current request.
    """

    # retrieves the base request object that is going to be used in
    # the construction of the object
    request = common.base().get_request()

    # verifies if the provided object is valid in such case creates
    # a copy of it and uses it as the base object for validation
    # otherwise used an empty map (form validation)
    object = object and copy.copy(object) or {}

    # retrieves the current request data and tries to
    # "load" it as JSON data, in case it fails gracefully
    # handles the failure setting the value as an empty map
    data_j = request_json()

    # uses all the values referencing data in the request to try
    # to populate the object this way it may be constructed using
    # any of theses strategies (easier for the developer)
    # NOTE: the merge order is contract — later sources override
    # earlier ones (JSON < files < post < params)
    for name, value in data_j.items():
        object[name] = value
    for name, value in request.files_s.items():
        object[name] = value
    for name, value in request.post_s.items():
        object[name] = value
    for name, value in request.params_s.items():
        object[name] = value

    # in case the alias flag is set tries to resolve the attribute
    # alias and in case the find types are set converts the find
    # based attributes using the currently defined mapping map
    if alias:
        resolve_alias(object)
    if page:
        page_types(object)
    if find:
        find_types(object)
    if find:
        find_defaults(object, kwargs)

    # in case the normalization flag is set runs the normalization
    # of the provided object so that sequences are properly handled
    # as defined in the specification (this allows multiple references)
    if norm:
        norm_object(object)

    # returns the constructed object to the caller method this object
    # should be a structured representation of the data in the request
    return object
626
+
627
+
628
def resolve_alias(object):
    """
    Replaces alias based keys in the provided object with their
    canonical names, as defined in the global ALIAS map, mutating
    the object in place.
    """

    for key, value in legacy.eager(object.items()):
        if key not in ALIAS:
            continue
        canonical = ALIAS[key]
        object[canonical] = value
        del object[key]
635
+
636
+
637
def page_types(object, size=50):
    """
    Computes the low level query attributes (skip, limit and sort)
    from the page oriented values (page, size, sorter, direction)
    present in the provided object, mutating it in place.
    """

    page_i = int(object.get("page", 1))
    size_i = int(object.get("size", size))
    sorter = object.get("sorter", None)
    direction = object.get("direction", "descending")
    object["skip"] = (page_i - 1) * size_i
    object["limit"] = size_i
    if sorter:
        object["sort"] = "%s:%s" % (sorter, direction)
649
+
650
+
651
def find_types(object):
    """
    Casts the find oriented values in the object to their declared
    types (as defined in FIND_TYPES), removing any attribute that
    is not part of the find specification; mutates in place.
    """

    for key, value in legacy.eager(object.items()):
        if key in FIND_TYPES:
            object[key] = FIND_TYPES[key](value)
        else:
            del object[key]
658
+
659
+
660
def find_defaults(object, kwargs):
    """
    Populates missing find attributes in the object, first from the
    provided kwargs (only for keys declared in FIND_TYPES) and then
    from the global FIND_DEFAULTS values; mutates in place.
    """

    for key, value in legacy.iteritems(kwargs):
        if key in object:
            continue
        if key not in FIND_TYPES:
            continue
        object[key] = value

    for key, value in legacy.iteritems(FIND_DEFAULTS):
        if key in object:
            continue
        object[key] = value
672
+
673
+
674
def norm_object(object):
    """
    Normalizes sequence based values in the provided object, turning
    keys suffixed with "[]" into proper lists of dictionaries built
    from their leaf values; the object is mutated in place.

    :type object: Dictionary
    :param object: The object whose sequence based values are going
    to be normalized.
    """

    # iterates over a snapshot of the key value associations, as the
    # object is mutated during iteration (fix: iterating the live
    # items view while deleting/inserting keys raises RuntimeError
    # under Python 3)
    for name, value in list(object.items()):
        # ignores any name that does not carry the sequence
        # ("[]") suffix indication
        if not name.endswith("[]"):
            continue

        # removes the raw (suffixed) entry and normalizes the
        # name by stripping the sequence suffix
        del object[name]
        name = name[:-2]

        # an empty value is normalized into an empty sequence,
        # considered to be the default value
        if not value:
            object[name] = []
            continue

        # linearizes the value into its leaf nodes and measures
        # the number of target dictionaries from the first leaf's
        # values, pre-creating that amount of (empty) dictionaries
        leafs_l = leafs(value)
        first = leafs_l[0] if leafs_l else (None, [])
        _fqn, values = first
        size = len(values)
        items = [dict() for _index in range(size)]

        # sets the sequence of generated dictionaries under the
        # newly normalized name
        object[name] = items

        # distributes each leaf value by the proper dictionary in
        # the sequence (the effective normalization process)
        for _name, _value in leafs_l:
            _name_l = _name.split(".")
            for index in range(size):
                set_object(items[index], _name_l, _value[index])
722
+
723
+
724
def set_object(object, name_l, value):
    """
    Sets a composite value in an object, allowing for dynamic
    setting of arbitrarily deep key values.

    Intermediate dictionaries are created (or reused) as needed so
    that the caller does not have to pre-build the nested structure.

    :type object: Dictionary
    :param object: The target object to be changed and set with
    the target value.
    :type name_l: List
    :param name_l: The sequence of partial names that composes the
    fully qualified name (eg: path.to.end becomes a three element
    list) describing where the value should be placed.
    :type value: Object
    :param value: The value to be set at the described position.
    """

    # walks down the intermediate names, creating (or reusing) a
    # nested dictionary for each step of the path
    current = object
    for name in name_l[:-1]:
        branch = current.get(name, {})
        current[name] = branch
        current = branch

    # the last name in the path receives the effective value
    current[name_l[-1]] = value
765
+
766
+
767
def leafs(object):
    """
    Linearizes the provided object structure into a list of tuples,
    one per leaf node, each carrying the fully qualified (dotted)
    name and the leaf's values.

    A leaf is any value that is not a map; intermediary maps are
    treated as trunks and percolated recursively. Non list leaf
    values are wrapped into single element lists.

    This is a recursive function that takes some memory for the
    construction of the list, so it should be used with care to
    avoid bottlenecks.

    :type object: Dictionary
    :param object: The object for which the leafs list structure is
    meant to be retrieved.
    :rtype: List
    :return: The list of (fully qualified name, values) tuples for
    the provided object.
    """

    result = []

    for key, value in object.items():
        # map values are trunks: recurse and prefix each returned
        # fully qualified name with the current key
        if type(value) == dict:
            for sub_key, sub_value in leafs(value):
                result.append((key + "." + sub_key, sub_value))
        # any other value is a leaf, wrapped in a list when it is
        # not already a sequence
        else:
            if not type(value) == list:
                value = [value]
            result.append((key, value))

    return result
819
+
820
+
821
def gather_errors(lazy_dict, resolve=True):
    """
    Iteratively gathers lazy evaluation errors from the provided
    lazy dictionary, allowing a complete error collection instead
    of a single (first failure) evaluation.

    :type lazy_dict: LazyDict
    :param lazy_dict: The lazy dictionary to be percolated and
    evaluated sequentially.
    :type resolve: bool
    :param resolve: If values should be (re-)evaluated even when
    already eager loaded, unsetting this risks missing errors.
    :rtype: Dictionary
    :return: A map of error message sequences indexed by the
    parameter name that originated them.
    """

    errors = dict()

    # evaluates every key, collecting the message of any framework
    # level exception raised under the associated key
    for key in lazy_dict:
        try:
            lazy_dict.__getitem__(key, resolve=resolve)
        except (exceptions.AppierException, exceptions.BaseInternalError) as exception:
            errors.setdefault(key, []).append(exception.message)

    return errors
856
+
857
+
858
def gen_token(limit=None, hash=hashlib.sha256):
    """
    Generates a random cryptographic ready token, seeded from a
    truly random UUID and digested with the provided hash strategy
    (SHA256 by default), returned as an hexadecimal string.

    :type limit: int
    :param limit: The maximum number of characters allowed for the
    generated token, unlimited when not set.
    :type hash: Function
    :param hash: The hashing constructor used to digest the seed,
    compliant with the base Python hashing infra-structure.
    :rtype: String
    :return: The hexadecimal based token string.
    """

    seed = str(uuid.uuid4()).encode("utf-8")
    token = hash(seed).hexdigest()
    return token[:limit] if limit else token
885
+
886
+
887
def html_to_text(data):
    """
    Converts the provided HTML textual data into an approximate
    plain text representation using a series of heuristics (body
    extraction, tag stripping, entity substitution), so the result
    should not be considered completely reliable.

    The current implementation is neither memory nor processor
    efficient and should be used carefully.

    :type data: String
    :param data: The HTML string of text to be converted into the
    plain text representation.
    :rtype: String
    :return: The approximate plain text representation of the
    provided HTML contents.
    """

    # normalizes line endings and substitutes the (small) set of
    # supported HTML entities with textual equivalents
    data = data.strip().replace("\n", "\r")
    data = data.replace("&copy;", "Copyright")
    data = data.replace("&middot;", "-")

    # narrows the contents to the body section (when present) and
    # strips all of the remaining markup tags
    matches = re.findall(defines.BODY_REGEX, data)
    data = matches[0] if matches else ""
    data = defines.TAG_REGEX.sub("", data)

    # removes empty lines and leading/trailing whitespace from
    # each remaining line, joining the final text back together
    stripped = (line.strip() for line in data.splitlines(False))
    data = "\n".join(line for line in stripped if line)
    return data.replace("\n.", ".")
927
+
928
+
929
def camel_to_underscore(camel, separator="_", lower=True):
    """
    Converts the provided camel cased value into a normalized
    (separator delimited) underscore style string, by default
    lower casing the resulting letters.

    This is useful as most of the Python string standards are
    compliant with the underscore strategy.

    :type camel: String
    :param camel: The camel cased string to be converted.
    :type separator: String
    :param separator: The separator token used between words in
    the converted value.
    :type lower: bool
    :param lower: If the letter casing should be lowered while
    converting the value.
    :rtype: String
    :return: The underscore based string resulting from the
    conversion of the provided camel cased one.
    """

    if not camel:
        return camel
    result = FIRST_CAP_REGEX.sub(r"\1" + separator + r"\2", camel)
    result = ALL_CAP_REGEX.sub(r"\1" + separator + r"\2", result)
    return result.lower() if lower else result
961
+
962
+
963
def camel_to_readable(camel, lower=False, capitalize=False):
    """
    Converts the given camel cased string into a human readable
    one (words separated by spaces), useful to present class names
    and similar identifiers to an end user.

    :type camel: String
    :param camel: The camel case string value to be converted into
    a readable string.
    :type lower: bool
    :param lower: If the camel based value should be lower cased
    before the conversion to readable.
    :type capitalize: bool
    :param capitalize: If all of the words should be capitalized
    or if instead only the first one should.
    :rtype: String
    :return: The final human readable string.
    """

    if not camel:
        return camel
    underscore = camel_to_underscore(camel, lower=lower)
    return underscore_to_readable(underscore, capitalize=capitalize)
990
+
991
+
992
def underscore_to_camel(underscore, lower=False):
    """
    Converts the provided underscore based value into a normalized
    camel cased string, optionally producing the lower camel case
    variant (first letter lower cased).

    :type underscore: String
    :param underscore: The underscore cased string to be converted
    into a camel case based string.
    :type lower: bool
    :param lower: If the first letter of the resulting camel case
    string should be lower cased (lower camel case).
    :rtype: String
    :return: The camel case based string resulting from the
    conversion of the provided underscore cased one.
    """

    if not underscore:
        return underscore
    camel = underscore_to_readable(underscore, capitalize=True, separator="")
    if lower:
        camel = camel[0].lower() + camel[1:]
    return camel
1020
+
1021
+
1022
def underscore_to_readable(underscore, capitalize=False, separator=" "):
    """
    Converts the given underscore oriented string value into a
    readable one, meaning that the returned value is a set of
    words joined by the provided separator.

    This method may be used to convert class attributes into
    something that is readable by an end user.

    :type underscore: String
    :param underscore: The underscore string value to be converted
    into a readable string.
    :type capitalize: bool
    :param capitalize: If all of the words should be capitalized
    or if instead only the first one should.
    :type separator: String
    :param separator: The separator to be used to join the multiple
    parts of the resulting readable tokens.
    :rtype: String
    :return: The final human readable string that may be used to
    display a value to an end user.
    """

    if not underscore:
        return underscore
    parts = [part for part in underscore.split("_") if part]
    # fix: a value composed only of underscore characters yields no
    # parts, previously raising IndexError on the first part access
    if not parts:
        return ""
    if capitalize:
        parts = [part[0].upper() + part[1:] for part in parts]
    else:
        parts[0] = parts[0][0].upper() + parts[0][1:]
    return separator.join(parts)
1054
+
1055
+
1056
def quote(value, *args, **kwargs):
    """
    Quotes the passed value according to the URL escaping scheme,
    first encoding unicode values into UTF-8 as mandated by the
    standard; should be used instead of the direct URL library
    equivalent.

    :type value: String
    :param value: The string value to be quoted according to the
    URL escaping scheme.
    :rtype: String
    :return: The quoted value, safe for usage inside URLs.
    """

    if isinstance(value, legacy.UNICODE):
        value = value.encode("utf-8")
    return legacy.quote(value, *args, **kwargs)
1077
+
1078
+
1079
def unquote(value, *args, **kwargs):
    """
    Unquotes the provided value according to the URL scheme, the
    intermediary byte value is decoded as UTF-8 so that the final
    result is an unicode string; should be used instead of the
    direct URL library equivalent.

    :type value: String
    :param value: The string value to be unquoted according to the
    URL escaping scheme.
    :rtype: String
    :return: The unquoted value as an unicode string representing
    the same value.
    """

    result = legacy.unquote(value, *args, **kwargs)
    if isinstance(result, legacy.BYTES):
        result = result.decode("utf-8")
    return result
1102
+
1103
+
1104
def escape(value, char, escape="\\"):
    """
    Escapes the provided string, prefixing every occurrence of the
    target character(s) with the escape value; occurrences of the
    escape character itself are doubled first.

    :type value: String
    :param value: The string whose target characters are going to
    be escaped.
    :type char: String/List/Tuple
    :param char: The character (or sequence of characters) to be
    targeted by the escaping.
    :type escape: String
    :param escape: The character used for escaping (typically '\').
    :rtype: String
    :return: The final string with the target characters escaped.
    """

    targets = char if isinstance(char, (list, tuple)) else (char,)
    result = value.replace(escape, escape + escape)
    for target in targets:
        result = result.replace(target, escape + target)
    return result
1128
+
1129
+
1130
def unescape(value, escape="\\"):
    """
    Unescapes the provided string, removing the given escape
    character and keeping the character that follows it verbatim;
    a trailing (dangling) escape character is preserved.

    This is considered to be a very expensive operation and so it
    should be used carefully.

    :type value: String
    :param value: The string value to be unescaped.
    :rtype: String
    :return: The final unescaped value.
    """

    chars = []
    stream = iter(value)
    for token in stream:
        if not token == escape:
            chars.append(token)
            continue
        # an escape token consumes the next character verbatim,
        # falling back to the escape itself when input is exhausted
        chars.append(next(stream, escape))
    return "".join(chars)
1155
+
1156
+
1157
def count_unescape(value, sub, escape="\\"):
    """
    Counts the occurrences of the sub string in an escaped string,
    skipping any occurrence that is preceded by the escape value.

    :type value: String
    :param value: The base string to have the number of occurrences
    of a sub string counted.
    :type sub: String
    :param sub: The sub string to be evaluated for occurrences,
    constrained to single character strings.
    :type escape: String
    :param escape: The "special" escape character that allows the
    delimiter to also be present in the value.
    :rtype: int
    :return: The final count of (non escaped) occurrences of the
    sub string.
    """

    total = 0
    stream = iter(value)
    for token in stream:
        if token == escape:
            # the character following an escape is consumed and
            # never counted, even when the input ends here
            next(stream, None)
        elif token == sub:
            total += 1
    return total
1188
+
1189
+
1190
def split_unescape(value, delimiter=" ", max=-1, escape="\\", unescape=True):
    """
    Splits the provided string around the delimiter character,
    properly honoring escape sequences so that escaped delimiters
    are not considered split points.

    This is considered to be a very expensive operation when compared
    to the simple split operation and so it should be used carefully.

    :type value: String
    :param value: The string value that is going to be split around
    the proper delimiter value taking into account the escaping.
    :type delimiter: String
    :param delimiter: The delimiter character to be used in the split
    operation.
    :type max: int
    :param max: The maximum number of split operations that are going
    to be performed by this operation (negative means unlimited).
    :type escape: String
    :param escape: The "special" escape character that allows the
    delimiter to be present inside the resulting parts.
    :type unescape: bool
    :param unescape: If the resulting parts should already be
    unescaped (normalized).
    :rtype: List
    :return: The final list containing the multiple string parts separated
    by the delimiter character and respecting the escape sequences.
    """

    parts = []
    buffer = []
    splits = 0
    index = 0
    length = len(value)
    while index < length:
        char = value[index]
        if char == escape:
            if index + 1 < length:
                # keeps the escape marker in the output only when
                # no unescaping was requested, then emits the
                # escaped character itself
                if not unescape:
                    buffer.append(escape)
                buffer.append(value[index + 1])
                index += 2
            else:
                # a dangling escape at the end of the string is
                # preserved as-is in the current part
                buffer.append(escape)
                index += 1
        elif char == delimiter and not splits == max:
            parts.append("".join(buffer))
            buffer = []
            splits += 1
            index += 1
        else:
            buffer.append(char)
            index += 1
    parts.append("".join(buffer))
    return parts
1240
+
1241
+
1242
def call_safe(callable, *args, **kwargs):
    """
    Calls the provided callable in a "safe" way, meaning that
    every keyword argument is first validated against the
    callable's signature and removed when not supported.

    In case the callable declares a wildcard (kwargs) parameter
    the validation is skipped and the call is performed
    immediately with the complete set of arguments.

    :type callable: Callable
    :param callable: The callable that is going to have the keyword
    based arguments validated and then get called.
    :rtype: object
    :return: The value that results from calling the provided
    callable with the filtered set of arguments, this may have
    any data type.
    """

    # inspects the callable's signature, obtaining the sequence of
    # named arguments and the (possible) wildcard kwargs indicator,
    # when the wildcard exists no filtering is required and the
    # call may be performed immediately
    spec = legacy.getargspec(callable)
    named_args, wildcard_kwargs = spec[0], spec[2]
    if wildcard_kwargs:
        return callable(*args, **kwargs)

    # removes from the keyword arguments map every key that is not
    # part of the callable's declared arguments (these would
    # otherwise raise a type error at call time)
    for key in legacy.keys(kwargs):
        if not key in named_args:
            del kwargs[key]

    # performs the call with the filtered keyword arguments,
    # propagating the resulting value to the caller method
    return callable(*args, **kwargs)
1285
+
1286
+
1287
def base_name(name, suffix="_controller"):
    """
    Converts a class name into its underscore based version and
    removes the provided suffix from the end of the result (in
    case it is present).

    This method provides an easy way to expose class names
    in external environments.

    :type name: String
    :param name: The (class) name that is going to be converted
    into its treated base name form.
    :type suffix: String
    :param suffix: The optional suffix value that if sent will
    be removed from the last part of the name string.
    :rtype: String
    :return: The resulting base name for the provided name, treated
    and with the suffix removed (in case it exists).
    """

    normalized = camel_to_underscore(name)
    if normalized.endswith(suffix):
        normalized = normalized[: len(suffix) * -1]
    return normalized
1312
+
1313
+
1314
def base_name_m(name, suffixes=("_controller", "_part", "_app")):
    """
    Multiple suffix version of the base name function, stripping
    every one of the provided suffixes from the value while
    converting it into its underscore based base name.

    :type name: String
    :param name: The name from which the base name will be
    extracted and treated, with multiple value strategy.
    :type suffixes: List/Tuple
    :param suffixes: The complete set of suffixes that are going
    to be removed from the provided value creating the base name.
    :rtype: String
    :return: The resulting base name for the provided name, treated
    and without the complete set of provided suffixes.
    """

    # applies the (single) base name operation once per suffix,
    # accumulating the result over the various iterations
    for current in suffixes:
        name = base_name(name, suffix=current)
    return name
1334
+
1335
+
1336
def is_content_type(data, target):
    """
    Verifies if any of the provided mime types (target) is
    valid for the provided content type string.

    :type data: String
    :param data: The content type string to be parsed and matched
    against the target mime type values.
    :type target: Tuple/String
    :param target: The tuple containing the multiple mime type values
    to be verified against the content type mime strings.
    :rtype: bool
    :return: If any of the provided mime types is considered valid
    for the content type.
    """

    # normalizes the target into a sequence and parses the content
    # type string into its normalized sequence of mime types
    targets = target if isinstance(target, (list, tuple)) else (target,)
    mime, _extra = parse_content_type(data)

    # a match happens either on the exact mime value or on the
    # wildcard (type/*) version of the candidate
    for candidate in targets:
        base, _sub = candidate.split("/")
        if candidate in mime:
            return True
        if base + "/*" in mime:
            return True
    return False
1363
+
1364
+
1365
def parse_content_type(data):
    """
    Parses the provided content type string retrieving both the multiple
    mime types associated with the resource and the extra key to value
    items associated with the string in case they are defined (it's optional).

    :type data: String
    :param data: The content type data that is going to be parsed to
    obtain the structure of values for the content type string, this must
    be a plain unicode string and not a binary string.
    :rtype: Tuple
    :return: The sequence of mime types of the content and the multiple
    extra values associated with the content type (eg: charset, boundary, etc.)
    """

    # creates the list of final normalized mime types and the
    # dictionary to store the extra values
    types = []
    extra_m = dict()

    # in case no valid type has been sent returns the values
    # immediately to avoid further problems
    if not data:
        return types, extra_m

    # extracts the mime and the extra parts from the data string
    # they are the basis of the processing method
    data = data.strip(";")
    parts = data.split(";")
    mime = parts[0].strip()
    extra = parts[1:]

    # runs a series of verifications on the base mime value and in
    # case it's not valid returns the default values immediately
    if not "/" in mime:
        return types, extra_m

    # strips the complete set of valid extra values, filtering out
    # any empty items, note that these values are going to be
    # processed as key to value items
    extra = [value.strip() for value in extra if value]

    # splits the complete mime type into its type and sub
    # type components (first step of normalization)
    type, sub_type = mime.split("/", 1)
    sub_types = sub_type.split("+")

    # iterates over the complete set of sub types to
    # create the full mime type for each of them and
    # add the new full items to the types list (normalization)
    for sub_type in sub_types:
        types.append(type + "/" + sub_type)

    # goes through all of the extra key to value items and converts
    # them into proper dictionary values, notice that the split is
    # limited to the first equals sign so that values that contain
    # "=" themselves (eg: base64 padded values) are preserved
    for extra_item in extra:
        if not "=" in extra_item:
            continue
        extra_item = extra_item.strip()
        key, value = extra_item.split("=", 1)
        extra_m[key] = value

    # returns the final tuple containing both the normalized
    # mime types for the content and the extra key to value items
    return types, extra_m
1431
+
1432
+
1433
def parse_cookie(data):
    """
    Parses the provided cookie data string into a dictionary that
    maps each cookie name to its (possibly empty) value.

    Cookies without an explicit key to value association are stored
    with an empty string as their value (unset).

    :type data: String
    :param data: The cookie serialized data that is going to be parsed
    in order to create the final cookie dictionary/map.
    :rtype: Dictionary
    :return: The final map containing key the value association for the
    various parts of the provided cookie string.
    """

    # builds the resulting map by splitting the raw data around the
    # cookie separator and then each of the (stripped) cookies around
    # the first equals sign, defaulting the value to an empty string
    cookie_m = dict()
    for item in data.split(";"):
        item = item.strip()
        if not "=" in item:
            item += "="
        key, value = item.split("=", 1)
        cookie_m[key] = value
    return cookie_m
1467
+
1468
+
1469
def parse_multipart(data, boundary):
    """
    Parses the provided data buffer as a set of multipart data
    the content type is not verified inside this method.

    The function returns a tuple containing both a map of "basic"
    form parameters, a map containing the set of file tuples and
    a sequence containing the name and values tuples in order.

    :type data: String
    :param data: The string containing the complete set of data
    that is going to be processed as multipart.
    :type boundary: String
    :param boundary: The string containing the basic boundary header
    value, should be provided from the caller function.
    :rtype: Tuple
    :return: A tuple containing both the map of post attributes,
    the map of file attributes and a list with the various name and
    value tuples (to be able to access ordered values).
    """

    ordered = []
    ordered_m = dict()
    post = dict()
    files = dict()

    # normalizes the boundary value and builds both the "middle"
    # boundary byte sequence and the final (closing) one, the
    # slicing at offset 9 skips the "boundary=" prefix present
    # in the provided header value
    boundary = str(boundary)
    boundary = boundary.strip()
    boundary_base = "--" + boundary[9:].strip('"')
    boundary_value = legacy.bytes(boundary_base + "\r\n")
    boundary_extra = legacy.bytes(boundary_base + "--" + "\r\n")
    boundary_extra_l = len(boundary_extra)
    parts = data.split(boundary_value)
    parts[-1] = parts[-1][: boundary_extra_l * -1]

    # iterates over the complete set of parts in the multi part payload
    # to process them and add them to the appropriate dictionary and list
    for part in parts:
        # in case the current part is not valid or empty skips the
        # current cycle (nothing to be done)
        if not part:
            continue

        # splits the current part around the beginning of part sequence
        # (blank line separating headers from payload) and retrieves
        # the proper contents if they exist
        part_s = part.split(b"\r\n\r\n", 1)
        headers = part_s[0]
        if len(part_s) > 1:
            contents = part_s[1]
        else:
            contents = None

        # strips the current headers string and then splits it around
        # the various lines that define the various headers
        headers_data = headers.strip()
        headers_lines = headers_data.split(b"\r\n")

        # creates the initial headers map of the headers that contains
        # the association between the byte based key and the data value
        # then retrieves the tuple of values and resets the map as it's
        # going to be changed and normalized with the new values
        headers = dict([line.split(b":", 1) for line in headers_lines])
        headers_t = legacy.eager(headers.items())
        headers.clear()

        # runs the normalization process using the header tuples, this
        # should create a map of headers with the key as a normal string
        # and the values encoded as byte based strings (contain data)
        for key, value in headers_t:
            key = legacy.str(key).lower()
            value = value.strip()
            headers[key] = value

        # tries to retrieve the content disposition header for the current
        # part and in case there's none it's not possible to process the
        # current part (this header is considered required)
        disposition = headers.get("content-disposition", None)
        if not disposition:
            continue

        # creates the dictionary that will hold the various parts of the
        # content disposition header that are going to be extracted for
        # latter processing, this is required to make some decisions on
        # the type of part that is currently being processed
        # NOTE(review): this re-binds the outer "parts" name (the list of
        # multipart sections), which is safe only because the outer for
        # loop has already captured the list iterator
        parts = dict()
        parts_data = disposition.split(b";")
        for value in parts_data:
            value_s = value.split(b"=", 1)
            key = legacy.str(value_s[0]).strip().lower()
            if len(value_s) > 1:
                value = value_s[1].strip()
            else:
                value = None
            parts[key] = value

        # retrieves the various characteristics values from the headers
        # and from the content disposition of the current part, these
        # values are going to be used to decide on whether the current
        # part is a file or a normal key value attribute
        content_type = headers.get("content-type", None)
        name = parts.get("name", b'"undefined"').strip(b'"')
        filename = parts.get("filename", b"").strip(b'"')

        # decodes the various content disposition values into an unicode
        # based string so that they may latter be used safely inside the
        # application environment (as expected by the current structure)
        if content_type:
            content_type = content_type.decode("utf-8")
        name = name.decode("utf-8")
        filename = filename.decode("utf-8")

        # in case the currently discovered contents are valid they
        # must be stripped from the last two bytes (the trailing CRLF
        # that precedes the next boundary) so that the real value is
        # retrieved from the provided contents
        contents = contents if contents == None else contents[:-2]

        # verifies if the file name is included in the parts unpacked
        # from the content type in case it does this is considered to be
        # file part otherwise it's a normal key value part
        if "filename" in parts:
            is_file = True
        else:
            is_file = False

        # builds the final value for the current part, a file tuple for
        # file based parts and a plain (UTF-8 decoded) string otherwise
        if is_file:
            target = files
            file_tuple = (filename, content_type, contents)
            value = FileTuple(file_tuple)
        else:
            target = post
            value = contents if contents == None else contents.decode("utf-8")

        # appends the value to the sequence of values for the part name,
        # both in the target map (post or files) and in the ordered map
        # used to preserve the original (insertion) order of the parts
        exists = name in ordered_m

        sequence = target.get(name, [])
        sequence.append(value)
        target[name] = sequence

        sequence_o = ordered_m.get(name, [])
        sequence_o.append(value)
        ordered_m[name] = sequence_o

        # only the first occurrence of a name adds a tuple to the
        # ordered sequence, the (shared) value list is mutated in
        # place for any of the remaining occurrences
        if exists:
            continue

        tuple_s = (name, sequence_o)
        ordered.append(tuple_s)

    return (post, files, ordered)
1619
+
1620
+
1621
def decode_params(params):
    """
    Decodes the complete set of parameters defined in the
    provided map so that all of keys and values are created
    as unicode strings instead of UTF-8 based strings.

    This method's execution is mandatory on the retrieval of
    the parameters from the sent data.

    :type params: Dictionary
    :param params: The map containing the encoded set of values
    that are going to be decoded from the UTF-8 form.
    :rtype: Dictionary
    :return: The decoded map meaning that all the keys and values
    are in the unicode form instead of the string form.
    """

    # builds a new map where both the keys and each of the items in
    # the value sequences are decoded from UTF-8 whenever they are
    # byte based strings (unicode values pass through unchanged)
    decoded = dict()
    for key, value in params.items():
        items = [
            item.decode("utf-8") if legacy.is_bytes(item) else item for item in value
        ]
        if legacy.is_bytes(key):
            key = key.decode("utf-8")
        decoded[key] = items
    return decoded
1656
+
1657
+
1658
def load_form(form):
    """
    Converts a linear form map (where nested attributes are encoded
    using dot separated names) into its structured dictionary based
    representation.

    Single element sequences are unpacked into their plain value
    while multi valued sequences are kept as complete lists.

    :type form: Dictionary
    :param form: The linear map of form attributes to be converted.
    :rtype: Dictionary
    :return: The structured version of the provided form with the
    proper nested dictionaries in place.
    """

    form_s = dict()
    for name in form:
        # unpacks a single element sequence into its plain value,
        # keeping any other value (including larger sequences) as-is
        value = form[name]
        if isinstance(value, (list, tuple)) and len(value) == 1:
            value = value[0]

        # breaks the (possibly) dot separated name into the various
        # partial names and the final (leaf) attribute name
        names = name.split(".")
        partials, final = names[:-1], names[-1]

        # walks through the partial names creating (or reusing) the
        # intermediate dictionaries and then assigns the value to the
        # final name in the deepest structure
        struct = form_s
        for partial in partials:
            struct = struct.setdefault(partial, {})
        struct[final] = value

    # returns the final "normalized" form structure containing the
    # chained maps resulting from the linear attribute names
    return form_s
1703
+
1704
+
1705
def check_login(self, token=None, request=None):
    # resolves the request to be used, defaulting to the one
    # available in the current context when none is provided
    request = request or (self.request if self else None)

    # normalizes the token value into a sequence of tokens so
    # that a uniform validation process may be applied
    tokens = token if isinstance(token, SEQUENCE_TYPES) else (token,)

    # the check succeeds either when there's an authenticated
    # user and no token is required or when the complete set
    # of required tokens is valid for the current context
    if check_user(self, request=request) and not token:
        return True
    if check_tokens(self, tokens, request=request):
        return True

    # all of the validation strategies have failed, so the
    # login check is considered to be invalid
    return False
1728
+
1729
+
1730
def check_user(self, request=None):
    # resolves the request either from the provided arguments or
    # from the current context (when a valid "self" is available)
    request = request or (self.request if self else None)

    # without a valid request no authentication information may
    # be retrieved, so the user is considered not authenticated
    if not request:
        return False

    # the user is considered authenticated when the session holds
    # a username value or when a tokens "provider method" has been
    # attached to the current request
    if "username" in request.session:
        return True
    return hasattr(request, "tokens_p")
1746
+
1747
+
1748
def check_token(self, token, tokens_m=None, request=None):
    # an empty or invalid token is always considered valid as
    # there's effectively nothing to be verified
    if not token:
        return True

    # defaults the tokens map to the one currently associated
    # with the session whenever no explicit map is provided
    if tokens_m == None:
        tokens_m = get_tokens_m(self, request=request)

    # walks the (dot separated) namespace parts of the token down
    # the tokens map, validating each of the intermediate levels,
    # a wildcard value at any level grants immediate access
    for part in token.split("."):
        if not isinstance(tokens_m, dict):
            return False
        if tokens_m.get("*", False) == True:
            return True
        if not part in tokens_m:
            return False
        tokens_m = tokens_m[part]

    # the leaf level may be either a plain value or a dictionary
    # with the special "_" key marking the concrete permission
    result = tokens_m.get("_", False) if isinstance(tokens_m, dict) else tokens_m
    return True if result == True else False
1783
+
1784
+
1785
def check_tokens(self, tokens, tokens_m=None, request=None):
    # validates every token in the provided sequence against the
    # current context, the complete check is only valid when all
    # of the individual tokens succeed (short-circuits on failure)
    return all(
        check_token(self, token, tokens_m=tokens_m, request=request)
        for token in tokens
    )
1794
+
1795
+
1796
def ensure_login(self, token=None, context=None, request=None):
    # resolves the request and verifies that there's a user
    # currently authenticated, raising an exception otherwise
    request = request or (self.request if self else None)
    if not check_user(self, request=request):
        raise exceptions.AppierException(
            message="User not authenticated", code=403, token=token, context=context
        )

    # validates the (optional) token against the current context,
    # raising an exception when the permissions are not enough
    if not check_token(self, token, request=request):
        raise exceptions.AppierException(
            message="Not enough permissions", code=403, token=token, context=context
        )
1808
+
1809
+
1810
def get_tokens_m(self, request=None, set=None):
    """
    Retrieves the map of tokens from the current session so that
    they can be used for proper ACL validation.

    In case the current session contains a sequence based representation
    of the tokens they are converted to their equivalent map value.

    :type request: Request
    :param request: The request that is going to be used to access
    the session information, if any.
    :type set: bool
    :param set: If the possibly converted tokens list should be persisted
    into the current session, sparing some CPU cycles on next execution,
    in case no value is provided a default value is applied taking into
    account the current execution context.
    :rtype: Dictionary
    :return: The map of tokens to be used for ACL validation.
    """

    # resolves the request from the current context when none has
    # been explicitly provided, an empty map is returned when no
    # valid request is available
    request = request or (self.request if self else None)
    if not request:
        return dict()

    # ensures a proper default value for the set flag, avoiding
    # session persistence whenever a tokens "provider method" is
    # in use for the current request
    if set == None:
        set = not hasattr(request, "tokens_p")

    # obtains the tokens value either from the "provider method"
    # attached to the request or from the session itself, any
    # failure in this retrieval results in an empty map
    try:
        if hasattr(request, "tokens_p"):
            tokens_m = request.tokens_p()
        else:
            tokens_m = request.session.get("tokens", {})
    except Exception:
        return dict()

    # a map based value is already in the expected format and so
    # it may be returned immediately to the caller method
    if isinstance(tokens_m, dict):
        return tokens_m

    # a sequence based value must first be converted (normalized)
    # into the equivalent map structure, optionally persisting the
    # result back into the session (may be dangerous)
    if isinstance(tokens_m, (list, tuple)):
        tokens_m = to_tokens_m(tokens_m)
        if set:
            request.session["tokens"] = tokens_m
        return tokens_m

    # no valid tokens information could be retrieved from the
    # current context so the default empty map is returned
    return dict()
1882
+
1883
+
1884
def to_tokens_m(tokens):
    """
    Converts a sequence of (dot separated) token strings into the
    equivalent map based representation, suitable for structured
    ACL verification.

    :type tokens: List
    :param tokens: The sequence of tokens that is going to be
    converted into the map based form.
    :rtype: Dictionary
    :return: The map version of the provided tokens sequence.
    """

    tokens_m = dict()
    for token in tokens:
        # walks through the namespace parts of the current token,
        # creating the intermediate dictionaries and promoting any
        # plain values into the special "_" marked structure
        node = tokens_m
        parts = token.split(".")
        for part in parts[:-1]:
            child = node.get(part, {})
            if not isinstance(child, dict):
                child = {"_": child}
            node[part] = child
            node = child

        # marks the leaf part as allowed, taking care not to
        # discard a previously created (dict based) sub-structure
        last = parts[-1]
        current = node.get(last, None)
        if current and isinstance(current, dict):
            current["_"] = True
        else:
            node[last] = True

    # returns the final map version of the tokens to the caller
    # method so that it may be used for structure verification
    return tokens_m
1914
+
1915
+
1916
def dict_merge(first, second, override=True, recursive=False, callback=None):
    """
    Merges two dictionaries, optionally using a deep (recursive)
    strategy to achieve the merge.

    The default "way" of the merge is from the second to the first
    and overriding the values of the first dictionary.

    :type first: Dictionary
    :param first: The target dictionary of the merge operation and
    that will have its contents overriden if requested.
    :type second: Dictionary
    :param second: The base dictionary of the merge that will be
    "copied" into the first one.
    :type override: bool
    :param override: If the contents of the first dictionary should
    be overriden (overwritten) in case of "collision".
    :type recursive: bool
    :param recursive: If the merge operation should be performed using
    a deep and recursive approach for dictionary types.
    :type callback: Function
    :param callback: Optional function to be called in case there's
    a conflicting value for the same key with both the first and second
    values to be merged, allowing control over merge operations, this
    is only used in case of a recursive approach.
    :rtype: Dictionary
    :return: The resulting dictionary (new instance) from the merge
    operation of the second dictionary into the first.
    """

    # in case no override exists then the order of the items is
    # exchanged so that the first overrides the second values
    # and not the exact opposite
    if not override:
        first, second = second, first

    # in case the recursive flag is set, must iterate over all
    # of the first items to try to merge any possible dictionary
    # value using a recursive strategy
    if recursive:
        # creates the dictionary that is going to store the final
        # merged value resulting from both dictionaries
        final = dict()

        # runs the main iteration cycles around the first dictionary
        # trying to find possible conflicts that would required a
        # smarter merge strategy
        for key, value in legacy.iteritems(first):
            # in case the current key is not present in the second
            # dictionary (there's no conflict) and so a simple set
            # strategy should be applied
            if not key in second:
                final[key] = value
                continue

            # grabs the other (second) value that is going to be used
            # as the basis for the merge operation
            other = second[key]

            # in case a callback is defined calls it to determine the
            # final merged value from both the original and the other
            # NOTE(review): the callback is not propagated into the
            # recursive dict_merge call below, so it only applies at
            # the current depth — confirm whether this is intentional
            if callback:
                final[key] = callback(value, other)

            # if it represents a dictionary (smart merge) then both
            # values are going to be merged recursively, the values
            # are swapped back first so that the swap performed at the
            # start of the recursive call restores the proper ordering
            # (preserving the override semantics down the recursion)
            elif isinstance(value, dict) and isinstance(other, dict):
                if not override:
                    value, other = other, value
                final[key] = dict_merge(
                    value, other, override=override, recursive=recursive
                )

            # otherwise the previous value is simply replaced with the
            # the other value, (fallback operation) this is considered
            # to be a non smart merge operation
            else:
                final[key] = other

        # runs the final iteration cycles around the second dictionary
        # values to try to set the unique second values in the final
        for key, value in legacy.iteritems(second):
            if key in final:
                continue
            final[key] = value

        # returns the final merged result to the caller method, this
        # result should contain all of its dictionary values properly
        # merged within both the first and second values
        return final

    # otherwise (uses a simple strategy) and creates a new dictionary
    # for the first value, then updates it with the second set of
    # dictionary values, returning then the newly created dictionary
    # to the caller method (basic update strategy)
    else:
        final = dict(first)
        final.update(second)
        return final
2015
+
2016
+
2017
def deprecated(message="Function %s is now deprecated"):
    """
    Decorator that marks a certain function or method as
    deprecated so that whenever such function is called
    an output messaged warns the developer about the
    deprecation (incentive).

    :type message: String
    :param message: The message template to be used in the
    output operation of the error.
    :rtype: Decorator
    :return: The decorator that should be used to wrap a
    function and mark it as deprecated (send warning).
    """

    def decorator(function):
        # resolves the function's name (may be missing for some
        # callables) to be interpolated in the warning message
        name = getattr(function, "__name__", None)

        @functools.wraps(function)
        def interceptor(*args, **kwargs):
            # temporarily force deprecation warnings to always be
            # shown, emit the warning pointing at the caller and
            # then restore the default filtering behavior
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn(message % name, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter("default", DeprecationWarning)
            return function(*args, **kwargs)

        return interceptor

    return decorator
2045
+
2046
+
2047
def cached(function):
    """
    Decorator that marks a certain function as cached meaning that
    the local context of the instance associated with the function
    (method) is going to be used to store the result and further
    requests to the function will use the cached result, resulting
    in an improved resolution time.

    The life-cycle of the context is critical to avoid issues with
    invalid cache invalidation.

    :rtype: Decorator
    :return: The decorator that should be used to wrap a function
    marking it as ready to cache it's return value on current context.
    """

    name = function.__name__

    @functools.wraps(function)
    def _cached(self, *args, **kwargs):
        # tries to retrieve the current execution context, most
        # of the times this should be a request object for the
        # current temporary execution life-cycle
        context = get_context(self)

        # retrieves the properties map (if possible) and returns the
        # previously cached value immediately in case it exists
        properties = context.properties if context else None
        if properties and name in properties:
            return properties[name]

        # as no cache retrieval was possible executes the function
        # operation and caches the resulting value into the properties
        # map, uses an identity test against None so that an (unlikely)
        # empty but valid properties map is still populated
        value = function(self, *args, **kwargs)
        if properties is not None:
            properties[name] = value
        return value

    return _cached
2089
+
2090
+
2091
def private(function):
    """Decorator that requires an authenticated session before the
    wrapped method runs, removing unsupported keyword arguments."""

    @functools.wraps(function)
    def _private(self, *args, **kwargs):
        should_ensure = kwargs.get("ensure", True)
        target_request = kwargs.get("request", self.request)
        if should_ensure:
            ensure_login(self, request=target_request)
        sanitize(function, kwargs)
        return function(self, *args, **kwargs)

    return _private
2102
+
2103
+
2104
def ensure(token=None, context=None):
    """Decorator generator that enforces an authenticated session,
    optionally requiring a specific token and context, before the
    wrapped method is executed."""

    def decorator(function):
        @functools.wraps(function)
        def interceptor(self, *args, **kwargs):
            should_ensure = kwargs.get("ensure", True)
            target_request = kwargs.get("request", self.request)
            if should_ensure:
                ensure_login(self, token=token, context=context, request=target_request)
            sanitize(function, kwargs)
            return function(self, *args, **kwargs)

        return interceptor

    return decorator
2118
+
2119
+
2120
def delayed(function):
    """Decorator that defers execution of the wrapped method by
    scheduling it on the owner's delay (task) queue instead of
    running it inline."""

    @functools.wraps(function)
    def _delayed(self, *args, **kwargs):
        complete = [self]
        complete.extend(args)
        return self.owner.delay(function, complete, kwargs)

    return _delayed
2127
+
2128
+
2129
def route(url, method="GET", asynchronous=False, json=False, opts=None, priority=1):
    """Decorator that registers the wrapped function as the handler
    for the provided URL and HTTP method; for detached methods the
    registration is deferred until class binding time."""

    def decorator(function, *args, **kwargs):
        if not is_detached(function):
            common.base().App.add_route(
                method,
                url,
                function,
                asynchronous=asynchronous,
                json=json,
                opts=opts,
                priority=priority,
            )
        else:
            delay(function, *args, **kwargs)
        return function

    def delay(function, *args, **kwargs):
        # stores the route description on the function itself so
        # it can be picked up later, tagging it with a creation
        # counter to preserve declaration order
        global CREATION_COUNTER
        route = (url, method, asynchronous, json, opts, priority)
        routes = getattr(function, "_routes", None)
        if routes is None:
            routes = function._routes = []
        routes.append(route)
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1

    return decorator
2155
+
2156
+
2157
def error_handler(code, scope=None, json=None, opts=None, priority=1):
    """Decorator that registers the wrapped function as the handler
    for the provided HTTP error code; for detached methods the
    registration is deferred until class binding time."""

    def decorator(function, *args, **kwargs):
        if not is_detached(function):
            common.base().App.add_error(
                code, function, scope=scope, json=json, opts=opts, priority=priority
            )
        else:
            delay(function, *args, **kwargs)
        return function

    def delay(function, *args, **kwargs):
        # stores the error description on the function itself so it
        # can be picked up later, tagging it with a creation counter
        # to preserve declaration order
        global CREATION_COUNTER
        error = (code, scope, json, opts, priority)
        errors = getattr(function, "_errors", None)
        if errors is None:
            errors = function._errors = []
        errors.append(error)
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    return decorator
2178
+
2179
+
2180
def exception_handler(exception, scope=None, json=None, opts=None, priority=1):
    """Decorator that registers the wrapped function as the handler
    for the provided exception class; for detached methods the
    registration is deferred until class binding time."""

    def decorator(function, *args, **kwargs):
        if not is_detached(function):
            common.base().App.add_exception(
                exception,
                function,
                scope=scope,
                json=json,
                opts=opts,
                priority=priority,
            )
        else:
            delay(function, *args, **kwargs)
        return function

    def delay(function, *args, **kwargs):
        # stores the exception description on the function itself so
        # it can be picked up later, tagging it with a creation
        # counter to preserve declaration order
        global CREATION_COUNTER
        _exception = (exception, scope, json, opts, priority)
        exceptions_l = getattr(function, "_exceptions", None)
        if exceptions_l is None:
            exceptions_l = function._exceptions = []
        exceptions_l.append(_exception)
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    return decorator
2206
+
2207
+
2208
def before_request(scope="all", opts=None, priority=1):
    """Decorator that registers the wrapped function to be executed
    before the handling of every request; for detached methods the
    registration is deferred until class binding time."""

    def decorator(function, *args, **kwargs):
        if not is_detached(function):
            common.base().App.add_custom(
                "before_request", function, opts=opts, priority=priority
            )
        else:
            delay(function, *args, **kwargs)
        return function

    def delay(function, *args, **kwargs):
        # stores the custom handler description on the function itself
        # so it can be picked up later, tagging it with a creation
        # counter to preserve declaration order
        global CREATION_COUNTER
        customs = getattr(function, "_customs", None)
        if customs is None:
            customs = function._customs = []
        customs.append(("before_request", opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    return decorator
2229
+
2230
+
2231
def after_request(scope="all", opts=None, priority=1):
    """Decorator that registers the wrapped function to be executed
    after the handling of every request; for detached methods the
    registration is deferred until class binding time."""

    def decorator(function, *args, **kwargs):
        if not is_detached(function):
            common.base().App.add_custom(
                "after_request", function, opts=opts, priority=priority
            )
        else:
            delay(function, *args, **kwargs)
        return function

    def delay(function, *args, **kwargs):
        # stores the custom handler description on the function itself
        # so it can be picked up later, tagging it with a creation
        # counter to preserve declaration order
        global CREATION_COUNTER
        customs = getattr(function, "_customs", None)
        if customs is None:
            customs = function._customs = []
        customs.append(("after_request", opts, priority))
        function.creation_counter = CREATION_COUNTER
        CREATION_COUNTER += 1
        return function

    return decorator
2252
+
2253
+
2254
def is_detached(function):
    """
    Verifies if the provided function value is considered to be
    a detached method from a class, this is valid for situations
    where the type of the value is a function and there's a reference
    to the parent class of definition.

    This method is not completely safe as it relies on the fact that
    by convention the first argument of a "future" method is the "self"
    one, meaning that a "normal function" would be detected as a
    method if the first argument of it is named self.

    :type function: Function
    :param function: The function value that is going to be evaluated
    for the presence of a detached method.
    :rtype: bool
    :return: If the provided function value refers a detached method
    of a certain class.
    """

    # only plain function objects may possibly represent a detached
    # method, any other type is immediately rejected
    if not isinstance(function, types.FunctionType):
        return False

    # obtains the function's argument specification, a method-like
    # function must provide a valid spec with at least one argument
    spec = legacy.getargspec(function)
    if not spec or not spec.args:
        return False

    # by convention a detached method names its first (instance)
    # argument "self", which is the final criteria applied
    return spec.args[0] == "self"
2293
+
2294
+
2295
def sanitize(function, kwargs):
    """Removes (in-place) from the kwargs map every keyword that the
    provided function does not declare in its signature."""

    allowed = legacy.getargspec(function)[0]
    extra = [name for name in kwargs if not name in allowed]
    for name in extra:
        del kwargs[name]
2304
+
2305
+
2306
def verify(condition, message=None, code=None, exception=None, **kwargs):
    """
    Verifies that the provided condition is valid (truthy), raising
    the requested exception otherwise.

    :type condition: bool
    :param condition: The value to be evaluated for truthiness, a
    falsy value triggers the raising of the exception.
    :type message: String
    :param message: Optional message to be set in the raised exception.
    :type code: int
    :param code: Optional (error) code to be set in the raised exception.
    :type exception: Exception
    :param exception: The exception class to be raised on failure,
    defaults to the framework's assertion error.
    """

    # a truthy condition means there's nothing to report, returns
    # immediately to the caller
    if condition:
        return

    # defaults to the framework's assertion error and builds the
    # keyword arguments for its construction, identity checks against
    # None ensure that falsy (but valid) values such as an empty
    # message or a zero code are still propagated
    exception = exception or exceptions.AssertionError
    kwargs = dict(kwargs)
    if message is not None:
        kwargs["message"] = message
    if code is not None:
        kwargs["code"] = code
    raise exception(**kwargs)
2316
+
2317
+
2318
def verify_equal(first, second, message=None, code=None, exception=None, **kwargs):
    """Verifies that both values are equal, raising otherwise with a
    default message describing the expectation."""

    if not message:
        message = "Expected %s got %s" % (repr(second), repr(first))
    return verify(
        first == second, message=message, code=code, exception=exception, **kwargs
    )
2323
+
2324
+
2325
def verify_not_equal(first, second, message=None, code=None, exception=None, **kwargs):
    """Verifies that both values are not equal, raising otherwise with
    a default message describing the expectation."""

    if not message:
        message = "Expected %s not equal to %s" % (repr(first), repr(second))
    return verify(
        not first == second, message=message, code=code, exception=exception, **kwargs
    )
2330
+
2331
+
2332
def verify_type(
    value, types, null=True, message=None, code=None, exception=None, **kwargs
):
    """
    Verifies that the provided value is an instance of the requested
    type (or tuple of types), raising otherwise.

    :type null: bool
    :param null: If a None value should be accepted regardless of
    the requested types.
    """

    message = message or "Expected %s to have type %s" % (repr(value), repr(types))
    # uses an identity check against None instead of equality so that
    # values overriding __eq__ (eg: numpy arrays) cannot interfere
    # with the null acceptance test
    return verify(
        (null and value is None) or isinstance(value, types),
        message=message,
        code=code,
        exception=exception,
        **kwargs
    )
2343
+
2344
+
2345
def verify_many(sequence, message=None, code=None, exception=None, **kwargs):
    """Verifies every condition in the provided sequence, raising on
    the first falsy one found."""

    for item in sequence:
        verify(item, message=message, code=code, exception=exception, **kwargs)
2348
+
2349
+
2350
def execute(args, command=None, path=None, shell=None, encoding=None):
    """
    Executes the provided command (or argument list) as a child
    process, returning a map with the decoded standard output,
    standard error and exit code of the process.

    :type args: List
    :param args: The sequence of program arguments to be executed.
    :type command: String
    :param command: Optional complete command string, when provided
    it is split by spaces and overrides the args value.
    :type path: String
    :param path: Optional working directory for the child process.
    :type shell: bool
    :param shell: If the command should run through the shell,
    defaults to True only on Windows.
    :type encoding: String
    :param encoding: Encoding used to decode the output streams,
    defaults to the file system encoding.
    :rtype: Dictionary
    :return: Map with the stdout, stderr and code of the process.
    """

    if shell is None:
        shell = os.name == "nt"
    if not encoding:
        encoding = sys.getfilesystemencoding()
    if command:
        args = command.split(" ")
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=path
    )
    # uses communicate() instead of wait() followed by read() so that
    # both pipes are drained concurrently, avoiding a deadlock when
    # the child writes more data than the OS pipe buffer can hold
    stdout, stderr = process.communicate()
    code = process.returncode
    stdout = stdout.decode(encoding)
    stderr = stderr.decode(encoding)
    return dict(stdout=stdout, stderr=stderr, code=code)
2366
+
2367
+
2368
@contextlib.contextmanager
def ctx_locale(name="", force=False):
    """Context manager that temporarily switches the process-wide
    locale to the requested one, restoring the previously active
    locale once the block exits (even on error)."""

    previous = locale.setlocale(locale.LC_ALL)
    # when the requested locale is already active (and no force flag
    # is set) there's nothing to switch, yields the current value
    if previous == name and not force:
        yield previous
        return
    try:
        yield locale.setlocale(locale.LC_ALL, name)
    finally:
        locale.setlocale(locale.LC_ALL, previous)
2378
+
2379
+
2380
@contextlib.contextmanager
def ctx_request(app=None):
    """Context manager that swaps the app's current request with its
    mock request for the duration of the block, restoring the
    original request object afterwards."""

    app = app or common.base().get_app()
    previous = app._request
    app._request = app._mock
    try:
        yield True
    finally:
        app._request = previous
2389
+
2390
+
2391
class FileTuple(tuple):
    """
    Tuple class (inherits from tuple) that represents
    the name, content type and (data) contents of a file
    in the context of the appier infra-structure.

    This class shares many of the signature with the
    typical python file interface, allowing most of
    the operation to be performed (eg: read, seek,
    tell, etc.).
    """

    def __init__(self, *args, **kwargs):
        # the tuple contents are defined by __new__ (tuples are
        # immutable), so the initializer only has to create the
        # cursor used by the file-like read/seek operations; note
        # that self must be passed explicitly to the base call
        tuple.__init__(self)
        self._position = 0

    @classmethod
    def from_data(cls, data, name=None, mime=None):
        """Creates a file tuple from already loaded (bytes) data."""
        file_tuple = cls((name, mime, data))
        return file_tuple

    @classmethod
    def from_file(cls, file, name=None, mime=None):
        """Creates a file tuple by reading the complete contents of
        the provided (open) file object."""
        data = file.read()
        file_tuple = cls.from_data(data, name=name, mime=mime)
        return file_tuple

    @classmethod
    def from_path(cls, path, name=None, mime=None, guess=True):
        """Creates a file tuple from a file system path, optionally
        guessing the MIME type from the path's extension."""
        mime = cls.guess(path) if mime is None and guess else mime
        file = open(path, "rb")
        try:
            file_tuple = cls.from_file(file, name=name, mime=mime)
        finally:
            file.close()
        return file_tuple

    @classmethod
    def guess(cls, name):
        """Guesses the MIME type for the provided file name, returning
        None in case no type can be determined."""
        mime = mimetypes.guess_type(name, strict=False)[0]
        if mime:
            return mime
        return None

    def read(self, count=None):
        """Reads up to count bytes from the current position, reading
        until the end of the contents when no count is provided (a
        falsy count keeps the original read-all behavior)."""
        data = self[2]
        if not count:
            chunk = data[self._position :]
        else:
            chunk = data[self._position : self._position + count]
        # advances the cursor by the amount effectively returned so
        # that tell() never reports a position past the end of the
        # data (a large count near the end previously over-advanced)
        self._position += len(chunk)
        return chunk

    def seek(self, offset, whence=os.SEEK_SET):
        """Moves the internal cursor according to the whence mode,
        following the standard file protocol semantics."""
        if whence == os.SEEK_SET:
            self._position = offset
        if whence == os.SEEK_CUR:
            self._position += offset
        if whence == os.SEEK_END:
            self._position = len(self[2]) + offset

    def tell(self):
        """Returns the current position of the internal cursor."""
        return self._position

    def save(self, path, close=True):
        """Writes the contents to the provided path (or already open
        file object), optionally closing the target afterwards."""
        contents = self[2]
        if legacy.is_string(path):
            file = open(path, "wb")
        else:
            file = path
        try:
            file.write(contents)
        finally:
            if close:
                file.close()

    def seekable(self):
        # contents are fully held in memory, so seeking is always
        # possible for this file-like object
        return True

    @property
    def name(self):
        # the first element of the tuple holds the file name
        return self[0]

    @property
    def mime(self):
        # the second element of the tuple holds the content (MIME) type
        return self[1]

    @property
    def data(self):
        # the third element of the tuple holds the raw contents
        return self[2]
2483
+
2484
+
2485
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.

    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """

    def __init__(self, owner=None, daemon=False, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # stores the owner object whose start method is going to be
        # called once the thread runs, also sets the daemon flag
        self.owner = owner
        self.daemon = daemon

    def run(self):
        threading.Thread.run(self)
        owner = self.owner
        if not owner:
            return
        # starts the owner's (long running) logic and unsets the
        # reference so it is not started a second time
        owner.start()
        self.owner = None
2505
+
2506
+
2507
class JSONEncoder(json.JSONEncoder):
    """
    Custom JSON encoder that delegates serialization of otherwise
    unserializable objects to their own "json_v" method and, when
    permissive, falls back to a plain string conversion.
    """

    def __init__(self, *args, **kwargs):
        # when permissive is set (the default) unknown objects are
        # coerced with str() instead of raising a TypeError
        self.permissive = kwargs.pop("permissive", True)
        json.JSONEncoder.__init__(self, *args, **kwargs)

    def default(self, obj, **kwargs):
        # objects that know how to represent themselves as JSON
        # (expose a json_v method) take precedence over any fallback
        if hasattr(obj, "json_v"):
            return obj.json_v()
        if self.permissive:
            return str(obj)
        # non-permissive mode defers to the base implementation,
        # which raises TypeError for unserializable values
        return json.JSONEncoder.default(self, obj, **kwargs)