swift 2.32.0__py2.py3-none-any.whl → 2.34.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. swift/account/auditor.py +11 -0
  2. swift/account/reaper.py +11 -1
  3. swift/account/replicator.py +22 -0
  4. swift/account/server.py +13 -12
  5. swift-2.32.0.data/scripts/swift-account-audit → swift/cli/account_audit.py +6 -2
  6. swift-2.32.0.data/scripts/swift-config → swift/cli/config.py +1 -1
  7. swift-2.32.0.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +6 -2
  8. swift-2.32.0.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +12 -3
  9. swift-2.32.0.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +6 -2
  10. swift/cli/info.py +131 -3
  11. swift-2.32.0.data/scripts/swift-oldies → swift/cli/oldies.py +6 -3
  12. swift-2.32.0.data/scripts/swift-orphans → swift/cli/orphans.py +7 -2
  13. swift-2.32.0.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +9 -18
  14. swift-2.32.0.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
  15. swift/cli/relinker.py +1 -1
  16. swift/cli/reload.py +141 -0
  17. swift/cli/ringbuilder.py +24 -0
  18. swift/common/daemon.py +12 -2
  19. swift/common/db.py +14 -9
  20. swift/common/db_auditor.py +2 -2
  21. swift/common/db_replicator.py +6 -0
  22. swift/common/exceptions.py +12 -0
  23. swift/common/http_protocol.py +76 -3
  24. swift/common/manager.py +120 -5
  25. swift/common/memcached.py +24 -25
  26. swift/common/middleware/account_quotas.py +144 -43
  27. swift/common/middleware/backend_ratelimit.py +166 -24
  28. swift/common/middleware/catch_errors.py +1 -3
  29. swift/common/middleware/cname_lookup.py +3 -5
  30. swift/common/middleware/container_sync.py +6 -10
  31. swift/common/middleware/crypto/crypto_utils.py +4 -5
  32. swift/common/middleware/crypto/decrypter.py +4 -5
  33. swift/common/middleware/crypto/kms_keymaster.py +2 -1
  34. swift/common/middleware/proxy_logging.py +57 -43
  35. swift/common/middleware/ratelimit.py +6 -7
  36. swift/common/middleware/recon.py +6 -7
  37. swift/common/middleware/s3api/acl_handlers.py +10 -1
  38. swift/common/middleware/s3api/controllers/__init__.py +3 -0
  39. swift/common/middleware/s3api/controllers/acl.py +3 -2
  40. swift/common/middleware/s3api/controllers/logging.py +2 -2
  41. swift/common/middleware/s3api/controllers/multi_upload.py +31 -15
  42. swift/common/middleware/s3api/controllers/obj.py +20 -1
  43. swift/common/middleware/s3api/controllers/object_lock.py +44 -0
  44. swift/common/middleware/s3api/s3api.py +6 -0
  45. swift/common/middleware/s3api/s3request.py +190 -74
  46. swift/common/middleware/s3api/s3response.py +48 -8
  47. swift/common/middleware/s3api/s3token.py +2 -2
  48. swift/common/middleware/s3api/utils.py +2 -1
  49. swift/common/middleware/slo.py +508 -310
  50. swift/common/middleware/staticweb.py +45 -14
  51. swift/common/middleware/tempauth.py +6 -4
  52. swift/common/middleware/tempurl.py +134 -93
  53. swift/common/middleware/x_profile/exceptions.py +1 -4
  54. swift/common/middleware/x_profile/html_viewer.py +9 -10
  55. swift/common/middleware/x_profile/profile_model.py +1 -2
  56. swift/common/middleware/xprofile.py +1 -2
  57. swift/common/request_helpers.py +101 -8
  58. swift/common/statsd_client.py +207 -0
  59. swift/common/storage_policy.py +1 -1
  60. swift/common/swob.py +5 -2
  61. swift/common/utils/__init__.py +331 -1774
  62. swift/common/utils/base.py +138 -0
  63. swift/common/utils/config.py +443 -0
  64. swift/common/utils/logs.py +999 -0
  65. swift/common/utils/timestamp.py +23 -2
  66. swift/common/wsgi.py +19 -3
  67. swift/container/auditor.py +11 -0
  68. swift/container/backend.py +136 -31
  69. swift/container/reconciler.py +11 -2
  70. swift/container/replicator.py +64 -7
  71. swift/container/server.py +276 -146
  72. swift/container/sharder.py +86 -42
  73. swift/container/sync.py +11 -1
  74. swift/container/updater.py +12 -2
  75. swift/obj/auditor.py +20 -3
  76. swift/obj/diskfile.py +63 -25
  77. swift/obj/expirer.py +154 -47
  78. swift/obj/mem_diskfile.py +2 -1
  79. swift/obj/mem_server.py +1 -0
  80. swift/obj/reconstructor.py +28 -4
  81. swift/obj/replicator.py +63 -24
  82. swift/obj/server.py +76 -59
  83. swift/obj/updater.py +12 -2
  84. swift/obj/watchers/dark_data.py +72 -34
  85. swift/proxy/controllers/account.py +3 -2
  86. swift/proxy/controllers/base.py +254 -148
  87. swift/proxy/controllers/container.py +274 -289
  88. swift/proxy/controllers/obj.py +120 -166
  89. swift/proxy/server.py +17 -13
  90. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/AUTHORS +14 -4
  91. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/METADATA +9 -7
  92. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/RECORD +97 -120
  93. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/entry_points.txt +39 -0
  94. swift-2.34.0.dist-info/pbr.json +1 -0
  95. swift-2.32.0.data/scripts/swift-account-auditor +0 -23
  96. swift-2.32.0.data/scripts/swift-account-info +0 -52
  97. swift-2.32.0.data/scripts/swift-account-reaper +0 -23
  98. swift-2.32.0.data/scripts/swift-account-replicator +0 -34
  99. swift-2.32.0.data/scripts/swift-account-server +0 -23
  100. swift-2.32.0.data/scripts/swift-container-auditor +0 -23
  101. swift-2.32.0.data/scripts/swift-container-info +0 -56
  102. swift-2.32.0.data/scripts/swift-container-reconciler +0 -21
  103. swift-2.32.0.data/scripts/swift-container-replicator +0 -34
  104. swift-2.32.0.data/scripts/swift-container-server +0 -23
  105. swift-2.32.0.data/scripts/swift-container-sharder +0 -37
  106. swift-2.32.0.data/scripts/swift-container-sync +0 -23
  107. swift-2.32.0.data/scripts/swift-container-updater +0 -23
  108. swift-2.32.0.data/scripts/swift-dispersion-report +0 -24
  109. swift-2.32.0.data/scripts/swift-form-signature +0 -20
  110. swift-2.32.0.data/scripts/swift-init +0 -119
  111. swift-2.32.0.data/scripts/swift-object-auditor +0 -29
  112. swift-2.32.0.data/scripts/swift-object-expirer +0 -33
  113. swift-2.32.0.data/scripts/swift-object-info +0 -60
  114. swift-2.32.0.data/scripts/swift-object-reconstructor +0 -33
  115. swift-2.32.0.data/scripts/swift-object-relinker +0 -23
  116. swift-2.32.0.data/scripts/swift-object-replicator +0 -37
  117. swift-2.32.0.data/scripts/swift-object-server +0 -27
  118. swift-2.32.0.data/scripts/swift-object-updater +0 -23
  119. swift-2.32.0.data/scripts/swift-proxy-server +0 -23
  120. swift-2.32.0.data/scripts/swift-recon +0 -24
  121. swift-2.32.0.data/scripts/swift-ring-builder +0 -37
  122. swift-2.32.0.data/scripts/swift-ring-builder-analyzer +0 -22
  123. swift-2.32.0.data/scripts/swift-ring-composer +0 -22
  124. swift-2.32.0.dist-info/pbr.json +0 -1
  125. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/LICENSE +0 -0
  126. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/WHEEL +0 -0
  127. {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/top_level.txt +0 -0
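The most consequential entry above is swift/common/utils/__init__.py (+131 -1774 per item 61): the module is split into swift/common/utils/base.py, config.py, and logs.py, plus a new swift/common/statsd_client.py. The '# noqa' re-import block in the first large hunk below keeps the old names importable, so both spellings should resolve in 2.34.0. A minimal compatibility sketch (module paths are taken from the diff; the snippet itself is illustrative):

    # Old spelling, still available via the re-exports kept in
    # swift/common/utils/__init__.py:
    from swift.common.utils import config_true_value, get_logger, StatsdClient

    # New canonical homes in 2.34.0:
    from swift.common.utils.config import config_true_value
    from swift.common.utils.logs import get_logger
    from swift.common.statsd_client import StatsdClient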
swift/common/utils/__init__.py
@@ -24,9 +24,7 @@ import collections
 import errno
 import fcntl
 import grp
-import hashlib
 import json
-import operator
 import os
 import pwd
 import re
@@ -37,7 +35,7 @@ import time
 import uuid
 import functools
 import email.parser
-from random import random, shuffle
+from random import shuffle
 from contextlib import contextmanager, closing
 import ctypes
 import ctypes.util
@@ -49,7 +47,6 @@ from tempfile import gettempdir, mkstemp, NamedTemporaryFile
 import glob
 import itertools
 import stat
-import datetime
 
 import eventlet
 import eventlet.debug
@@ -64,25 +61,16 @@ except ImportError:
 import pkg_resources
 from eventlet import GreenPool, sleep, Timeout
 from eventlet.event import Event
-from eventlet.green import socket, threading
+from eventlet.green import socket
 import eventlet.hubs
 import eventlet.queue
-import codecs
-utf8_decoder = codecs.getdecoder('utf-8')
-utf8_encoder = codecs.getencoder('utf-8')
 import six
-if six.PY2:
-    from eventlet.green import httplib as green_http_client
-else:
-    from eventlet.green.http import client as green_http_client
-utf16_decoder = codecs.getdecoder('utf-16')
-utf16_encoder = codecs.getencoder('utf-16')
+
 from six.moves import cPickle as pickle
-from six.moves import configparser
 from six.moves.configparser import (ConfigParser, NoSectionError,
-                                    NoOptionError, RawConfigParser)
-from six.moves import range, http_client
-from six.moves.urllib.parse import quote as _quote, unquote
+                                    NoOptionError)
+from six.moves import range
+from six.moves.urllib.parse import unquote
 from six.moves.urllib.parse import urlparse
 from six.moves import UserList
 
@@ -93,6 +81,53 @@ from swift.common.linkat import linkat
 
 # For backwards compatability with 3rd party middlewares
 from swift.common.registry import register_swift_info, get_swift_info  # noqa
+
+from .base import (  # noqa
+    md5, get_valid_utf8_str, quote, split_path)
+from swift.common.utils.logs import (  # noqa
+    SysLogHandler,  # t.u.helpers.setup_servers monkey patch is sketch
+    logging_monkey_patch,
+    get_logger,
+    PrefixLoggerAdapter,
+    LogLevelFilter,
+    NullLogger,
+    capture_stdio,
+    SwiftLogFormatter,
+    SwiftLoggerAdapter,
+    LogAdapter,
+    LoggerFileObject,
+    PipeMutex,
+    NoopMutex,
+    ThreadSafeSysLogHandler,
+    StrAnonymizer,
+    get_log_line,
+    StrFormatTime,
+    LogStringFormatter,
+    get_policy_index,
+    LOG_LINE_DEFAULT_FORMAT,
+    NOTICE,
+)
+from swift.common.utils.config import (  # noqa
+    TRUE_VALUES,
+    NicerInterpolation,
+    config_true_value,
+    append_underscore,
+    non_negative_float,
+    non_negative_int,
+    config_positive_int_value,
+    config_float_value,
+    config_auto_int_value,
+    config_percent_value,
+    config_request_node_count_value,
+    config_fallocate_value,
+    config_read_prefixed_options,
+    config_read_reseller_options,
+    parse_prefixed_conf,
+    affinity_locality_predicate,
+    affinity_key_function,
+    readconf,
+    read_conf_dir,
+)
 from swift.common.utils.libc import (  # noqa
     F_SETPIPE_SZ,
     load_libc_function,
@@ -114,6 +149,7 @@ from swift.common.utils.timestamp import (  # noqa
     EPOCH,
     last_modified_date_to_timestamp,
     normalize_delete_at_timestamp,
+    UTC,
 )
 from swift.common.utils.ipaddrs import (  # noqa
     is_valid_ip,
@@ -123,11 +159,9 @@ from swift.common.utils.ipaddrs import (  # noqa
     parse_socket_string,
     whataremyips,
 )
-from logging.handlers import SysLogHandler
+from swift.common.statsd_client import StatsdClient  # noqa
 import logging
 
-NOTICE = 25
-
 # These are lazily pulled from libc elsewhere
 _sys_fallocate = None
 
@@ -163,13 +197,12 @@ RESERVED_STR = u'\x00'
 RESERVED = '\x00'
 
 
-LOG_LINE_DEFAULT_FORMAT = '{remote_addr} - - [{time.d}/{time.b}/{time.Y}' \
-                          ':{time.H}:{time.M}:{time.S} +0000] ' \
-                          '"{method} {path}" {status} {content_length} ' \
-                          '"{referer}" "{txn_id}" "{user_agent}" ' \
-                          '{trans_time:.4f} "{additional_info}" {pid} ' \
-                          '{policy_index}'
 DEFAULT_LOCK_TIMEOUT = 10
+# this is coupled with object-server.conf's network_chunk_size; if someone is
+# running that unreasonably small they may find this number inefficient, but in
+# the more likely case they've increased the value to optimize high througput
+# transfers this will still cut off the transfer after the first chunk.
+DEFAULT_DRAIN_LIMIT = 65536
 
 
 class InvalidHashPathConfigError(ValueError):
@@ -274,199 +307,6 @@ def backward(f, blocksize=4096):
     yield last_row
 
 
-# Used when reading config values
-TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
-
-
-def non_negative_float(value):
-    """
-    Check that the value casts to a float and is non-negative.
-
-    :param value: value to check
-    :raises ValueError: if the value cannot be cast to a float or is negative.
-    :return: a float
-    """
-    try:
-        value = float(value)
-        if value < 0:
-            raise ValueError
-    except (TypeError, ValueError):
-        raise ValueError('Value must be a non-negative float number, not "%s".'
-                         % value)
-    return value
-
-
-def non_negative_int(value):
-    """
-    Check that the value casts to an int and is a whole number.
-
-    :param value: value to check
-    :raises ValueError: if the value cannot be cast to an int or does not
-        represent a whole number.
-    :return: an int
-    """
-    int_value = int(value)
-    if int_value != non_negative_float(value):
-        raise ValueError
-    return int_value
-
-
-def config_true_value(value):
-    """
-    Returns True if the value is either True or a string in TRUE_VALUES.
-    Returns False otherwise.
-    """
-    return value is True or \
-        (isinstance(value, six.string_types) and value.lower() in TRUE_VALUES)
-
-
-def config_positive_int_value(value):
-    """
-    Returns positive int value if it can be cast by int() and it's an
-    integer > 0. (not including zero) Raises ValueError otherwise.
-    """
-    try:
-        result = int(value)
-        if result < 1:
-            raise ValueError()
-    except (TypeError, ValueError):
-        raise ValueError(
-            'Config option must be an positive int number, not "%s".' % value)
-    return result
-
-
-def config_float_value(value, minimum=None, maximum=None):
-    try:
-        val = float(value)
-        if minimum is not None and val < minimum:
-            raise ValueError()
-        if maximum is not None and val > maximum:
-            raise ValueError()
-        return val
-    except (TypeError, ValueError):
-        min_ = ', greater than %s' % minimum if minimum is not None else ''
-        max_ = ', less than %s' % maximum if maximum is not None else ''
-        raise ValueError('Config option must be a number%s%s, not "%s".' %
-                         (min_, max_, value))
-
-
-def config_auto_int_value(value, default):
-    """
-    Returns default if value is None or 'auto'.
-    Returns value as an int or raises ValueError otherwise.
-    """
-    if value is None or \
-       (isinstance(value, six.string_types) and value.lower() == 'auto'):
-        return default
-    try:
-        value = int(value)
-    except (TypeError, ValueError):
-        raise ValueError('Config option must be an integer or the '
-                         'string "auto", not "%s".' % value)
-    return value
-
-
-def config_percent_value(value):
-    try:
-        return config_float_value(value, 0, 100) / 100.0
-    except ValueError as err:
-        raise ValueError("%s: %s" % (str(err), value))
-
-
-def config_request_node_count_value(value):
-    try:
-        value_parts = value.lower().split()
-        rnc_value = int(value_parts[0])
-    except (ValueError, AttributeError):
-        pass
-    else:
-        if len(value_parts) == 1:
-            return lambda replicas: rnc_value
-        elif (len(value_parts) == 3 and
-                value_parts[1] == '*' and
-                value_parts[2] == 'replicas'):
-            return lambda replicas: rnc_value * replicas
-    raise ValueError(
-        'Invalid request_node_count value: %r' % value)
-
-
-def append_underscore(prefix):
-    if prefix and not prefix.endswith('_'):
-        prefix += '_'
-    return prefix
-
-
-def config_read_reseller_options(conf, defaults):
-    """
-    Read reseller_prefix option and associated options from configuration
-
-    Reads the reseller_prefix option, then reads options that may be
-    associated with a specific reseller prefix. Reads options such that an
-    option without a prefix applies to all reseller prefixes unless an option
-    has an explicit prefix.
-
-    :param conf: the configuration
-    :param defaults: a dict of default values. The key is the option
-                     name. The value is either an array of strings or a string
-    :return: tuple of an array of reseller prefixes and a dict of option values
-    """
-    reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',')
-    reseller_prefixes = []
-    for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]:
-        if prefix == "''":
-            prefix = ''
-        prefix = append_underscore(prefix)
-        if prefix not in reseller_prefixes:
-            reseller_prefixes.append(prefix)
-    if len(reseller_prefixes) == 0:
-        reseller_prefixes.append('')
-
-    # Get prefix-using config options
-    associated_options = {}
-    for prefix in reseller_prefixes:
-        associated_options[prefix] = dict(defaults)
-        associated_options[prefix].update(
-            config_read_prefixed_options(conf, '', defaults))
-        prefix_name = prefix if prefix != '' else "''"
-        associated_options[prefix].update(
-            config_read_prefixed_options(conf, prefix_name, defaults))
-    return reseller_prefixes, associated_options
-
-
-def config_read_prefixed_options(conf, prefix_name, defaults):
-    """
-    Read prefixed options from configuration
-
-    :param conf: the configuration
-    :param prefix_name: the prefix (including, if needed, an underscore)
-    :param defaults: a dict of default values. The dict supplies the
-                     option name and type (string or comma separated string)
-    :return: a dict containing the options
-    """
-    params = {}
-    for option_name in defaults.keys():
-        value = conf.get('%s%s' % (prefix_name, option_name))
-        if value:
-            if isinstance(defaults.get(option_name), list):
-                params[option_name] = []
-                for role in value.lower().split(','):
-                    params[option_name].append(role.strip())
-            else:
-                params[option_name] = value.strip()
-    return params
-
-
-def logging_monkey_patch():
-    # explicitly patch the logging lock
-    logging._lock = logging.threading.RLock()
-    # setup notice level logging
-    logging.addLevelName(NOTICE, 'NOTICE')
-    SysLogHandler.priority_map['NOTICE'] = 'notice'
-    # Trying to log threads while monkey-patched can lead to deadlocks; see
-    # https://bugs.launchpad.net/swift/+bug/1895739
-    logging.logThreads = 0
-
-
 def eventlet_monkey_patch():
     """
     Install the appropriate Eventlet monkey patches.
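The helpers removed above are not deleted functionality: every name in this block reappears in the swift.common.utils.config re-import near the top of the file, so they appear to have moved to swift/common/utils/config.py intact. A quick behavioral sketch under that assumption, matching the removed code:

    from swift.common.utils.config import config_true_value, non_negative_int

    assert config_true_value('Yes') is True   # lowercased, checked against TRUE_VALUES
    assert config_true_value('off') is False  # anything outside TRUE_VALUES is false
    assert non_negative_int('7') == 7         # whole, non-negative values cast to int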
@@ -499,187 +339,6 @@ def generate_trans_id(trans_id_suffix):
         uuid.uuid4().hex[:21], int(time.time()), quote(trans_id_suffix))
 
 
-def get_policy_index(req_headers, res_headers):
-    """
-    Returns the appropriate index of the storage policy for the request from
-    a proxy server
-
-    :param req_headers: dict of the request headers.
-    :param res_headers: dict of the response headers.
-
-    :returns: string index of storage policy, or None
-    """
-    header = 'X-Backend-Storage-Policy-Index'
-    policy_index = res_headers.get(header, req_headers.get(header))
-    if isinstance(policy_index, six.binary_type) and not six.PY2:
-        policy_index = policy_index.decode('ascii')
-    return str(policy_index) if policy_index is not None else None
-
-
-class _UTC(datetime.tzinfo):
-    """
-    A tzinfo class for datetime objects that returns a 0 timedelta (UTC time)
-    """
-
-    def dst(self, dt):
-        return datetime.timedelta(0)
-    utcoffset = dst
-
-    def tzname(self, dt):
-        return 'UTC'
-
-
-UTC = _UTC()
-
-
-class LogStringFormatter(string.Formatter):
-    def __init__(self, default='', quote=False):
-        super(LogStringFormatter, self).__init__()
-        self.default = default
-        self.quote = quote
-
-    def format_field(self, value, spec):
-        if not value:
-            return self.default
-        else:
-            log = super(LogStringFormatter, self).format_field(value, spec)
-            if self.quote:
-                return quote(log, ':/{}')
-            else:
-                return log
-
-
-class StrAnonymizer(str):
-    """
-    Class that permits to get a string anonymized or simply quoted.
-    """
-
-    def __new__(cls, data, method, salt):
-        method = method.lower()
-        if method not in (hashlib.algorithms if six.PY2 else
-                          hashlib.algorithms_guaranteed):
-            raise ValueError('Unsupported hashing method: %r' % method)
-        s = str.__new__(cls, data or '')
-        s.method = method
-        s.salt = salt
-        return s
-
-    @property
-    def anonymized(self):
-        if not self:
-            return self
-        else:
-            if self.method == 'md5':
-                h = md5(usedforsecurity=False)
-            else:
-                h = getattr(hashlib, self.method)()
-            if self.salt:
-                h.update(six.b(self.salt))
-            h.update(six.b(self))
-            return '{%s%s}%s' % ('S' if self.salt else '', self.method.upper(),
-                                 h.hexdigest())
-
-
-class StrFormatTime(object):
-    """
-    Class that permits to get formats or parts of a time.
-    """
-
-    def __init__(self, ts):
-        self.time = ts
-        self.time_struct = time.gmtime(ts)
-
-    def __str__(self):
-        return "%.9f" % self.time
-
-    def __getattr__(self, attr):
-        if attr not in ['a', 'A', 'b', 'B', 'c', 'd', 'H',
-                        'I', 'j', 'm', 'M', 'p', 'S', 'U',
-                        'w', 'W', 'x', 'X', 'y', 'Y', 'Z']:
-            raise ValueError(("The attribute %s is not a correct directive "
-                              "for time.strftime formater.") % attr)
-        return datetime.datetime(*self.time_struct[:-2],
-                                 tzinfo=UTC).strftime('%' + attr)
-
-    @property
-    def asctime(self):
-        return time.asctime(self.time_struct)
-
-    @property
-    def datetime(self):
-        return time.strftime('%d/%b/%Y/%H/%M/%S', self.time_struct)
-
-    @property
-    def iso8601(self):
-        return time.strftime('%Y-%m-%dT%H:%M:%S', self.time_struct)
-
-    @property
-    def ms(self):
-        return self.__str__().split('.')[1][:3]
-
-    @property
-    def us(self):
-        return self.__str__().split('.')[1][:6]
-
-    @property
-    def ns(self):
-        return self.__str__().split('.')[1]
-
-    @property
-    def s(self):
-        return self.__str__().split('.')[0]
-
-
-def get_log_line(req, res, trans_time, additional_info, fmt,
-                 anonymization_method, anonymization_salt):
-    """
-    Make a line for logging that matches the documented log line format
-    for backend servers.
-
-    :param req: the request.
-    :param res: the response.
-    :param trans_time: the time the request took to complete, a float.
-    :param additional_info: a string to log at the end of the line
-
-    :returns: a properly formatted line for logging.
-    """
-
-    policy_index = get_policy_index(req.headers, res.headers)
-    if req.path.startswith('/'):
-        disk, partition, account, container, obj = split_path(req.path, 0, 5,
-                                                              True)
-    else:
-        disk, partition, account, container, obj = (None, ) * 5
-    replacements = {
-        'remote_addr': StrAnonymizer(req.remote_addr, anonymization_method,
-                                     anonymization_salt),
-        'time': StrFormatTime(time.time()),
-        'method': req.method,
-        'path': StrAnonymizer(req.path, anonymization_method,
-                              anonymization_salt),
-        'disk': disk,
-        'partition': partition,
-        'account': StrAnonymizer(account, anonymization_method,
-                                 anonymization_salt),
-        'container': StrAnonymizer(container, anonymization_method,
-                                   anonymization_salt),
-        'object': StrAnonymizer(obj, anonymization_method,
-                                anonymization_salt),
-        'status': res.status.split()[0],
-        'content_length': res.content_length,
-        'referer': StrAnonymizer(req.referer, anonymization_method,
-                                 anonymization_salt),
-        'txn_id': req.headers.get('x-trans-id'),
-        'user_agent': StrAnonymizer(req.user_agent, anonymization_method,
-                                    anonymization_salt),
-        'trans_time': trans_time,
-        'additional_info': additional_info,
-        'pid': os.getpid(),
-        'policy_index': policy_index,
-    }
-    return LogStringFormatter(default='-').format(fmt, **replacements)
-
-
 def get_trans_id_time(trans_id):
     if len(trans_id) >= 34 and \
             trans_id.startswith('tx') and trans_id[23] == '-':
@@ -690,25 +349,6 @@ def get_trans_id_time(trans_id):
     return None
 
 
-def config_fallocate_value(reserve_value):
-    """
-    Returns fallocate reserve_value as an int or float.
-    Returns is_percent as a boolean.
-    Returns a ValueError on invalid fallocate value.
-    """
-    try:
-        if str(reserve_value[-1:]) == '%':
-            reserve_value = float(reserve_value[:-1])
-            is_percent = True
-        else:
-            reserve_value = int(reserve_value)
-            is_percent = False
-    except ValueError:
-        raise ValueError('Error: %s is an invalid value for fallocate'
-                         '_reserve.' % reserve_value)
-    return reserve_value, is_percent
-
-
 class FileLikeIter(object):
 
     def __init__(self, iterable):
@@ -830,13 +470,14 @@ class FileLikeIter(object):
         self.closed = True
 
 
-def fs_has_free_space(fs_path, space_needed, is_percent):
+def fs_has_free_space(fs_path_or_fd, space_needed, is_percent):
     """
     Check to see whether or not a filesystem has the given amount of space
     free. Unlike fallocate(), this does not reserve any space.
 
-    :param fs_path: path to a file or directory on the filesystem; typically
-        the path to the filesystem's mount point
+    :param fs_path_or_fd: path to a file or directory on the filesystem, or an
+        open file descriptor; if a directory, typically the path to the
+        filesystem's mount point
 
     :param space_needed: minimum bytes or percentage of free space
 
@@ -849,7 +490,10 @@ def fs_has_free_space(fs_path, space_needed, is_percent):
 
     :raises OSError: if fs_path does not exist
     """
-    st = os.statvfs(fs_path)
+    if isinstance(fs_path_or_fd, int):
+        st = os.fstatvfs(fs_path_or_fd)
+    else:
+        st = os.statvfs(fs_path_or_fd)
     free_bytes = st.f_frsize * st.f_bavail
     if is_percent:
         size_bytes = st.f_frsize * st.f_blocks
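With this change fs_has_free_space() accepts an already-open file descriptor as well as a path, dispatching to os.fstatvfs() for ints. A usage sketch (the mount point path is made up for illustration):

    import os
    from swift.common.utils import fs_has_free_space

    fd = os.open('/srv/node/d1', os.O_RDONLY)  # hypothetical mount point
    try:
        have_room = fs_has_free_space(fd, 10, True)  # at least 10% free?
    finally:
        os.close(fd)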
@@ -1136,53 +780,6 @@ def link_fd_to_path(fd, target_path, dirs_created=0, retries=2, fsync=True):
         dirpath = os.path.dirname(dirpath)
 
 
-def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
-    """
-    Validate and split the given HTTP request path.
-
-    **Examples**::
-
-        ['a'] = split_path('/a')
-        ['a', None] = split_path('/a', 1, 2)
-        ['a', 'c'] = split_path('/a/c', 1, 2)
-        ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
-
-    :param path: HTTP Request path to be split
-    :param minsegs: Minimum number of segments to be extracted
-    :param maxsegs: Maximum number of segments to be extracted
-    :param rest_with_last: If True, trailing data will be returned as part
-                           of last segment. If False, and there is
-                           trailing data, raises ValueError.
-    :returns: list of segments with a length of maxsegs (non-existent
-              segments will return as None)
-    :raises ValueError: if given an invalid path
-    """
-    if not maxsegs:
-        maxsegs = minsegs
-    if minsegs > maxsegs:
-        raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
-    if rest_with_last:
-        segs = path.split('/', maxsegs)
-        minsegs += 1
-        maxsegs += 1
-        count = len(segs)
-        if (segs[0] or count < minsegs or count > maxsegs or
-                '' in segs[1:minsegs]):
-            raise ValueError('Invalid path: %s' % quote(path))
-    else:
-        minsegs += 1
-        maxsegs += 1
-        segs = path.split('/', maxsegs)
-        count = len(segs)
-        if (segs[0] or count < minsegs or count > maxsegs + 1 or
-                '' in segs[1:minsegs] or
-                (count == maxsegs + 1 and segs[maxsegs])):
-            raise ValueError('Invalid path: %s' % quote(path))
-    segs = segs[1:maxsegs]
-    segs.extend([None] * (maxsegs - 1 - len(segs)))
-    return segs
-
-
 def validate_device_partition(device, partition):
     """
     Validate that a device and a partition are valid and won't lead to
@@ -1257,260 +854,6 @@ class GreenthreadSafeIterator(object):
     __next__ = next
 
 
-class NullLogger(object):
-    """A no-op logger for eventlet wsgi."""
-
-    def write(self, *args):
-        # "Logs" the args to nowhere
-        pass
-
-    def exception(self, *args):
-        pass
-
-    def critical(self, *args):
-        pass
-
-    def error(self, *args):
-        pass
-
-    def warning(self, *args):
-        pass
-
-    def info(self, *args):
-        pass
-
-    def debug(self, *args):
-        pass
-
-    def log(self, *args):
-        pass
-
-
-class LoggerFileObject(object):
-
-    # Note: this is greenthread-local storage
-    _cls_thread_local = threading.local()
-
-    def __init__(self, logger, log_type='STDOUT'):
-        self.logger = logger
-        self.log_type = log_type
-
-    def write(self, value):
-        # We can get into a nasty situation when logs are going to syslog
-        # and syslog dies.
-        #
-        # It's something like this:
-        #
-        # (A) someone logs something
-        #
-        # (B) there's an exception in sending to /dev/log since syslog is
-        #     not working
-        #
-        # (C) logging takes that exception and writes it to stderr (see
-        #     logging.Handler.handleError)
-        #
-        # (D) stderr was replaced with a LoggerFileObject at process start,
-        #     so the LoggerFileObject takes the provided string and tells
-        #     its logger to log it (to syslog, naturally).
-        #
-        # Then, steps B through D repeat until we run out of stack.
-        if getattr(self._cls_thread_local, 'already_called_write', False):
-            return
-
-        self._cls_thread_local.already_called_write = True
-        try:
-            value = value.strip()
-            if value:
-                if 'Connection reset by peer' in value:
-                    self.logger.error(
-                        '%s: Connection reset by peer', self.log_type)
-                else:
-                    self.logger.error('%(type)s: %(value)s',
-                                      {'type': self.log_type, 'value': value})
-        finally:
-            self._cls_thread_local.already_called_write = False
-
-    def writelines(self, values):
-        if getattr(self._cls_thread_local, 'already_called_writelines', False):
-            return
-
-        self._cls_thread_local.already_called_writelines = True
-        try:
-            self.logger.error('%(type)s: %(value)s',
-                              {'type': self.log_type,
-                               'value': '#012'.join(values)})
-        finally:
-            self._cls_thread_local.already_called_writelines = False
-
-    def close(self):
-        pass
-
-    def flush(self):
-        pass
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        raise IOError(errno.EBADF, 'Bad file descriptor')
-    __next__ = next
-
-    def read(self, size=-1):
-        raise IOError(errno.EBADF, 'Bad file descriptor')
-
-    def readline(self, size=-1):
-        raise IOError(errno.EBADF, 'Bad file descriptor')
-
-    def tell(self):
-        return 0
-
-    def xreadlines(self):
-        return self
-
-
-class StatsdClient(object):
-    def __init__(self, host, port, base_prefix='', tail_prefix='',
-                 default_sample_rate=1, sample_rate_factor=1, logger=None):
-        self._host = host
-        self._port = port
-        self._base_prefix = base_prefix
-        self._set_prefix(tail_prefix)
-        self._default_sample_rate = default_sample_rate
-        self._sample_rate_factor = sample_rate_factor
-        self.random = random
-        self.logger = logger
-
-        # Determine if host is IPv4 or IPv6
-        addr_info, self._sock_family = self._determine_sock_family(host, port)
-
-        # NOTE: we use the original host value, not the DNS-resolved one
-        # because if host is a hostname, we don't want to cache the DNS
-        # resolution for the entire lifetime of this process. Let standard
-        # name resolution caching take effect. This should help operators use
-        # DNS trickery if they want.
-        if addr_info is not None:
-            # addr_info is a list of 5-tuples with the following structure:
-            #     (family, socktype, proto, canonname, sockaddr)
-            # where sockaddr is the only thing of interest to us, and we only
-            # use the first result. We want to use the originally supplied
-            # host (see note above) and the remainder of the variable-length
-            # sockaddr: IPv4 has (address, port) while IPv6 has (address,
-            # port, flow info, scope id).
-            sockaddr = addr_info[0][-1]
-            self._target = (host,) + (sockaddr[1:])
-        else:
-            self._target = (host, port)
-
-    def _determine_sock_family(self, host, port):
-        addr_info = sock_family = None
-        try:
-            addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
-            sock_family = socket.AF_INET
-        except socket.gaierror:
-            try:
-                addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
-                sock_family = socket.AF_INET6
-            except socket.gaierror:
-                # Don't keep the server from starting from what could be a
-                # transient DNS failure. Any hostname will get re-resolved as
-                # necessary in the .sendto() calls.
-                # However, we don't know if we're IPv4 or IPv6 in this case, so
-                # we assume legacy IPv4.
-                sock_family = socket.AF_INET
-        return addr_info, sock_family
-
-    def _set_prefix(self, tail_prefix):
-        """
-        Modifies the prefix that is added to metric names. The resulting prefix
-        is the concatenation of the component parts `base_prefix` and
-        `tail_prefix`. Only truthy components are included. Each included
-        component is followed by a period, e.g.::
-
-            <base_prefix>.<tail_prefix>.
-            <tail_prefix>.
-            <base_prefix>.
-            <the empty string>
-
-        Note: this method is expected to be called from the constructor only,
-        but exists to provide backwards compatible functionality for the
-        deprecated set_prefix() method.
-
-        :param tail_prefix: The new value of tail_prefix
-        """
-        if tail_prefix and self._base_prefix:
-            self._prefix = '.'.join([self._base_prefix, tail_prefix, ''])
-        elif tail_prefix:
-            self._prefix = tail_prefix + '.'
-        elif self._base_prefix:
-            self._prefix = self._base_prefix + '.'
-        else:
-            self._prefix = ''
-
-    def set_prefix(self, tail_prefix):
-        """
-        This method is deprecated; use the ``tail_prefix`` argument of the
-        constructor when instantiating the class instead.
-        """
-        warnings.warn(
-            'set_prefix() is deprecated; use the ``tail_prefix`` argument of '
-            'the constructor when instantiating the class instead.',
-            DeprecationWarning, stacklevel=2
-        )
-        self._set_prefix(tail_prefix)
-
-    def _send(self, m_name, m_value, m_type, sample_rate):
-        if sample_rate is None:
-            sample_rate = self._default_sample_rate
-        sample_rate = sample_rate * self._sample_rate_factor
-        parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type]
-        if sample_rate < 1:
-            if self.random() < sample_rate:
-                parts.append('@%s' % (sample_rate,))
-            else:
-                return
-        if six.PY3:
-            parts = [part.encode('utf-8') for part in parts]
-        # Ideally, we'd cache a sending socket in self, but that
-        # results in a socket getting shared by multiple green threads.
-        with closing(self._open_socket()) as sock:
-            try:
-                return sock.sendto(b'|'.join(parts), self._target)
-            except IOError as err:
-                if self.logger:
-                    self.logger.warning(
-                        'Error sending UDP message to %(target)r: %(err)s',
-                        {'target': self._target, 'err': err})
-
-    def _open_socket(self):
-        return socket.socket(self._sock_family, socket.SOCK_DGRAM)
-
-    def update_stats(self, m_name, m_value, sample_rate=None):
-        return self._send(m_name, m_value, 'c', sample_rate)
-
-    def increment(self, metric, sample_rate=None):
-        return self.update_stats(metric, 1, sample_rate)
-
-    def decrement(self, metric, sample_rate=None):
-        return self.update_stats(metric, -1, sample_rate)
-
-    def _timing(self, metric, timing_ms, sample_rate):
-        # This method was added to disagregate timing metrics when testing
-        return self._send(metric, timing_ms, 'ms', sample_rate)
-
-    def timing(self, metric, timing_ms, sample_rate=None):
-        return self._timing(metric, timing_ms, sample_rate)
-
-    def timing_since(self, metric, orig_time, sample_rate=None):
-        return self._timing(metric, (time.time() - orig_time) * 1000,
-                            sample_rate)
-
-    def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
-        if byte_xfer:
-            return self.timing(metric,
-                               elapsed_time * 1000 / byte_xfer * 1000,
-                               sample_rate)
-
-
 def timing_stats(**dec_kwargs):
     """
     Returns a decorator that logs timing events or errors for public methods in
@@ -1537,517 +880,30 @@ def timing_stats(**dec_kwargs):
1537
880
  else:
1538
881
  ctrl.logger.timing_since(method + '.errors.timing',
1539
882
  start_time, **dec_kwargs)
1540
- return resp
1541
-
1542
- return _timing_stats
1543
- return decorating_func
1544
-
1545
-
1546
- def memcached_timing_stats(**dec_kwargs):
1547
- """
1548
- Returns a decorator that logs timing events or errors for public methods in
1549
- MemcacheRing class, such as memcached set, get and etc.
1550
- """
1551
- def decorating_func(func):
1552
- method = func.__name__
1553
-
1554
- @functools.wraps(func)
1555
- def _timing_stats(cache, *args, **kwargs):
1556
- start_time = time.time()
1557
- result = func(cache, *args, **kwargs)
1558
- cache.logger.timing_since(
1559
- 'memcached.' + method + '.timing', start_time, **dec_kwargs)
1560
- return result
1561
-
1562
- return _timing_stats
1563
- return decorating_func
1564
-
1565
-
1566
- class SwiftLoggerAdapter(logging.LoggerAdapter):
1567
- """
1568
- A logging.LoggerAdapter subclass that also passes through StatsD method
1569
- calls.
1570
-
1571
- Like logging.LoggerAdapter, you have to subclass this and override the
1572
- process() method to accomplish anything useful.
1573
- """
1574
-
1575
- @property
1576
- def name(self):
1577
- # py3 does this for us already; add it for py2
1578
- return self.logger.name
1579
-
1580
- def get_metric_name(self, metric):
1581
- # subclasses may override this method to annotate the metric name
1582
- return metric
1583
-
1584
- def update_stats(self, metric, *a, **kw):
1585
- return self.logger.update_stats(self.get_metric_name(metric), *a, **kw)
1586
-
1587
- def increment(self, metric, *a, **kw):
1588
- return self.logger.increment(self.get_metric_name(metric), *a, **kw)
1589
-
1590
- def decrement(self, metric, *a, **kw):
1591
- return self.logger.decrement(self.get_metric_name(metric), *a, **kw)
1592
-
1593
- def timing(self, metric, *a, **kw):
1594
- return self.logger.timing(self.get_metric_name(metric), *a, **kw)
1595
-
1596
- def timing_since(self, metric, *a, **kw):
1597
- return self.logger.timing_since(self.get_metric_name(metric), *a, **kw)
1598
-
1599
- def transfer_rate(self, metric, *a, **kw):
1600
- return self.logger.transfer_rate(
1601
- self.get_metric_name(metric), *a, **kw)
1602
-
1603
- @property
1604
- def thread_locals(self):
1605
- return self.logger.thread_locals
1606
-
1607
- @thread_locals.setter
1608
- def thread_locals(self, thread_locals):
1609
- self.logger.thread_locals = thread_locals
1610
-
1611
- def exception(self, msg, *a, **kw):
1612
- # We up-call to exception() where stdlib uses error() so we can get
1613
- # some of the traceback suppression from LogAdapter, below
1614
- self.logger.exception(msg, *a, **kw)
1615
-
1616
-
1617
- class PrefixLoggerAdapter(SwiftLoggerAdapter):
1618
- """
1619
- Adds an optional prefix to all its log messages. When the prefix has not
1620
- been set, messages are unchanged.
1621
- """
1622
-
1623
- def set_prefix(self, prefix):
1624
- self.extra['prefix'] = prefix
1625
-
1626
- def exception(self, msg, *a, **kw):
1627
- if 'prefix' in self.extra:
1628
- msg = self.extra['prefix'] + msg
1629
- super(PrefixLoggerAdapter, self).exception(msg, *a, **kw)
1630
-
1631
- def process(self, msg, kwargs):
1632
- msg, kwargs = super(PrefixLoggerAdapter, self).process(msg, kwargs)
1633
- if 'prefix' in self.extra:
1634
- msg = self.extra['prefix'] + msg
1635
- return (msg, kwargs)
1636
-
1637
-
1638
- class MetricsPrefixLoggerAdapter(SwiftLoggerAdapter):
1639
- """
1640
- Adds a prefix to all Statsd metrics' names.
1641
- """
1642
-
1643
- def __init__(self, logger, extra, metric_prefix):
1644
- """
1645
- :param logger: an instance of logging.Logger
1646
- :param extra: a dict-like object
1647
- :param metric_prefix: A prefix that will be added to the start of each
1648
- metric name such that the metric name is transformed to:
1649
- ``<metric_prefix>.<metric name>``. Note that the logger's
1650
- StatsdClient also adds its configured prefix to metric names.
1651
- """
1652
- super(MetricsPrefixLoggerAdapter, self).__init__(logger, extra)
1653
- self.metric_prefix = metric_prefix
1654
-
1655
- def get_metric_name(self, metric):
1656
- return '%s.%s' % (self.metric_prefix, metric)
1657
-
1658
-
1659
- # double inheritance to support property with setter
1660
- class LogAdapter(logging.LoggerAdapter, object):
1661
- """
1662
- A Logger like object which performs some reformatting on calls to
1663
- :meth:`exception`. Can be used to store a threadlocal transaction id and
1664
- client ip.
1665
- """
1666
-
1667
- _cls_thread_local = threading.local()
1668
-
1669
- def __init__(self, logger, server):
1670
- logging.LoggerAdapter.__init__(self, logger, {})
1671
- self.server = server
1672
- self.warn = self.warning
1673
-
1674
- # There are a few properties needed for py35; see
1675
- # - https://bugs.python.org/issue31457
1676
- # - https://github.com/python/cpython/commit/1bbd482
1677
- # - https://github.com/python/cpython/commit/0b6a118
1678
- # - https://github.com/python/cpython/commit/ce9e625
1679
- def _log(self, level, msg, args, exc_info=None, extra=None,
1680
- stack_info=False):
1681
- """
1682
- Low-level log implementation, proxied to allow nested logger adapters.
1683
- """
1684
- return self.logger._log(
1685
- level,
1686
- msg,
1687
- args,
1688
- exc_info=exc_info,
1689
- extra=extra,
1690
- stack_info=stack_info,
1691
- )
1692
-
1693
- @property
1694
- def manager(self):
1695
- return self.logger.manager
1696
-
1697
- @manager.setter
1698
- def manager(self, value):
1699
- self.logger.manager = value
1700
-
1701
- @property
1702
- def name(self):
1703
- return self.logger.name
1704
-
1705
- @property
1706
- def txn_id(self):
1707
- if hasattr(self._cls_thread_local, 'txn_id'):
1708
- return self._cls_thread_local.txn_id
1709
-
1710
- @txn_id.setter
1711
- def txn_id(self, value):
1712
- self._cls_thread_local.txn_id = value
1713
-
1714
- @property
1715
- def client_ip(self):
1716
- if hasattr(self._cls_thread_local, 'client_ip'):
1717
- return self._cls_thread_local.client_ip
1718
-
1719
- @client_ip.setter
1720
- def client_ip(self, value):
1721
- self._cls_thread_local.client_ip = value
1722
-
1723
- @property
1724
- def thread_locals(self):
1725
- return (self.txn_id, self.client_ip)
1726
-
1727
- @thread_locals.setter
1728
- def thread_locals(self, value):
1729
- self.txn_id, self.client_ip = value
1730
-
1731
- def getEffectiveLevel(self):
1732
- return self.logger.getEffectiveLevel()
1733
-
1734
- def process(self, msg, kwargs):
1735
- """
1736
- Add extra info to message
1737
- """
1738
- kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
1739
- 'client_ip': self.client_ip}
1740
- return msg, kwargs
1741
-
1742
- def notice(self, msg, *args, **kwargs):
1743
- """
1744
- Convenience function for syslog priority LOG_NOTICE. The python
1745
- logging lvl is set to 25, just above info. SysLogHandler is
1746
- monkey patched to map this log lvl to the LOG_NOTICE syslog
1747
- priority.
1748
- """
1749
- self.log(NOTICE, msg, *args, **kwargs)
1750
-
1751
- def _exception(self, msg, *args, **kwargs):
1752
- logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
1753
-
1754
- def exception(self, msg, *args, **kwargs):
1755
- _junk, exc, _junk = sys.exc_info()
1756
- call = self.error
1757
- emsg = ''
1758
- if isinstance(exc, (http_client.BadStatusLine,
1759
- green_http_client.BadStatusLine)):
1760
- # Use error(); not really exceptional
1761
- emsg = repr(exc)
1762
- # Note that on py3, we've seen a RemoteDisconnected error getting
1763
- # raised, which inherits from *both* BadStatusLine and OSError;
1764
- # we want it getting caught here
1765
- elif isinstance(exc, (OSError, socket.error)):
1766
- if exc.errno in (errno.EIO, errno.ENOSPC):
1767
- emsg = str(exc)
1768
- elif exc.errno == errno.ECONNREFUSED:
1769
- emsg = 'Connection refused'
1770
- elif exc.errno == errno.ECONNRESET:
1771
- emsg = 'Connection reset'
1772
- elif exc.errno == errno.EHOSTUNREACH:
1773
- emsg = 'Host unreachable'
1774
- elif exc.errno == errno.ENETUNREACH:
1775
- emsg = 'Network unreachable'
1776
- elif exc.errno == errno.ETIMEDOUT:
1777
- emsg = 'Connection timeout'
1778
- elif exc.errno == errno.EPIPE:
1779
- emsg = 'Broken pipe'
1780
- else:
1781
- call = self._exception
1782
- elif isinstance(exc, eventlet.Timeout):
1783
- emsg = exc.__class__.__name__
1784
- detail = '%ss' % exc.seconds
1785
- if hasattr(exc, 'created_at'):
1786
- detail += ' after %0.2fs' % (time.time() - exc.created_at)
1787
- emsg += ' (%s)' % detail
1788
- if isinstance(exc, swift.common.exceptions.MessageTimeout):
1789
- if exc.msg:
1790
- emsg += ' %s' % exc.msg
1791
- else:
1792
- call = self._exception
1793
- call('%s: %s' % (msg, emsg), *args, **kwargs)
883
+ return resp
1794
884
 
1795
- def set_statsd_prefix(self, prefix):
1796
- """
1797
- This method is deprecated. Callers should use the
1798
- ``statsd_tail_prefix`` argument of ``get_logger`` when instantiating a
1799
- logger.
1800
-
1801
- The StatsD client prefix defaults to the "name" of the logger. This
1802
- method may override that default with a specific value. Currently used
1803
- in the proxy-server to differentiate the Account, Container, and Object
1804
- controllers.
1805
- """
1806
- warnings.warn(
1807
- 'set_statsd_prefix() is deprecated; use the '
1808
- '``statsd_tail_prefix`` argument to ``get_logger`` instead.',
1809
- DeprecationWarning, stacklevel=2
1810
- )
1811
- if self.logger.statsd_client:
1812
- self.logger.statsd_client._set_prefix(prefix)
885
+ return _timing_stats
886
+ return decorating_func
1813
887
 
1814
- def statsd_delegate(statsd_func_name):
1815
- """
1816
- Factory to create methods which delegate to methods on
1817
- self.logger.statsd_client (an instance of StatsdClient). The
1818
- created methods conditionally delegate to a method whose name is given
1819
- in 'statsd_func_name'. The created delegate methods are a no-op when
1820
- StatsD logging is not configured.
1821
888
 
1822
- :param statsd_func_name: the name of a method on StatsdClient.
1823
- """
1824
- func = getattr(StatsdClient, statsd_func_name)
889
+ def memcached_timing_stats(**dec_kwargs):
890
+ """
891
+ Returns a decorator that logs timing events or errors for public methods in
892
+ MemcacheRing class, such as memcached set, get and etc.
893
+ """
894
+ def decorating_func(func):
895
+ method = func.__name__
1825
896
 
1826
897
  @functools.wraps(func)
1827
- def wrapped(self, *a, **kw):
1828
- if getattr(self.logger, 'statsd_client'):
1829
- func = getattr(self.logger.statsd_client, statsd_func_name)
1830
- return func(*a, **kw)
1831
- return wrapped
1832
-
1833
- update_stats = statsd_delegate('update_stats')
1834
- increment = statsd_delegate('increment')
1835
- decrement = statsd_delegate('decrement')
1836
- timing = statsd_delegate('timing')
1837
- timing_since = statsd_delegate('timing_since')
1838
- transfer_rate = statsd_delegate('transfer_rate')
1839
-
1840
-
1841
- class SwiftLogFormatter(logging.Formatter):
1842
- """
1843
- Custom logging.Formatter will append txn_id to a log message if the
1844
- record has one and the message does not. Optionally it can shorten
1845
- overly long log lines.
1846
- """
1847
-
1848
- def __init__(self, fmt=None, datefmt=None, max_line_length=0):
1849
- logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
1850
- self.max_line_length = max_line_length
1851
-
1852
- def format(self, record):
1853
- if not hasattr(record, 'server'):
1854
- # Catch log messages that were not initiated by swift
1855
- # (for example, the keystone auth middleware)
1856
- record.server = record.name
1857
-
1858
- # Included from Python's logging.Formatter and then altered slightly to
1859
- # replace \n with #012
1860
- record.message = record.getMessage()
1861
- if self._fmt.find('%(asctime)') >= 0:
1862
- record.asctime = self.formatTime(record, self.datefmt)
1863
- msg = (self._fmt % record.__dict__).replace('\n', '#012')
1864
- if record.exc_info:
1865
- # Cache the traceback text to avoid converting it multiple times
1866
- # (it's constant anyway)
1867
- if not record.exc_text:
1868
- record.exc_text = self.formatException(
1869
- record.exc_info).replace('\n', '#012')
1870
- if record.exc_text:
1871
- if not msg.endswith('#012'):
1872
- msg = msg + '#012'
1873
- msg = msg + record.exc_text
1874
-
1875
- if (hasattr(record, 'txn_id') and record.txn_id and
1876
- record.txn_id not in msg):
1877
- msg = "%s (txn: %s)" % (msg, record.txn_id)
1878
- if (hasattr(record, 'client_ip') and record.client_ip and
1879
- record.levelno != logging.INFO and
1880
- record.client_ip not in msg):
1881
- msg = "%s (client_ip: %s)" % (msg, record.client_ip)
1882
- if self.max_line_length > 0 and len(msg) > self.max_line_length:
1883
- if self.max_line_length < 7:
1884
- msg = msg[:self.max_line_length]
1885
- else:
1886
- approxhalf = (self.max_line_length - 5) // 2
1887
- msg = msg[:approxhalf] + " ... " + msg[-approxhalf:]
1888
- return msg
1889
-
1890
-
1891
- class LogLevelFilter(object):
1892
- """
1893
- Drop messages for the logger based on level.
1894
-
1895
- This is useful when dependencies log too much information.
1896
-
1897
- :param level: All messages at or below this level are dropped
1898
- (DEBUG < INFO < WARN < ERROR < CRITICAL|FATAL)
1899
- Default: DEBUG
1900
- """
1901
-
1902
- def __init__(self, level=logging.DEBUG):
1903
- self.level = level
1904
-
1905
- def filter(self, record):
1906
- if record.levelno <= self.level:
1907
- return 0
1908
- return 1
1909
-
1910
-
1911
- def get_logger(conf, name=None, log_to_console=False, log_route=None,
1912
- fmt="%(server)s: %(message)s", statsd_tail_prefix=None):
1913
- """
1914
- Get the current system logger using config settings.
1915
-
1916
- **Log config and defaults**::
1917
-
1918
- log_facility = LOG_LOCAL0
1919
- log_level = INFO
1920
- log_name = swift
1921
- log_max_line_length = 0
1922
- log_udp_host = (disabled)
1923
- log_udp_port = logging.handlers.SYSLOG_UDP_PORT
1924
- log_address = /dev/log
1925
- log_statsd_host = (disabled)
1926
- log_statsd_port = 8125
1927
- log_statsd_default_sample_rate = 1.0
1928
- log_statsd_sample_rate_factor = 1.0
1929
- log_statsd_metric_prefix = (empty-string)
1930
-
1931
- :param conf: Configuration dict to read settings from
1932
- :param name: This value is used to populate the ``server`` field in the log
1933
- format, as the prefix for statsd messages, and as the default
1934
- value for ``log_route``; defaults to the ``log_name`` value in
1935
- ``conf``, if it exists, or to 'swift'.
1936
- :param log_to_console: Add handler which writes to console on stderr
1937
- :param log_route: Route for the logging, not emitted to the log, just used
1938
- to separate logging configurations; defaults to the value
1939
- of ``name`` or whatever ``name`` defaults to. This value
1940
- is used as the name attribute of the
1941
- ``logging.LogAdapter`` that is returned.
1942
- :param fmt: Override log format
1943
-    :param statsd_tail_prefix: tail prefix to pass to statsd client; if None
-        then the tail prefix defaults to the value of ``name``.
-    :return: an instance of ``LogAdapter``
-    """
-    # note: log_name is typically specified in conf (i.e. defined by
-    # operators), whereas log_route is typically hard-coded in callers of
-    # get_logger (i.e. defined by developers)
-    if not conf:
-        conf = {}
-    if name is None:
-        name = conf.get('log_name', 'swift')
-    if not log_route:
-        log_route = name
-    logger = logging.getLogger(log_route)
-    logger.propagate = False
-    # all new handlers will get the same formatter
-    formatter = SwiftLogFormatter(
-        fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))
-
-    # get_logger will only ever add one SysLog Handler to a logger
-    if not hasattr(get_logger, 'handler4logger'):
-        get_logger.handler4logger = {}
-    if logger in get_logger.handler4logger:
-        logger.removeHandler(get_logger.handler4logger[logger])
-
-    # facility for this logger will be set by last call wins
-    facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
-                       SysLogHandler.LOG_LOCAL0)
-    udp_host = conf.get('log_udp_host')
-    if udp_host:
-        udp_port = int(conf.get('log_udp_port',
-                                logging.handlers.SYSLOG_UDP_PORT))
-        handler = ThreadSafeSysLogHandler(address=(udp_host, udp_port),
-                                          facility=facility)
-    else:
-        log_address = conf.get('log_address', '/dev/log')
-        handler = None
-        try:
-            mode = os.stat(log_address).st_mode
-            if stat.S_ISSOCK(mode):
-                handler = ThreadSafeSysLogHandler(address=log_address,
-                                                  facility=facility)
-        except (OSError, socket.error) as e:
-            # If either /dev/log isn't a UNIX socket or it does not exist at
-            # all then py2 would raise an error
-            if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
-                raise
-        if handler is None:
-            # fallback to default UDP
-            handler = ThreadSafeSysLogHandler(facility=facility)
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    get_logger.handler4logger[logger] = handler
-
-    # setup console logging
-    if log_to_console or hasattr(get_logger, 'console_handler4logger'):
-        # remove pre-existing console handler for this logger
-        if not hasattr(get_logger, 'console_handler4logger'):
-            get_logger.console_handler4logger = {}
-        if logger in get_logger.console_handler4logger:
-            logger.removeHandler(get_logger.console_handler4logger[logger])
-
-        console_handler = logging.StreamHandler(sys.__stderr__)
-        console_handler.setFormatter(formatter)
-        logger.addHandler(console_handler)
-        get_logger.console_handler4logger[logger] = console_handler
-
-    # set the level for the logger
-    logger.setLevel(
-        getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
-
-    # Setup logger with a StatsD client if so configured
-    statsd_host = conf.get('log_statsd_host')
-    if statsd_host:
-        statsd_port = int(conf.get('log_statsd_port', 8125))
-        base_prefix = conf.get('log_statsd_metric_prefix', '')
-        default_sample_rate = float(conf.get(
-            'log_statsd_default_sample_rate', 1))
-        sample_rate_factor = float(conf.get(
-            'log_statsd_sample_rate_factor', 1))
-        if statsd_tail_prefix is None:
-            statsd_tail_prefix = name
-        statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix,
-                                     statsd_tail_prefix, default_sample_rate,
-                                     sample_rate_factor, logger=logger)
-        logger.statsd_client = statsd_client
-    else:
-        logger.statsd_client = None
-
-    adapted_logger = LogAdapter(logger, name)
-    other_handlers = conf.get('log_custom_handlers', None)
-    if other_handlers:
-        log_custom_handlers = [s.strip() for s in other_handlers.split(',')
-                               if s.strip()]
-        for hook in log_custom_handlers:
-            try:
-                mod, fnc = hook.rsplit('.', 1)
-                logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
-                logger_hook(conf, name, log_to_console, log_route, fmt,
-                            logger, adapted_logger)
-            except (AttributeError, ImportError):
-                print('Error calling custom handler [%s]' % hook,
-                      file=sys.stderr)
-            except ValueError:
-                print('Invalid custom handler format [%s]' % hook,
-                      file=sys.stderr)
+        def _timing_stats(cache, *args, **kwargs):
+            start_time = time.time()
+            result = func(cache, *args, **kwargs)
+            cache.logger.timing_since(
+                'memcached.' + method + '.timing', start_time, **dec_kwargs)
+            return result
 
-    return adapted_logger
+        return _timing_stats
+    return decorating_func
 
 
 def get_hub():
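The added ``_timing_stats`` closure above is the inner wrapper of the ``memcached_timing_stats`` decorator, which emits a ``memcached.<method>.timing`` metric around each wrapped MemcacheRing call. A self-contained sketch of the same pattern; the ``timing_since`` call mirrors the diff, while the stub logger and cache classes are illustrative stand-ins:

```python
import time


def memcached_timing_stats(**dec_kwargs):
    # returns a decorator that times each call to a cache method
    def decorating_func(func):
        method = func.__name__

        def _timing_stats(cache, *args, **kwargs):
            start_time = time.time()
            result = func(cache, *args, **kwargs)
            cache.logger.timing_since(
                'memcached.' + method + '.timing', start_time, **dec_kwargs)
            return result

        return _timing_stats
    return decorating_func


class StubLogger(object):
    # stand-in for swift's statsd-enabled logger; illustrative only
    def timing_since(self, metric, start, **kwargs):
        print('%s: %.6fs' % (metric, time.time() - start))


class StubCache(object):
    logger = StubLogger()

    @memcached_timing_stats()
    def get(self, key):
        return None


StubCache().get('some-key')  # prints e.g. "memcached.get.timing: 0.000002s"
```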
@@ -2127,43 +983,6 @@ def clean_up_daemon_hygiene():
     os.umask(0o22)  # ensure files are created with the correct privileges
 
 
-def capture_stdio(logger, **kwargs):
-    """
-    Log unhandled exceptions, close stdio, capture stdout and stderr.
-
-    param logger: Logger object to use
-    """
-    # log uncaught exceptions
-    sys.excepthook = lambda * exc_info: \
-        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
-
-    # collect stdio file desc not in use for logging
-    stdio_files = [sys.stdin, sys.stdout, sys.stderr]
-    console_fds = [h.stream.fileno() for _junk, h in getattr(
-        get_logger, 'console_handler4logger', {}).items()]
-    stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]
-
-    with open(os.devnull, 'r+b') as nullfile:
-        # close stdio (excludes fds open for logging)
-        for f in stdio_files:
-            # some platforms throw an error when attempting an stdin flush
-            try:
-                f.flush()
-            except IOError:
-                pass
-
-            try:
-                os.dup2(nullfile.fileno(), f.fileno())
-            except OSError:
-                pass
-
-    # redirect stdio
-    if kwargs.pop('capture_stdout', True):
-        sys.stdout = LoggerFileObject(logger)
-    if kwargs.pop('capture_stderr', True):
-        sys.stderr = LoggerFileObject(logger, 'STDERR')
-
-
 def parse_options(parser=None, once=False, test_config=False, test_args=None):
     """Parse standard swift server/daemon options with optparse.OptionParser.
 
@@ -2550,119 +1369,6 @@ def cache_from_env(env, allow_none=False):
     return item_from_env(env, 'swift.cache', allow_none)
 
 
-def read_conf_dir(parser, conf_dir):
-    conf_files = []
-    for f in os.listdir(conf_dir):
-        if f.endswith('.conf') and not f.startswith('.'):
-            conf_files.append(os.path.join(conf_dir, f))
-    return parser.read(sorted(conf_files))
-
-
-if six.PY2:
-    NicerInterpolation = None  # just don't cause ImportErrors over in wsgi.py
-else:
-    class NicerInterpolation(configparser.BasicInterpolation):
-        def before_get(self, parser, section, option, value, defaults):
-            if '%(' not in value:
-                return value
-            return super(NicerInterpolation, self).before_get(
-                parser, section, option, value, defaults)
-
-
-def readconf(conf_path, section_name=None, log_name=None, defaults=None,
-             raw=False):
-    """
-    Read config file(s) and return config items as a dict
-
-    :param conf_path: path to config file/directory, or a file-like object
-                      (hasattr readline)
-    :param section_name: config section to read (will return all sections if
-                         not defined)
-    :param log_name: name to be used with logging (will use section_name if
-                     not defined)
-    :param defaults: dict of default values to pre-populate the config with
-    :returns: dict of config items
-    :raises ValueError: if section_name does not exist
-    :raises IOError: if reading the file failed
-    """
-    if defaults is None:
-        defaults = {}
-    if raw:
-        c = RawConfigParser(defaults)
-    else:
-        if six.PY2:
-            c = ConfigParser(defaults)
-        else:
-            # In general, we haven't really thought much about interpolation
-            # in configs. Python's default ConfigParser has always supported
-            # it, though, so *we* got it "for free". Unfortunatley, since we
-            # "supported" interpolation, we have to assume there are
-            # deployments in the wild that use it, and try not to break them.
-            # So, do what we can to mimic the py2 behavior of passing through
-            # values like "1%" (which we want to support for
-            # fallocate_reserve).
-            c = ConfigParser(defaults, interpolation=NicerInterpolation())
-    c.optionxform = str  # Don't lower-case keys
-
-    if hasattr(conf_path, 'readline'):
-        if hasattr(conf_path, 'seek'):
-            conf_path.seek(0)
-        if six.PY2:
-            c.readfp(conf_path)
-        else:
-            c.read_file(conf_path)
-    else:
-        if os.path.isdir(conf_path):
-            # read all configs in directory
-            success = read_conf_dir(c, conf_path)
-        else:
-            success = c.read(conf_path)
-        if not success:
-            raise IOError("Unable to read config from %s" %
-                          conf_path)
-    if section_name:
-        if c.has_section(section_name):
-            conf = dict(c.items(section_name))
-        else:
-            raise ValueError(
-                "Unable to find %(section)s config section in %(conf)s" %
-                {'section': section_name, 'conf': conf_path})
-        if "log_name" not in conf:
-            if log_name is not None:
-                conf['log_name'] = log_name
-            else:
-                conf['log_name'] = section_name
-    else:
-        conf = {}
-        for s in c.sections():
-            conf.update({s: dict(c.items(s))})
-        if 'log_name' not in conf:
-            conf['log_name'] = log_name
-    conf['__file__'] = conf_path
-    return conf
-
-
-def parse_prefixed_conf(conf_file, prefix):
-    """
-    Search the config file for any common-prefix sections and load those
-    sections to a dict mapping the after-prefix reference to options.
-
-    :param conf_file: the file name of the config to parse
-    :param prefix: the common prefix of the sections
-    :return: a dict mapping policy reference -> dict of policy options
-    :raises ValueError: if a policy config section has an invalid name
-    """
-
-    ret_config = {}
-    all_conf = readconf(conf_file)
-    for section, options in all_conf.items():
-        if not section.startswith(prefix):
-            continue
-        target_ref = section[len(prefix):]
-        ret_config[target_ref] = options
-    return ret_config
-
-
 def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
     """
     Ensure that a pickle file gets written to disk. The file
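``read_conf_dir``, ``readconf`` and ``parse_prefixed_conf`` are relocated within the package rather than dropped, so their behavior is unchanged. A minimal usage sketch, assuming ``readconf`` remains importable from ``swift.common.utils`` (the config path is illustrative):

```python
from swift.common.utils import readconf

# Read one section as a flat dict of string options; omitting
# section_name instead returns a dict of all sections keyed by name.
conf = readconf('/etc/swift/object-server.conf',
                section_name='app:object-server')
print(conf['log_name'])   # defaults to the section name
print(conf['__file__'])   # the path the config was read from
```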
@@ -2952,13 +1658,19 @@ class AbstractRateLimiter(object):
         running_time < (current time - rate_buffer ms) to allow an initial
         burst.
         """
-        self.max_rate = max_rate
-        self.rate_buffer_ms = rate_buffer * self.clock_accuracy
+        self.set_max_rate(max_rate)
+        self.set_rate_buffer(rate_buffer)
         self.burst_after_idle = burst_after_idle
         self.running_time = running_time
+
+    def set_max_rate(self, max_rate):
+        self.max_rate = max_rate
         self.time_per_incr = (self.clock_accuracy / self.max_rate
                               if self.max_rate else 0)
 
+    def set_rate_buffer(self, rate_buffer):
+        self.rate_buffer_ms = rate_buffer * self.clock_accuracy
+
     def _sleep(self, seconds):
         # subclasses should override to implement a sleep
         raise NotImplementedError
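Factoring the assignments into ``set_max_rate`` and ``set_rate_buffer`` lets callers retune a live limiter, with ``time_per_incr`` recomputed whenever the rate changes. The arithmetic, assuming the class's usual millisecond ``clock_accuracy`` of 1000 (not shown in this excerpt):

```python
CLOCK_ACCURACY = 1000.0  # assumed value of AbstractRateLimiter.clock_accuracy

# set_max_rate(25) permits one increment every 40ms
max_rate = 25
time_per_incr = CLOCK_ACCURACY / max_rate if max_rate else 0
assert time_per_incr == 40.0

# set_rate_buffer(5) allows an initial burst worth 5 seconds of rate
rate_buffer = 5
rate_buffer_ms = rate_buffer * CLOCK_ACCURACY
assert rate_buffer_ms == 5000.0
```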
@@ -3287,112 +1999,6 @@ def validate_sync_to(value, allowed_sync_hosts, realms_conf):
     return (None, value, None, None)
 
 
-def affinity_key_function(affinity_str):
-    """Turns an affinity config value into a function suitable for passing to
-    sort(). After doing so, the array will be sorted with respect to the given
-    ordering.
-
-    For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
-    will be sorted with all nodes from region 1 (r1=1) first, then all the
-    nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
-    else.
-
-    Note that the order of the pieces of affinity_str is irrelevant; the
-    priority values are what comes after the equals sign.
-
-    If affinity_str is empty or all whitespace, then the resulting function
-    will not alter the ordering of the nodes.
-
-    :param affinity_str: affinity config value, e.g. "r1z2=3"
-                         or "r1=1, r2z1=2, r2z2=2"
-    :returns: single-argument function
-    :raises ValueError: if argument invalid
-    """
-    affinity_str = affinity_str.strip()
-
-    if not affinity_str:
-        return lambda x: 0
-
-    priority_matchers = []
-    pieces = [s.strip() for s in affinity_str.split(',')]
-    for piece in pieces:
-        # matches r<number>=<number> or r<number>z<number>=<number>
-        match = re.match(r"r(\d+)(?:z(\d+))?=(\d+)$", piece)
-        if match:
-            region, zone, priority = match.groups()
-            region = int(region)
-            priority = int(priority)
-            zone = int(zone) if zone else None
-
-            matcher = {'region': region, 'priority': priority}
-            if zone is not None:
-                matcher['zone'] = zone
-            priority_matchers.append(matcher)
-        else:
-            raise ValueError("Invalid affinity value: %r" % affinity_str)
-
-    priority_matchers.sort(key=operator.itemgetter('priority'))
-
-    def keyfn(ring_node):
-        for matcher in priority_matchers:
-            if (matcher['region'] == ring_node['region']
-                    and ('zone' not in matcher
-                         or matcher['zone'] == ring_node['zone'])):
-                return matcher['priority']
-        return 4294967296  # 2^32, i.e. "a big number"
-    return keyfn
-
-
-def affinity_locality_predicate(write_affinity_str):
-    """
-    Turns a write-affinity config value into a predicate function for nodes.
-    The returned value will be a 1-arg function that takes a node dictionary
-    and returns a true value if it is "local" and a false value otherwise. The
-    definition of "local" comes from the affinity_str argument passed in here.
-
-    For example, if affinity_str is "r1, r2z2", then only nodes where region=1
-    or where (region=2 and zone=2) are considered local.
-
-    If affinity_str is empty or all whitespace, then the resulting function
-    will consider everything local
-
-    :param write_affinity_str: affinity config value, e.g. "r1z2"
-                               or "r1, r2z1, r2z2"
-    :returns: single-argument function, or None if affinity_str is empty
-    :raises ValueError: if argument invalid
-    """
-    affinity_str = write_affinity_str.strip()
-
-    if not affinity_str:
-        return None
-
-    matchers = []
-    pieces = [s.strip() for s in affinity_str.split(',')]
-    for piece in pieces:
-        # matches r<number> or r<number>z<number>
-        match = re.match(r"r(\d+)(?:z(\d+))?$", piece)
-        if match:
-            region, zone = match.groups()
-            region = int(region)
-            zone = int(zone) if zone else None
-
-            matcher = {'region': region}
-            if zone is not None:
-                matcher['zone'] = zone
-            matchers.append(matcher)
-        else:
-            raise ValueError("Invalid write-affinity value: %r" % affinity_str)
-
-    def is_local(ring_node):
-        for matcher in matchers:
-            if (matcher['region'] == ring_node['region']
-                    and ('zone' not in matcher
-                         or matcher['zone'] == ring_node['zone'])):
-                return True
-        return False
-    return is_local
-
-
 def get_remote_client(req):
     # remote host for zeus
     client = req.headers.get('x-cluster-client-ip')
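Like the config helpers earlier, the affinity helpers are relocated rather than removed. They turn a config string such as ``"r1=100, r2=200"`` into a sort key: region-1 nodes first, then region-2 nodes, then everything else. A condensed, runnable sketch of the matching logic (error handling elided, so input is assumed well formed):

```python
import operator
import re


def affinity_key_function(affinity_str):
    # condensed from the helper above
    priority_matchers = []
    for piece in [s.strip() for s in affinity_str.split(',') if s.strip()]:
        region, zone, priority = re.match(
            r"r(\d+)(?:z(\d+))?=(\d+)$", piece).groups()
        matcher = {'region': int(region), 'priority': int(priority)}
        if zone:
            matcher['zone'] = int(zone)
        priority_matchers.append(matcher)
    priority_matchers.sort(key=operator.itemgetter('priority'))

    def keyfn(node):
        for m in priority_matchers:
            if (m['region'] == node['region']
                    and ('zone' not in m or m['zone'] == node['zone'])):
                return m['priority']
        return 4294967296  # unmatched nodes sort last
    return keyfn


nodes = [{'region': 2, 'zone': 1}, {'region': 1, 'zone': 3},
         {'region': 3, 'zone': 1}]
nodes.sort(key=affinity_key_function("r1=100, r2=200"))
assert [n['region'] for n in nodes] == [1, 2, 3]
```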
@@ -3425,7 +2031,7 @@ def put_recon_cache_entry(cache_entry, key, item):
 
     If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
     will be deleted. Similarly if ``item`` is a dict and any of its values are
-    empty dicts then the corrsponsing key will be deleted from the nested dict
+    empty dicts then the corresponding key will be deleted from the nested dict
     in ``cache_entry``.
 
     We use nested recon cache entries when the object auditor
@@ -3648,30 +2254,6 @@ def rsync_module_interpolation(template, device):
     return module
 
 
-def get_valid_utf8_str(str_or_unicode):
-    """
-    Get valid parts of utf-8 str from str, unicode and even invalid utf-8 str
-
-    :param str_or_unicode: a string or an unicode which can be invalid utf-8
-    """
-    if six.PY2:
-        if isinstance(str_or_unicode, six.text_type):
-            (str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace')
-        (valid_unicode_str, _len) = utf8_decoder(str_or_unicode, 'replace')
-    else:
-        # Apparently under py3 we need to go to utf-16 to collapse surrogates?
-        if isinstance(str_or_unicode, six.binary_type):
-            try:
-                (str_or_unicode, _len) = utf8_decoder(str_or_unicode,
-                                                      'surrogatepass')
-            except UnicodeDecodeError:
-                (str_or_unicode, _len) = utf8_decoder(str_or_unicode,
-                                                      'replace')
-        (str_or_unicode, _len) = utf16_encoder(str_or_unicode, 'surrogatepass')
-        (valid_unicode_str, _len) = utf16_decoder(str_or_unicode, 'replace')
-    return valid_unicode_str.encode('utf-8')
-
-
 class Everything(object):
     """
     A container that contains everything. If "e" is an instance of
@@ -3704,27 +2286,93 @@ def csv_append(csv_string, item):
     return item
 
 
-class CloseableChain(object):
+class ClosingIterator(object):
     """
-    Like itertools.chain, but with a close method that will attempt to invoke
-    its sub-iterators' close methods, if any.
+    Wrap another iterator and close it, if possible, on completion/exception.
+
+    If other closeable objects are given then they will also be closed when
+    this iterator is closed.
+
+    This is particularly useful for ensuring a generator properly closes its
+    resources, even if the generator was never started.
+
+    This class may be subclassed to override the behavior of
+    ``_get_next_item``.
+
+    :param iterable: iterator to wrap.
+    :param other_closeables: other resources to attempt to close.
     """
+    __slots__ = ('closeables', 'wrapped_iter', 'closed')
 
-    def __init__(self, *iterables):
-        self.iterables = iterables
-        self.chained_iter = itertools.chain(*self.iterables)
+    def __init__(self, iterable, other_closeables=None):
+        self.closeables = [iterable]
+        if other_closeables:
+            self.closeables.extend(other_closeables)
+        # this is usually, but not necessarily, the same object
+        self.wrapped_iter = iter(iterable)
+        self.closed = False
 
     def __iter__(self):
         return self
 
+    def _get_next_item(self):
+        return next(self.wrapped_iter)
+
     def __next__(self):
-        return next(self.chained_iter)
+        try:
+            return self._get_next_item()
+        except Exception:
+            # note: if wrapped_iter is a generator then the exception
+            # already caused it to exit (without raising a GeneratorExit)
+            # but we still need to close any other closeables.
+            self.close()
+            raise
 
     next = __next__  # py2
 
     def close(self):
-        for it in self.iterables:
-            close_if_possible(it)
+        if not self.closed:
+            for wrapped in self.closeables:
+                close_if_possible(wrapped)
+            # clear it out so they get GC'ed
+            self.closeables = []
+            self.wrapped_iter = iter([])
+            self.closed = True
+
+
+class ClosingMapper(ClosingIterator):
+    """
+    A closing iterator that yields the result of ``function`` as it is applied
+    to each item of ``iterable``.
+
+    Note that while this behaves similarly to the built-in ``map`` function,
+    ``other_closeables`` does not have the same semantic as the ``iterables``
+    argument of ``map``.
+
+    :param function: a function that will be called with each item of
+        ``iterable`` before yielding its result.
+    :param iterable: iterator to wrap.
+    :param other_closeables: other resources to attempt to close.
+    """
+    __slots__ = ('func',)
+
+    def __init__(self, function, iterable, other_closeables=None):
+        self.func = function
+        super(ClosingMapper, self).__init__(iterable, other_closeables)
+
+    def _get_next_item(self):
+        return self.func(super(ClosingMapper, self)._get_next_item())
+
+
+class CloseableChain(ClosingIterator):
+    """
+    Like itertools.chain, but with a close method that will attempt to invoke
+    its sub-iterators' close methods, if any.
+    """
+
+    def __init__(self, *iterables):
+        chained_iter = itertools.chain(*iterables)
+        super(CloseableChain, self).__init__(chained_iter, iterables)
 
 
 def reiterate(iterable):
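``ClosingIterator`` becomes the shared base for swift's closing-iterator helpers (``ClosingMapper``, ``CloseableChain`` and, further down, ``StringAlong`` and ``CooperativeIterator``): ``close()`` reaches the wrapped iterator and any extra closeables exactly once, and an exception during iteration also triggers the close. A small sketch, assuming the class is exported from ``swift.common.utils`` as defined here:

```python
from swift.common.utils import ClosingIterator


class Resource(object):
    closed = False

    def close(self):
        self.closed = True


def chunks(resource):
    try:
        yield 'chunk-1'
        yield 'chunk-2'
    finally:
        # reached via GeneratorExit when the wrapper is closed early
        resource.close()


res, conn = Resource(), Resource()
it = ClosingIterator(chunks(res), other_closeables=[conn])
print(next(it))  # 'chunk-1'
it.close()       # closes the half-consumed generator, then conn
assert res.closed and conn.closed
```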
@@ -4040,7 +2688,7 @@ def closing_if_possible(maybe_closable):
     close_if_possible(maybe_closable)
 
 
-def drain_and_close(response_or_app_iter):
+def drain_and_close(response_or_app_iter, read_limit=None):
     """
     Drain and close a swob or WSGI response.
 
@@ -4050,9 +2698,26 @@ def drain_and_close(response_or_app_iter):
     app_iter = getattr(response_or_app_iter, 'app_iter', response_or_app_iter)
     if app_iter is None:  # for example, if we used the Response.body property
         return
-    for _chunk in app_iter:
-        pass
-    close_if_possible(app_iter)
+    bytes_read = 0
+    with closing_if_possible(app_iter):
+        for chunk in app_iter:
+            bytes_read += len(chunk)
+            if read_limit is not None and bytes_read >= read_limit:
+                break
+
+
+def friendly_close(resp):
+    """
+    Close a swob or WSGI response and maybe drain it.
+
+    It's basically free to "read" a HEAD or HTTPException response - the bytes
+    are probably already in our network buffers. For a larger response we
+    could possibly burn a lot of CPU/network trying to drain an un-used
+    response. This method will read up to DEFAULT_DRAIN_LIMIT bytes to avoid
+    logging a 499 in the proxy when it would otherwise be easy to just throw
+    away the small/empty body.
+    """
+    return drain_and_close(resp, read_limit=DEFAULT_DRAIN_LIMIT)
 
 
 _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
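Previously ``drain_and_close`` read a response to exhaustion just to close it cleanly; the new ``read_limit`` caps the drain, and ``friendly_close`` applies the module's ``DEFAULT_DRAIN_LIMIT`` so a small or empty body can be thrown away cheaply. A sketch with a plain generator standing in for a WSGI app_iter (names assumed exported from ``swift.common.utils``):

```python
from swift.common.utils import drain_and_close, friendly_close


def app_iter(n, chunk=b'x' * 1024):
    for _ in range(n):
        yield chunk


drain_and_close(app_iter(4))                       # fully drains a small body
drain_and_close(app_iter(10**6), read_limit=2048)  # stops after ~2KiB, closes
friendly_close(app_iter(10**6))                    # same, with default limit
```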
@@ -4148,16 +2813,6 @@ def clean_content_type(value):
     return value
 
 
-def quote(value, safe='/'):
-    """
-    Patched version of urllib.quote that encodes utf-8 strings before quoting
-    """
-    quoted = _quote(get_valid_utf8_str(value), safe)
-    if isinstance(value, six.binary_type):
-        quoted = quoted.encode('utf-8')
-    return quoted
-
-
 def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
     """
     Returns an expiring object container name for given X-Delete-At and
@@ -4396,6 +3051,47 @@ def document_iters_to_multipart_byteranges(ranges_iter, boundary):
     yield terminator
 
 
+class StringAlong(ClosingIterator):
+    """
+    This iterator wraps and iterates over a first iterator until it stops, and
+    then iterates a second iterator, expecting it to stop immediately. This
+    "stringing along" of the second iterator is useful when the exit of the
+    second iterator must be delayed until the first iterator has stopped. For
+    example, when the second iterator has already yielded its item(s) but
+    has resources that mustn't be garbage collected until the first iterator
+    has stopped.
+
+    The second iterator is expected to have no more items and raise
+    StopIteration when called. If this is not the case then
+    ``unexpected_items_func`` is called.
+
+    :param iterable: a first iterator that is wrapped and iterated.
+    :param other_iter: a second iterator that is stopped once the first
+        iterator has stopped.
+    :param unexpected_items_func: a no-arg function that will be called if the
+        second iterator is found to have remaining items.
+    """
+    __slots__ = ('other_iter', 'unexpected_items_func')
+
+    def __init__(self, iterable, other_iter, unexpected_items_func):
+        super(StringAlong, self).__init__(iterable, [other_iter])
+        self.other_iter = other_iter
+        self.unexpected_items_func = unexpected_items_func
+
+    def _get_next_item(self):
+        try:
+            return super(StringAlong, self)._get_next_item()
+        except StopIteration:
+            try:
+                next(self.other_iter)
+            except StopIteration:
+                pass
+            else:
+                self.unexpected_items_func()
+            finally:
+                raise
+
+
 def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
                                          logger):
     """
@@ -4445,20 +3141,11 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
     # ranges_iter has a finally block that calls close_swift_conn, and
     # so if that finally block fires before we read response_body_iter,
     # there's nothing there.
-    def string_along(useful_iter, useless_iter_iter, logger):
-        with closing_if_possible(useful_iter):
-            for x in useful_iter:
-                yield x
-
-        try:
-            next(useless_iter_iter)
-        except StopIteration:
-            pass
-        else:
-            logger.warning(
-                "More than one part in a single-part response?")
-
-    return string_along(response_body_iter, ranges_iter, logger)
+    result = StringAlong(
+        response_body_iter, ranges_iter,
+        lambda: logger.warning(
+            "More than one part in a single-part response?"))
+    return result
 
 
 def multipart_byteranges_to_document_iters(input_file, boundary,
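The refactor replaces the inline ``string_along`` generator shown above with the reusable ``StringAlong`` class; behavior is unchanged: drain the single-part body, then advance the outer ``ranges_iter`` once and invoke the callback if it unexpectedly yields more parts. A toy sketch, assuming ``StringAlong`` is exported from ``swift.common.utils``:

```python
from swift.common.utils import StringAlong

parts = iter([iter([b'only-part'])])   # exactly one part, as expected
body = next(parts)
it = StringAlong(
    body, parts,
    lambda: print('More than one part in a single-part response?'))
print(list(it))  # [b'only-part']; parts is exhausted, so no warning fires
```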
@@ -4508,27 +3195,6 @@ def parse_content_disposition(header):
     return header, attributes
 
 
-try:
-    _test_md5 = hashlib.md5(usedforsecurity=False)  # nosec
-
-    def md5(string=b'', usedforsecurity=True):
-        """Return an md5 hashlib object using usedforsecurity parameter
-
-        For python distributions that support the usedforsecurity keyword
-        parameter, this passes the parameter through as expected.
-        See https://bugs.python.org/issue9216
-        """
-        return hashlib.md5(string, usedforsecurity=usedforsecurity)  # nosec
-except TypeError:
-    def md5(string=b'', usedforsecurity=True):
-        """Return an md5 hashlib object without usedforsecurity parameter
-
-        For python distributions that do not yet support this keyword
-        parameter, we drop the parameter
-        """
-        return hashlib.md5(string)  # nosec
-
-
 class NamespaceOuterBound(object):
     """
     A custom singleton type to be subclassed for the outer bounds of
@@ -4562,13 +3228,14 @@ class Namespace(object):
     A Namespace encapsulates parameters that define a range of the object
     namespace.
 
-    :param name: the name of the ``Namespace``.
+    :param name: the name of the ``Namespace``; this SHOULD take the form of a
+        path to a container i.e. <account_name>/<container_name>.
     :param lower: the lower bound of object names contained in the namespace;
         the lower bound *is not* included in the namespace.
     :param upper: the upper bound of object names contained in the namespace;
         the upper bound *is* included in the namespace.
     """
-    __slots__ = ('_lower', '_upper', 'name')
+    __slots__ = ('_lower', '_upper', '_name')
 
     @functools.total_ordering
     class MaxBound(NamespaceOuterBound):
@@ -4588,9 +3255,14 @@ class Namespace(object):
     def __init__(self, name, lower, upper):
         self._lower = Namespace.MIN
         self._upper = Namespace.MAX
+        # We deliberately do not validate that the name has the form 'a/c'
+        # because we want Namespace instantiation to be fast. Namespaces are
+        # typically created using state that has previously been serialized
+        # from a ShardRange instance, and the ShardRange will have validated
+        # the name format.
+        self._name = self._encode(name)
         self.lower = lower
         self.upper = upper
-        self.name = name
 
     def __iter__(self):
         yield 'name', str(self.name)
@@ -4618,7 +3290,7 @@ class Namespace(object):
     def __gt__(self, other):
         # a Namespace is greater than other if its entire namespace is greater
         # than other; if other is another Namespace that implies that this
-        # Namespace's lower must be less greater than or equal to the other
+        # Namespace's lower must be greater than or equal to the other
        # Namespace's upper
         if self.lower == Namespace.MIN:
             return False
@@ -4663,6 +3335,21 @@ class Namespace(object):
             raise TypeError('must be a string type')
         return self._encode(bound)
 
+    @property
+    def account(self):
+        return self._name.split('/')[0]
+
+    @property
+    def container(self):
+        # note: this may raise an IndexError if name does not have the expected
+        # form 'a/c'; that is a deliberate trade-off against the overhead of
+        # validating the name every time a Namespace is instantiated.
+        return self._name.split('/')[1]
+
+    @property
+    def name(self):
+        return self._name
+
     @property
     def lower(self):
         return self._lower
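With the name held as a single ``_name`` string, ``account`` and ``container`` become derived properties, and an ill-formed name now surfaces lazily (the ``IndexError`` noted in the comment) rather than at construction time. For example:

```python
from swift.common.utils import Namespace

ns = Namespace('AUTH_test/shards_c', lower='a', upper='m')
print(ns.name)       # AUTH_test/shards_c
print(ns.account)    # AUTH_test
print(ns.container)  # shards_c

try:
    Namespace('no-slash-here', lower='', upper='').container
except IndexError:
    print('validation is deferred to attribute access')
```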
@@ -4988,7 +3675,7 @@ class ShardRange(Namespace):
     range record and the most recent version of an attribute should be
     persisted.
 
-    :param name: the name of the shard range; this should take the form of a
+    :param name: the name of the shard range; this MUST take the form of a
         path to a container i.e. <account_name>/<container_name>.
     :param timestamp: a timestamp that represents the time at which the
         shard range's ``lower``, ``upper`` or ``deleted`` attributes were
@@ -5046,7 +3733,6 @@ class ShardRange(Namespace):
     CLEAVING_STATES = SHRINKING_STATES + SHARDING_STATES
 
     __slots__ = (
-        'account', 'container',
         '_timestamp', '_meta_timestamp', '_state_timestamp', '_epoch',
         '_deleted', '_state', '_count', '_bytes',
         '_tombstones', '_reported')
@@ -5057,12 +3743,12 @@ class ShardRange(Namespace):
                  deleted=False, state=None, state_timestamp=None, epoch=None,
                  reported=False, tombstones=-1, **kwargs):
         super(ShardRange, self).__init__(name=name, lower=lower, upper=upper)
-        self.account = self.container = self._timestamp = \
-            self._meta_timestamp = self._state_timestamp = self._epoch = None
+        self._validate_name(self.name)
+        self._timestamp = self._meta_timestamp = self._state_timestamp = \
+            self._epoch = None
         self._deleted = False
         self._state = None
 
-        self.name = name
         self.timestamp = timestamp
         self.deleted = deleted
         self.object_count = object_count
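``ShardRange`` keeps strict validation by calling ``_validate_name`` (defined below) from ``__init__``, so only the fast-path ``Namespace`` skips the check. A sketch:

```python
from swift.common.utils import ShardRange, Timestamp

sr = ShardRange('.shards_AUTH_test/shard-0', Timestamp.now(),
                lower='', upper='m')
print(sr.account, sr.container)   # .shards_AUTH_test shard-0

try:
    ShardRange('not-a-path', Timestamp.now())
except ValueError as err:
    print(err)  # Name must be of the form '<account>/<container>', ...
```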
@@ -5076,11 +3762,18 @@ class ShardRange(Namespace):
 
     @classmethod
     def sort_key(cls, sr):
+        return cls.sort_key_order(sr.name, sr.lower, sr.upper, sr.state)
+
+    @staticmethod
+    def sort_key_order(name, lower, upper, state):
+        # Use Namespace.MaxBound() for upper bound '', this will allow this
+        # record to be sorted correctly by upper.
+        upper = upper if upper else Namespace.MaxBound()
         # defines the sort order for shard ranges
         # note if this ever changes to *not* sort by upper first then it breaks
         # a key assumption for bisect, which is used by utils.find_namespace
         # with shard ranges.
-        return sr.upper, sr.state, sr.lower, sr.name
+        return upper, state, lower, name
 
     def is_child_of(self, parent):
         """
@@ -5221,16 +3914,24 @@ class ShardRange(Namespace):
 
     @property
     def name(self):
-        return '%s/%s' % (self.account, self.container)
-
-    @name.setter
-    def name(self, path):
-        path = self._encode(path)
-        if not path or len(path.split('/')) != 2 or not all(path.split('/')):
+        return self._name
+
+    @staticmethod
+    def _validate_name(name):
+        # Validate the name format is 'a/c'. The ShardRange class is typically
+        # used when shard state is created (e.g. by the sharder or
+        # swift-manage-shard-ranges), but it is not typically used in
+        # performance sensitive paths (e.g. listing namespaces), so we can
+        # afford the overhead of being more defensive here.
+        if not name or len(name.split('/')) != 2 or not all(name.split('/')):
             raise ValueError(
                 "Name must be of the form '<account>/<container>', got %r" %
-                path)
-        self.account, self.container = path.split('/')
+                name)
+        return name
+
+    @name.setter
+    def name(self, name):
+        self._name = self._validate_name(self._encode(name))
 
     @property
     def timestamp(self):
@@ -5533,7 +4234,7 @@ class ShardRangeList(UserList):
     def __getitem__(self, index):
         # workaround for py3 - not needed for py2.7,py3.8
         result = self.data[index]
-        return ShardRangeList(result) if type(result) == list else result
+        return ShardRangeList(result) if type(result) is list else result
 
     @property
     def lower(self):
@@ -5884,162 +4585,6 @@ def load_pkg_resource(group, uri):
     return entry_points[0].load()
 
 
-class PipeMutex(object):
-    """
-    Mutex using a pipe. Works across both greenlets and real threads, even
-    at the same time.
-    """
-
-    def __init__(self):
-        self.rfd, self.wfd = os.pipe()
-
-        # You can't create a pipe in non-blocking mode; you must set it
-        # later.
-        rflags = fcntl.fcntl(self.rfd, fcntl.F_GETFL)
-        fcntl.fcntl(self.rfd, fcntl.F_SETFL, rflags | os.O_NONBLOCK)
-        os.write(self.wfd, b'-')  # start unlocked
-
-        self.owner = None
-        self.recursion_depth = 0
-
-        # Usually, it's an error to have multiple greenthreads all waiting
-        # to read the same file descriptor. It's often a sign of inadequate
-        # concurrency control; for example, if you have two greenthreads
-        # trying to use the same memcache connection, they'll end up writing
-        # interleaved garbage to the socket or stealing part of each others'
-        # responses.
-        #
-        # In this case, we have multiple greenthreads waiting on the same
-        # file descriptor by design. This lets greenthreads in real thread A
-        # wait with greenthreads in real thread B for the same mutex.
-        # Therefore, we must turn off eventlet's multiple-reader detection.
-        #
-        # It would be better to turn off multiple-reader detection for only
-        # our calls to trampoline(), but eventlet does not support that.
-        eventlet.debug.hub_prevent_multiple_readers(False)
-
-    def acquire(self, blocking=True):
-        """
-        Acquire the mutex.
-
-        If called with blocking=False, returns True if the mutex was
-        acquired and False if it wasn't. Otherwise, blocks until the mutex
-        is acquired and returns True.
-
-        This lock is recursive; the same greenthread may acquire it as many
-        times as it wants to, though it must then release it that many times
-        too.
-        """
-        current_greenthread_id = id(eventlet.greenthread.getcurrent())
-        if self.owner == current_greenthread_id:
-            self.recursion_depth += 1
-            return True
-
-        while True:
-            try:
-                # If there is a byte available, this will read it and remove
-                # it from the pipe. If not, this will raise OSError with
-                # errno=EAGAIN.
-                os.read(self.rfd, 1)
-                self.owner = current_greenthread_id
-                return True
-            except OSError as err:
-                if err.errno != errno.EAGAIN:
-                    raise
-
-                if not blocking:
-                    return False
-
-                # Tell eventlet to suspend the current greenthread until
-                # self.rfd becomes readable. This will happen when someone
-                # else writes to self.wfd.
-                eventlet.hubs.trampoline(self.rfd, read=True)
-
-    def release(self):
-        """
-        Release the mutex.
-        """
-        current_greenthread_id = id(eventlet.greenthread.getcurrent())
-        if self.owner != current_greenthread_id:
-            raise RuntimeError("cannot release un-acquired lock")
-
-        if self.recursion_depth > 0:
-            self.recursion_depth -= 1
-            return
-
-        self.owner = None
-        os.write(self.wfd, b'X')
-
-    def close(self):
-        """
-        Close the mutex. This releases its file descriptors.
-
-        You can't use a mutex after it's been closed.
-        """
-        if self.wfd is not None:
-            os.close(self.rfd)
-            self.rfd = None
-            os.close(self.wfd)
-            self.wfd = None
-        self.owner = None
-        self.recursion_depth = 0
-
-    def __del__(self):
-        # We need this so we don't leak file descriptors. Otherwise, if you
-        # call get_logger() and don't explicitly dispose of it by calling
-        # logger.logger.handlers[0].lock.close() [1], the pipe file
-        # descriptors are leaked.
-        #
-        # This only really comes up in tests. Swift processes tend to call
-        # get_logger() once and then hang on to it until they exit, but the
-        # test suite calls get_logger() a lot.
-        #
-        # [1] and that's a completely ridiculous thing to expect callers to
-        # do, so nobody does it and that's okay.
-        self.close()
-
-
-class NoopMutex(object):
-    """
-    "Mutex" that doesn't lock anything.
-
-    We only allow our syslog logging to be configured via UDS or UDP, neither
-    of which have the message-interleaving trouble you'd expect from TCP or
-    file handlers.
-    """
-
-    def __init__(self):
-        # Usually, it's an error to have multiple greenthreads all waiting
-        # to write to the same file descriptor. It's often a sign of inadequate
-        # concurrency control; for example, if you have two greenthreads
-        # trying to use the same memcache connection, they'll end up writing
-        # interleaved garbage to the socket or stealing part of each others'
-        # responses.
-        #
-        # In this case, we have multiple greenthreads waiting on the same
-        # (logging) file descriptor by design. So, similar to the PipeMutex,
-        # we must turn off eventlet's multiple-waiter detection.
-        #
-        # It would be better to turn off multiple-reader detection for only
-        # the logging socket fd, but eventlet does not support that.
-        eventlet.debug.hub_prevent_multiple_readers(False)
-
-    def acquire(self, blocking=True):
-        pass
-
-    def release(self):
-        pass
-
-
-class ThreadSafeSysLogHandler(SysLogHandler):
-    def createLock(self):
-        if config_true_value(os.environ.get(
-                'SWIFT_NOOP_LOGGING_MUTEX') or 'true'):
-            self.lock = NoopMutex()
-        else:
-            self.lock = PipeMutex()
-
-
 def round_robin_iter(its):
     """
     Takes a list of iterators, yield an element from each in a round-robin
@@ -6228,17 +4773,28 @@ def get_db_files(db_path):
     return sorted(results)
 
 
-def systemd_notify(logger=None):
+def systemd_notify(logger=None, msg=b"READY=1"):
     """
-    Notify the service manager that started this process, if it is
-    systemd-compatible, that this process correctly started. To do so,
-    it communicates through a Unix socket stored in environment variable
-    NOTIFY_SOCKET. More information can be found in systemd documentation:
+    Send systemd-compatible notifications.
+
+    Notify the service manager that started this process, if it has set the
+    NOTIFY_SOCKET environment variable. For example, systemd will set this
+    when the unit has ``Type=notify``. More information can be found in
+    systemd documentation:
     https://www.freedesktop.org/software/systemd/man/sd_notify.html
 
+    Common messages include::
+
+        READY=1
+        RELOADING=1
+        STOPPING=1
+        STATUS=<some string>
+
     :param logger: a logger object
+    :param msg: the message to send
     """
-    msg = b'READY=1'
+    if not isinstance(msg, bytes):
+        msg = msg.encode('utf8')
     notify_socket = os.getenv('NOTIFY_SOCKET')
     if notify_socket:
         if notify_socket.startswith('@'):
@@ -6249,7 +4805,6 @@ def systemd_notify(logger=None):
         try:
             sock.connect(notify_socket)
             sock.sendall(msg)
-            del os.environ['NOTIFY_SOCKET']
         except EnvironmentError:
             if logger:
                 logger.debug("Systemd notification failed", exc_info=True)
@@ -6320,7 +4875,7 @@ class Watchdog(object):
     :param key: timeout id, as returned by start()
     """
     try:
-            del(self._timeouts[key])
+            del self._timeouts[key]
     except KeyError:
         pass
 
@@ -6331,6 +4886,14 @@
         if self._run_gth is None:
             self._run_gth = eventlet.spawn(self.run)
 
+    def kill(self):
+        """
+        Stop the watchdog greenthread.
+        """
+        if self._run_gth is not None:
+            self._run_gth.kill()
+            self._run_gth = None
+
     def run(self):
         while True:
             self._run()
@@ -6385,7 +4948,7 @@ class WatchdogTimeout(object):
         self.watchdog.stop(self.key)
 
 
-class CooperativeIterator(object):
+class CooperativeIterator(ClosingIterator):
     """
     Wrapper to make a deliberate periodic call to ``sleep()`` while iterating
     over wrapped iterator, providing an opportunity to switch greenthreads.
@@ -6405,26 +4968,20 @@ class CooperativeIterator(object):
 
     :param iterable: iterator to wrap.
     :param period: number of items yielded from this iterator between calls to
-        ``sleep()``.
+        ``sleep()``; a negative value or 0 mean that cooperative sleep will be
+        disabled.
     """
-    __slots__ = ('period', 'count', 'wrapped_iter')
+    __slots__ = ('period', 'count')
 
     def __init__(self, iterable, period=5):
-        self.wrapped_iter = iterable
+        super(CooperativeIterator, self).__init__(iterable)
         self.count = 0
-        self.period = period
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        if self.count >= self.period:
-            self.count = 0
-            sleep()
-        self.count += 1
-        return next(self.wrapped_iter)
-
-    __next__ = next
-
-    def close(self):
-        close_if_possible(self.wrapped_iter)
+        self.period = max(0, period or 0)
+
+    def _get_next_item(self):
+        if self.period:
+            if self.count >= self.period:
+                self.count = 0
+                sleep()
+            self.count += 1
+        return super(CooperativeIterator, self)._get_next_item()
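Rebasing ``CooperativeIterator`` on ``ClosingIterator`` removes the duplicated iteration and close plumbing, and a non-positive ``period`` now disables the cooperative ``sleep()`` entirely. A sketch (``sleep`` is eventlet's, yielding to other greenthreads every ``period`` items):

```python
from swift.common.utils import CooperativeIterator


def chunks():
    for _ in range(12):
        yield b'x' * 1024


# calls sleep() after every 5 items (the default period), giving other
# greenthreads a chance to run during a long drain
for _chunk in CooperativeIterator(chunks()):
    pass

# period=0 disables the cooperative sleep entirely
assert len(list(CooperativeIterator(chunks(), period=0))) == 12
```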