swift 2.31.1__py2.py3-none-any.whl → 2.32.1__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/cli/info.py +9 -2
- swift/cli/ringbuilder.py +5 -1
- swift/common/container_sync_realms.py +6 -7
- swift/common/daemon.py +7 -3
- swift/common/db.py +22 -7
- swift/common/db_replicator.py +19 -20
- swift/common/direct_client.py +63 -14
- swift/common/internal_client.py +24 -3
- swift/common/manager.py +43 -44
- swift/common/memcached.py +168 -74
- swift/common/middleware/__init__.py +4 -0
- swift/common/middleware/account_quotas.py +98 -40
- swift/common/middleware/backend_ratelimit.py +6 -4
- swift/common/middleware/crossdomain.py +21 -8
- swift/common/middleware/listing_formats.py +26 -38
- swift/common/middleware/proxy_logging.py +12 -9
- swift/common/middleware/s3api/controllers/bucket.py +8 -2
- swift/common/middleware/s3api/s3api.py +9 -4
- swift/common/middleware/s3api/s3request.py +32 -24
- swift/common/middleware/s3api/s3response.py +10 -1
- swift/common/middleware/tempauth.py +9 -10
- swift/common/middleware/versioned_writes/__init__.py +0 -3
- swift/common/middleware/versioned_writes/object_versioning.py +22 -5
- swift/common/middleware/x_profile/html_viewer.py +1 -1
- swift/common/middleware/xprofile.py +5 -0
- swift/common/request_helpers.py +1 -2
- swift/common/ring/ring.py +22 -19
- swift/common/swob.py +2 -1
- swift/common/{utils.py → utils/__init__.py} +610 -1146
- swift/common/utils/ipaddrs.py +256 -0
- swift/common/utils/libc.py +345 -0
- swift/common/utils/timestamp.py +399 -0
- swift/common/wsgi.py +70 -39
- swift/container/backend.py +106 -38
- swift/container/server.py +11 -2
- swift/container/sharder.py +34 -15
- swift/locale/de/LC_MESSAGES/swift.po +1 -320
- swift/locale/en_GB/LC_MESSAGES/swift.po +1 -347
- swift/locale/es/LC_MESSAGES/swift.po +1 -279
- swift/locale/fr/LC_MESSAGES/swift.po +1 -209
- swift/locale/it/LC_MESSAGES/swift.po +1 -207
- swift/locale/ja/LC_MESSAGES/swift.po +2 -278
- swift/locale/ko_KR/LC_MESSAGES/swift.po +3 -303
- swift/locale/pt_BR/LC_MESSAGES/swift.po +1 -204
- swift/locale/ru/LC_MESSAGES/swift.po +1 -203
- swift/locale/tr_TR/LC_MESSAGES/swift.po +1 -192
- swift/locale/zh_CN/LC_MESSAGES/swift.po +1 -192
- swift/locale/zh_TW/LC_MESSAGES/swift.po +1 -193
- swift/obj/diskfile.py +19 -6
- swift/obj/server.py +20 -6
- swift/obj/ssync_receiver.py +19 -9
- swift/obj/ssync_sender.py +10 -10
- swift/proxy/controllers/account.py +7 -7
- swift/proxy/controllers/base.py +374 -366
- swift/proxy/controllers/container.py +112 -53
- swift/proxy/controllers/obj.py +254 -390
- swift/proxy/server.py +3 -8
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-drive-audit +45 -14
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-proxy-server +1 -1
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/AUTHORS +4 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/METADATA +32 -35
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/RECORD +103 -100
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/WHEEL +1 -1
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/entry_points.txt +0 -1
- swift-2.32.1.dist-info/pbr.json +1 -0
- swift-2.31.1.dist-info/pbr.json +0 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-audit +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-reaper +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-config +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-reconciler +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-sharder +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-sync +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-updater +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-dispersion-populate +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-dispersion-report +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-form-signature +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-get-nodes +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-init +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-expirer +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-reconstructor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-relinker +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-updater +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-oldies +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-orphans +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-recon +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-recon-cron +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-reconciler-enqueue +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-builder +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-builder-analyzer +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-composer +0 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/LICENSE +0 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/top_level.txt +0 -0
@@ -26,7 +26,6 @@ import fcntl
 import grp
 import hashlib
 import json
-import math
 import operator
 import os
 import pwd
@@ -37,7 +36,6 @@ import sys
 import time
 import uuid
 import functools
-import platform
 import email.parser
 from random import random, shuffle
 from contextlib import contextmanager, closing
@@ -69,7 +67,6 @@ from eventlet.event import Event
 from eventlet.green import socket, threading
 import eventlet.hubs
 import eventlet.queue
-import netifaces
 import codecs
 utf8_decoder = codecs.getdecoder('utf-8')
 utf8_encoder = codecs.getencoder('utf-8')
@@ -89,36 +86,50 @@ from six.moves.urllib.parse import quote as _quote, unquote
 from six.moves.urllib.parse import urlparse
 from six.moves import UserList
 
-from swift import gettext_ as _
 import swift.common.exceptions
 from swift.common.http import is_server_error
 from swift.common.header_key_dict import HeaderKeyDict
 from swift.common.linkat import linkat
 
 # For backwards compatability with 3rd party middlewares
-from swift.common.registry import register_swift_info, get_swift_info
-
-
+from swift.common.registry import register_swift_info, get_swift_info  # noqa
+from swift.common.utils.libc import (  # noqa
+    F_SETPIPE_SZ,
+    load_libc_function,
+    drop_buffer_cache,
+    get_md5_socket,
+    modify_priority,
+    _LibcWrapper,
+)
+from swift.common.utils.timestamp import (  # noqa
+    NORMAL_FORMAT,
+    INTERNAL_FORMAT,
+    SHORT_FORMAT,
+    MAX_OFFSET,
+    PRECISION,
+    Timestamp,
+    encode_timestamps,
+    decode_timestamps,
+    normalize_timestamp,
+    EPOCH,
+    last_modified_date_to_timestamp,
+    normalize_delete_at_timestamp,
+)
+from swift.common.utils.ipaddrs import (  # noqa
+    is_valid_ip,
+    is_valid_ipv4,
+    is_valid_ipv6,
+    expand_ipv6,
+    parse_socket_string,
+    whataremyips,
+)
 from logging.handlers import SysLogHandler
 import logging
-
-logging.threading = eventlet.green.threading
-logging._lock = logging.threading.RLock()
-# setup notice level logging
+
 NOTICE = 25
-logging.addLevelName(NOTICE, 'NOTICE')
-SysLogHandler.priority_map['NOTICE'] = 'notice'
 
 # These are lazily pulled from libc elsewhere
 _sys_fallocate = None
-_posix_fadvise = None
-_libc_socket = None
-_libc_bind = None
-_libc_accept = None
-# see man -s 2 setpriority
-_libc_setpriority = None
-# see man -s 2 syscall
-_posix_syscall = None
 
 # If set to non-zero, fallocate routines will fail based on free space
 # available being at or below this amount, in bytes.
@@ -131,56 +142,6 @@ FALLOCATE_IS_PERCENT = False
 FALLOC_FL_KEEP_SIZE = 1
 FALLOC_FL_PUNCH_HOLE = 2
 
-# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
-PRIO_PROCESS = 0
-
-
-# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there
-# are many like it, but this one is mine, see man -s 2 ioprio_set
-def NR_ioprio_set():
-    """Give __NR_ioprio_set value for your system."""
-    architecture = os.uname()[4]
-    arch_bits = platform.architecture()[0]
-    # check if supported system, now support x86_64 and AArch64
-    if architecture == 'x86_64' and arch_bits == '64bit':
-        return 251
-    elif architecture == 'aarch64' and arch_bits == '64bit':
-        return 30
-    raise OSError("Swift doesn't support ionice priority for %s %s" %
-                  (architecture, arch_bits))
-
-
-# this syscall integer probably only works on x86_64 linux systems, you
-# can check if it's correct on yours with something like this:
-"""
-#include <stdio.h>
-#include <sys/syscall.h>
-
-int main(int argc, const char* argv[]) {
-    printf("%d\n", __NR_ioprio_set);
-    return 0;
-}
-"""
-
-# this is the value for "which" that says our who value will be a pid
-# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_WHO_PROCESS = 1
-
-
-IO_CLASS_ENUM = {
-    'IOPRIO_CLASS_RT': 1,
-    'IOPRIO_CLASS_BE': 2,
-    'IOPRIO_CLASS_IDLE': 3,
-}
-
-# the IOPRIO_PRIO_VALUE "macro" is also pulled from
-# /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_CLASS_SHIFT = 13
-
-
-def IOPRIO_PRIO_VALUE(class_, data):
-    return (((class_) << IOPRIO_CLASS_SHIFT) | data)
-
 
 # Used by hash_path to offer a bit more security when generating hashes for
 # paths. It simply appends this value to all paths; guessing the hash a path
@@ -194,13 +155,8 @@ SWIFT_CONF_FILE = '/etc/swift/swift.conf'
 # about them. We ask anyway just in case that ever gets fixed.
 #
 # The values were copied from the Linux 3.x kernel headers.
-AF_ALG = getattr(socket, 'AF_ALG', 38)
-F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
 O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY)
 
-# Used by the parse_socket_string() function to validate IPv6 addresses
-IPV6_RE = re.compile(r"^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")
-
 MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'
 RESERVED_BYTE = b'\x00'
 RESERVED_STR = u'\x00'
@@ -500,6 +456,17 @@ def config_read_prefixed_options(conf, prefix_name, defaults):
     return params
 
 
+def logging_monkey_patch():
+    # explicitly patch the logging lock
+    logging._lock = logging.threading.RLock()
+    # setup notice level logging
+    logging.addLevelName(NOTICE, 'NOTICE')
+    SysLogHandler.priority_map['NOTICE'] = 'notice'
+    # Trying to log threads while monkey-patched can lead to deadlocks; see
+    # https://bugs.launchpad.net/swift/+bug/1895739
+    logging.logThreads = 0
+
+
 def eventlet_monkey_patch():
     """
     Install the appropriate Eventlet monkey patches.
@@ -510,13 +477,14 @@ def eventlet_monkey_patch():
     # if thread is monkey-patched.
     eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
                                   thread=True)
-    # Trying to log threads while monkey-patched can lead to deadlocks; see
-    # https://bugs.launchpad.net/swift/+bug/1895739
-    logging.logThreads = 0
 
 
-def
-
+def monkey_patch():
+    """
+    Apply all swift monkey patching consistently in one place.
+    """
+    eventlet_monkey_patch()
+    logging_monkey_patch()
 
 
 def validate_configuration():
@@ -526,39 +494,6 @@ def validate_configuration():
         sys.exit("Error: %s" % e)
 
 
-def load_libc_function(func_name, log_error=True,
-                       fail_if_missing=False, errcheck=False):
-    """
-    Attempt to find the function in libc, otherwise return a no-op func.
-
-    :param func_name: name of the function to pull from libc.
-    :param log_error: log an error when a function can't be found
-    :param fail_if_missing: raise an exception when a function can't be found.
-                            Default behavior is to return a no-op function.
-    :param errcheck: boolean, if true install a wrapper on the function
-                     to check for a return values of -1 and call
-                     ctype.get_errno and raise an OSError
-    """
-    try:
-        libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
-        func = getattr(libc, func_name)
-    except AttributeError:
-        if fail_if_missing:
-            raise
-        if log_error:
-            logging.warning(_("Unable to locate %s in libc. Leaving as a "
-                              "no-op."), func_name)
-        return noop_libc_function
-    if errcheck:
-        def _errcheck(result, f, args):
-            if result == -1:
-                errcode = ctypes.get_errno()
-                raise OSError(errcode, os.strerror(errcode))
-            return result
-        func.errcheck = _errcheck
-    return func
-
-
 def generate_trans_id(trans_id_suffix):
     return 'tx%s-%010x%s' % (
         uuid.uuid4().hex[:21], int(time.time()), quote(trans_id_suffix))
@@ -585,6 +520,7 @@ class _UTC(datetime.tzinfo):
     """
    A tzinfo class for datetime objects that returns a 0 timedelta (UTC time)
     """
+
     def dst(self, dt):
         return datetime.timedelta(0)
     utcoffset = dst
@@ -923,53 +859,6 @@ def fs_has_free_space(fs_path, space_needed, is_percent):
     return free_bytes >= space_needed
 
 
-class _LibcWrapper(object):
-    """
-    A callable object that forwards its calls to a C function from libc.
-
-    These objects are lazy. libc will not be checked until someone tries to
-    either call the function or check its availability.
-
-    _LibcWrapper objects have an "available" property; if true, then libc
-    has the function of that name. If false, then calls will fail with a
-    NotImplementedError.
-    """
-    def __init__(self, func_name):
-        self._func_name = func_name
-        self._func_handle = None
-        self._loaded = False
-
-    def _ensure_loaded(self):
-        if not self._loaded:
-            func_name = self._func_name
-            try:
-                # Keep everything in this try-block in local variables so
-                # that a typo in self.some_attribute_name doesn't raise a
-                # spurious AttributeError.
-                func_handle = load_libc_function(
-                    func_name, fail_if_missing=True)
-                self._func_handle = func_handle
-            except AttributeError:
-                # We pass fail_if_missing=True to load_libc_function and
-                # then ignore the error. It's weird, but otherwise we have
-                # to check if self._func_handle is noop_libc_function, and
-                # that's even weirder.
-                pass
-            self._loaded = True
-
-    @property
-    def available(self):
-        self._ensure_loaded()
-        return bool(self._func_handle)
-
-    def __call__(self, *args):
-        if self.available:
-            return self._func_handle(*args)
-        else:
-            raise NotImplementedError(
-                "No function %r found in libc" % self._func_name)
-
-
 _fallocate_enabled = True
 _fallocate_warned_about_missing = False
 _sys_fallocate = _LibcWrapper('fallocate')
@@ -1036,8 +925,8 @@ def fallocate(fd, size, offset=0):
         # but just once per process, and then do nothing.
         global _fallocate_warned_about_missing
         if not _fallocate_warned_about_missing:
-            logging.warning(
-
+            logging.warning("Unable to locate fallocate, posix_fallocate in "
+                            "libc. Leaving as a no-op.")
             _fallocate_warned_about_missing = True
         return
 
@@ -1121,410 +1010,14 @@ def fsync_dir(dirpath):
         if err.errno == errno.ENOTDIR:
             # Raise error if someone calls fsync_dir on a non-directory
             raise
-        logging.warning(
-
+        logging.warning('Unable to perform fsync() on directory %(dir)s:'
+                        ' %(err)s',
                         {'dir': dirpath, 'err': os.strerror(err.errno)})
     finally:
         if dirfd:
             os.close(dirfd)
 
 
-def drop_buffer_cache(fd, offset, length):
-    """
-    Drop 'buffer' cache for the given range of the given file.
-
-    :param fd: file descriptor
-    :param offset: start offset
-    :param length: length
-    """
-    global _posix_fadvise
-    if _posix_fadvise is None:
-        _posix_fadvise = load_libc_function('posix_fadvise64')
-    # 4 means "POSIX_FADV_DONTNEED"
-    ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
-                         ctypes.c_uint64(length), 4)
-    if ret != 0:
-        logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
-                        "-> %(ret)s", {'fd': fd, 'offset': offset,
-                                       'length': length, 'ret': ret})
-
-
-NORMAL_FORMAT = "%016.05f"
-INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
-SHORT_FORMAT = NORMAL_FORMAT + '_%x'
-MAX_OFFSET = (16 ** 16) - 1
-PRECISION = 1e-5
-# Setting this to True will cause the internal format to always display
-# extended digits - even when the value is equivalent to the normalized form.
-# This isn't ideal during an upgrade when some servers might not understand
-# the new time format - but flipping it to True works great for testing.
-FORCE_INTERNAL = False  # or True
-
-
-@functools.total_ordering
-class Timestamp(object):
-    """
-    Internal Representation of Swift Time.
-
-    The normalized form of the X-Timestamp header looks like a float
-    with a fixed width to ensure stable string sorting - normalized
-    timestamps look like "1402464677.04188"
-
-    To support overwrites of existing data without modifying the original
-    timestamp but still maintain consistency a second internal offset vector
-    is append to the normalized timestamp form which compares and sorts
-    greater than the fixed width float format but less than a newer timestamp.
-    The internalized format of timestamps looks like
-    "1402464677.04188_0000000000000000" - the portion after the underscore is
-    the offset and is a formatted hexadecimal integer.
-
-    The internalized form is not exposed to clients in responses from
-    Swift. Normal client operations will not create a timestamp with an
-    offset.
-
-    The Timestamp class in common.utils supports internalized and
-    normalized formatting of timestamps and also comparison of timestamp
-    values. When the offset value of a Timestamp is 0 - it's considered
-    insignificant and need not be represented in the string format; to
-    support backwards compatibility during a Swift upgrade the
-    internalized and normalized form of a Timestamp with an
-    insignificant offset are identical. When a timestamp includes an
-    offset it will always be represented in the internalized form, but
-    is still excluded from the normalized form. Timestamps with an
-    equivalent timestamp portion (the float part) will compare and order
-    by their offset. Timestamps with a greater timestamp portion will
-    always compare and order greater than a Timestamp with a lesser
-    timestamp regardless of it's offset. String comparison and ordering
-    is guaranteed for the internalized string format, and is backwards
-    compatible for normalized timestamps which do not include an offset.
-    """
-
-    def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
-        """
-        Create a new Timestamp.
-
-        :param timestamp: time in seconds since the Epoch, may be any of:
-
-            * a float or integer
-            * normalized/internalized string
-            * another instance of this class (offset is preserved)
-
-        :param offset: the second internal offset vector, an int
-        :param delta: deca-microsecond difference from the base timestamp
-                      param, an int
-        """
-        if isinstance(timestamp, bytes):
-            timestamp = timestamp.decode('ascii')
-        if isinstance(timestamp, six.string_types):
-            base, base_offset = timestamp.partition('_')[::2]
-            self.timestamp = float(base)
-            if '_' in base_offset:
-                raise ValueError('invalid literal for int() with base 16: '
-                                 '%r' % base_offset)
-            if base_offset:
-                self.offset = int(base_offset, 16)
-            else:
-                self.offset = 0
-        else:
-            self.timestamp = float(timestamp)
-            self.offset = getattr(timestamp, 'offset', 0)
-        # increment offset
-        if offset >= 0:
-            self.offset += offset
-        else:
-            raise ValueError('offset must be non-negative')
-        if self.offset > MAX_OFFSET:
-            raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
-        self.raw = int(round(self.timestamp / PRECISION))
-        # add delta
-        if delta:
-            self.raw = self.raw + delta
-            if self.raw <= 0:
-                raise ValueError(
-                    'delta must be greater than %d' % (-1 * self.raw))
-            self.timestamp = float(self.raw * PRECISION)
-        if check_bounds:
-            if self.timestamp < 0:
-                raise ValueError('timestamp cannot be negative')
-            if self.timestamp >= 10000000000:
-                raise ValueError('timestamp too large')
-
-    @classmethod
-    def now(cls, offset=0, delta=0):
-        return cls(time.time(), offset=offset, delta=delta)
-
-    def __repr__(self):
-        return INTERNAL_FORMAT % (self.timestamp, self.offset)
-
-    def __str__(self):
-        raise TypeError('You must specify which string format is required')
-
-    def __float__(self):
-        return self.timestamp
-
-    def __int__(self):
-        return int(self.timestamp)
-
-    def __nonzero__(self):
-        return bool(self.timestamp or self.offset)
-
-    def __bool__(self):
-        return self.__nonzero__()
-
-    @property
-    def normal(self):
-        return NORMAL_FORMAT % self.timestamp
-
-    @property
-    def internal(self):
-        if self.offset or FORCE_INTERNAL:
-            return INTERNAL_FORMAT % (self.timestamp, self.offset)
-        else:
-            return self.normal
-
-    @property
-    def short(self):
-        if self.offset or FORCE_INTERNAL:
-            return SHORT_FORMAT % (self.timestamp, self.offset)
-        else:
-            return self.normal
-
-    @property
-    def isoformat(self):
-        """
-        Get an isoformat string representation of the 'normal' part of the
-        Timestamp with microsecond precision and no trailing timezone, for
-        example::
-
-            1970-01-01T00:00:00.000000
-
-        :return: an isoformat string
-        """
-        t = float(self.normal)
-        if six.PY3:
-            # On Python 3, round manually using ROUND_HALF_EVEN rounding
-            # method, to use the same rounding method than Python 2. Python 3
-            # used a different rounding method, but Python 3.4.4 and 3.5.1 use
-            # again ROUND_HALF_EVEN as Python 2.
-            # See https://bugs.python.org/issue23517
-            frac, t = math.modf(t)
-            us = round(frac * 1e6)
-            if us >= 1000000:
-                t += 1
-                us -= 1000000
-            elif us < 0:
-                t -= 1
-                us += 1000000
-            dt = datetime.datetime.utcfromtimestamp(t)
-            dt = dt.replace(microsecond=us)
-        else:
-            dt = datetime.datetime.utcfromtimestamp(t)
-
-        isoformat = dt.isoformat()
-        # python isoformat() doesn't include msecs when zero
-        if len(isoformat) < len("1970-01-01T00:00:00.000000"):
-            isoformat += ".000000"
-        return isoformat
-
-    @classmethod
-    def from_isoformat(cls, date_string):
-        """
-        Parse an isoformat string representation of time to a Timestamp object.
-
-        :param date_string: a string formatted as per an Timestamp.isoformat
-            property.
-        :return: an instance of this class.
-        """
-        start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
-        delta = start - EPOCH
-        # This calculation is based on Python 2.7's Modules/datetimemodule.c,
-        # function delta_to_microseconds(), but written in Python.
-        return cls(delta.total_seconds())
-
-    def ceil(self):
-        """
-        Return the 'normal' part of the timestamp rounded up to the nearest
-        integer number of seconds.
-
-        This value should be used whenever the second-precision Last-Modified
-        time of a resource is required.
-
-        :return: a float value with second precision.
-        """
-        return math.ceil(float(self))
-
-    def __eq__(self, other):
-        if other is None:
-            return False
-        if not isinstance(other, Timestamp):
-            try:
-                other = Timestamp(other, check_bounds=False)
-            except ValueError:
-                return False
-        return self.internal == other.internal
-
-    def __ne__(self, other):
-        return not (self == other)
-
-    def __lt__(self, other):
-        if other is None:
-            return False
-        if not isinstance(other, Timestamp):
-            other = Timestamp(other, check_bounds=False)
-        if other.timestamp < 0:
-            return False
-        if other.timestamp >= 10000000000:
-            return True
-        return self.internal < other.internal
-
-    def __hash__(self):
-        return hash(self.internal)
-
-    def __invert__(self):
-        if self.offset:
-            raise ValueError('Cannot invert timestamps with offsets')
-        return Timestamp((999999999999999 - self.raw) * PRECISION)
-
-
-def encode_timestamps(t1, t2=None, t3=None, explicit=False):
-    """
-    Encode up to three timestamps into a string. Unlike a Timestamp object, the
-    encoded string does NOT used fixed width fields and consequently no
-    relative chronology of the timestamps can be inferred from lexicographic
-    sorting of encoded timestamp strings.
-
-    The format of the encoded string is:
-        <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
-
-    i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
-    otherwise the time offsets for t2 and t3 are appended. If explicit is True
-    then the offsets for t2 and t3 are always appended even if zero.
-
-    Note: any offset value in t1 will be preserved, but offsets on t2 and t3
-    are not preserved. In the anticipated use cases for this method (and the
-    inverse decode_timestamps method) the timestamps passed as t2 and t3 are
-    not expected to have offsets as they will be timestamps associated with a
-    POST request. In the case where the encoding is used in a container objects
-    table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
-    content type and metadata times (if different from the data file) i.e.
-    correspond to POST timestamps. In the case where the encoded form is used
-    in a .meta file name, t1 and t2 both correspond to POST timestamps.
-    """
-    form = '{0}'
-    values = [t1.short]
-    if t2 is not None:
-        t2_t1_delta = t2.raw - t1.raw
-        explicit = explicit or (t2_t1_delta != 0)
-        values.append(t2_t1_delta)
-        if t3 is not None:
-            t3_t2_delta = t3.raw - t2.raw
-            explicit = explicit or (t3_t2_delta != 0)
-            values.append(t3_t2_delta)
-    if explicit:
-        form += '{1:+x}'
-        if t3 is not None:
-            form += '{2:+x}'
-    return form.format(*values)
-
-
-def decode_timestamps(encoded, explicit=False):
-    """
-    Parses a string of the form generated by encode_timestamps and returns
-    a tuple of the three component timestamps. If explicit is False, component
-    timestamps that are not explicitly encoded will be assumed to have zero
-    delta from the previous component and therefore take the value of the
-    previous component. If explicit is True, component timestamps that are
-    not explicitly encoded will be returned with value None.
-    """
-    # TODO: some tests, e.g. in test_replicator, put float timestamps values
-    # into container db's, hence this defensive check, but in real world
-    # this may never happen.
-    if not isinstance(encoded, six.string_types):
-        ts = Timestamp(encoded)
-        return ts, ts, ts
-
-    parts = []
-    signs = []
-    pos_parts = encoded.split('+')
-    for part in pos_parts:
-        # parse time components and their signs
-        # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
-        neg_parts = part.split('-')
-        parts = parts + neg_parts
-        signs = signs + [1] + [-1] * (len(neg_parts) - 1)
-    t1 = Timestamp(parts[0])
-    t2 = t3 = None
-    if len(parts) > 1:
-        t2 = t1
-        delta = signs[1] * int(parts[1], 16)
-        # if delta = 0 we want t2 = t3 = t1 in order to
-        # preserve any offset in t1 - only construct a distinct
-        # timestamp if there is a non-zero delta.
-        if delta:
-            t2 = Timestamp((t1.raw + delta) * PRECISION)
-    elif not explicit:
-        t2 = t1
-    if len(parts) > 2:
-        t3 = t2
-        delta = signs[2] * int(parts[2], 16)
-        if delta:
-            t3 = Timestamp((t2.raw + delta) * PRECISION)
-    elif not explicit:
-        t3 = t2
-    return t1, t2, t3
-
-
-def normalize_timestamp(timestamp):
-    """
-    Format a timestamp (string or numeric) into a standardized
-    xxxxxxxxxx.xxxxx (10.5) format.
-
-    Note that timestamps using values greater than or equal to November 20th,
-    2286 at 17:46 UTC will use 11 digits to represent the number of
-    seconds.
-
-    :param timestamp: unix timestamp
-    :returns: normalized timestamp as a string
-    """
-    return Timestamp(timestamp).normal
-
-
-EPOCH = datetime.datetime(1970, 1, 1)
-
-
-def last_modified_date_to_timestamp(last_modified_date_str):
-    """
-    Convert a last modified date (like you'd get from a container listing,
-    e.g. 2014-02-28T23:22:36.698390) to a float.
-    """
-    return Timestamp.from_isoformat(last_modified_date_str)
-
-
-def normalize_delete_at_timestamp(timestamp, high_precision=False):
-    """
-    Format a timestamp (string or numeric) into a standardized
-    xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
-
-    Note that timestamps less than 0000000000 are raised to
-    0000000000 and values greater than November 20th, 2286 at
-    17:46:39 UTC will be capped at that date and time, resulting in
-    no return value exceeding 9999999999.99999 (or 9999999999 if
-    using low-precision).
-
-    This cap is because the expirer is already working through a
-    sorted list of strings that were all a length of 10. Adding
-    another digit would mess up the sort and cause the expirer to
-    break from processing early. By 2286, this problem will need to
-    be fixed, probably by creating an additional .expiring_objects
-    account to work from with 11 (or more) digit container names.
-
-    :param timestamp: unix timestamp
-    :returns: normalized timestamp as a string
-    """
-    fmt = '%016.5f' if high_precision else '%010d'
-    return fmt % min(max(0, float(timestamp)), 9999999999.99999)
-
-
 def mkdirs(path):
     """
     Ensures the path is a directory or makes it if not. Errors if the path
@@ -1715,6 +1208,7 @@ class RateLimitedIterator(object):
                             this many elements; default is 0 (rate limit
                             immediately)
     """
+
     def __init__(self, iterable, elements_per_second, limit_after=0,
                  ratelimit_if=lambda _junk: True):
         self.iterator = iter(iterable)
@@ -1749,6 +1243,7 @@ class GreenthreadSafeIterator(object):
     an error like "ValueError: generator already executing". By wrapping calls
     to next() with a mutex, we avoid that error.
     """
+
     def __init__(self, unsafe_iterable):
         self.unsafe_iter = iter(unsafe_iterable)
         self.semaphore = eventlet.semaphore.Semaphore(value=1)
@@ -1828,9 +1323,9 @@ class LoggerFileObject(object):
             if value:
                 if 'Connection reset by peer' in value:
                     self.logger.error(
-
+                        '%s: Connection reset by peer', self.log_type)
                 else:
-                    self.logger.error(
+                    self.logger.error('%(type)s: %(value)s',
                                       {'type': self.log_type, 'value': value})
         finally:
             self._cls_thread_local.already_called_write = False
@@ -1841,7 +1336,7 @@ class LoggerFileObject(object):
 
         self._cls_thread_local.already_called_writelines = True
         try:
-            self.logger.error(
+            self.logger.error('%(type)s: %(value)s',
                               {'type': self.log_type,
                                'value': '#012'.join(values)})
         finally:
@@ -1886,21 +1381,7 @@ class StatsdClient(object):
         self.logger = logger
 
         # Determine if host is IPv4 or IPv6
-        addr_info =
-        try:
-            addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
-            self._sock_family = socket.AF_INET
-        except socket.gaierror:
-            try:
-                addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
-                self._sock_family = socket.AF_INET6
-            except socket.gaierror:
-                # Don't keep the server from starting from what could be a
-                # transient DNS failure. Any hostname will get re-resolved as
-                # necessary in the .sendto() calls.
-                # However, we don't know if we're IPv4 or IPv6 in this case, so
-                # we assume legacy IPv4.
-                self._sock_family = socket.AF_INET
+        addr_info, self._sock_family = self._determine_sock_family(host, port)
 
         # NOTE: we use the original host value, not the DNS-resolved one
         # because if host is a hostname, we don't want to cache the DNS
@@ -1920,6 +1401,24 @@ class StatsdClient(object):
         else:
             self._target = (host, port)
 
+    def _determine_sock_family(self, host, port):
+        addr_info = sock_family = None
+        try:
+            addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
+            sock_family = socket.AF_INET
+        except socket.gaierror:
+            try:
+                addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
+                sock_family = socket.AF_INET6
+            except socket.gaierror:
+                # Don't keep the server from starting from what could be a
+                # transient DNS failure. Any hostname will get re-resolved as
+                # necessary in the .sendto() calls.
+                # However, we don't know if we're IPv4 or IPv6 in this case, so
+                # we assume legacy IPv4.
+                sock_family = socket.AF_INET
+        return addr_info, sock_family
+
     def _set_prefix(self, tail_prefix):
         """
         Modifies the prefix that is added to metric names. The resulting prefix
@@ -1979,7 +1478,7 @@ class StatsdClient(object):
         except IOError as err:
             if self.logger:
                 self.logger.warning(
-
+                    'Error sending UDP message to %(target)r: %(err)s',
                     {'target': self._target, 'err': err})
 
     def _open_socket(self):
@@ -1994,12 +1493,16 @@ class StatsdClient(object):
     def decrement(self, metric, sample_rate=None):
         return self.update_stats(metric, -1, sample_rate)
 
-    def
+    def _timing(self, metric, timing_ms, sample_rate):
+        # This method was added to disagregate timing metrics when testing
         return self._send(metric, timing_ms, 'ms', sample_rate)
 
+    def timing(self, metric, timing_ms, sample_rate=None):
+        return self._timing(metric, timing_ms, sample_rate)
+
     def timing_since(self, metric, orig_time, sample_rate=None):
-        return self.
-
+        return self._timing(metric, (time.time() - orig_time) * 1000,
+                            sample_rate)
 
     def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
         if byte_xfer:
@@ -2068,6 +1571,12 @@ class SwiftLoggerAdapter(logging.LoggerAdapter):
     Like logging.LoggerAdapter, you have to subclass this and override the
     process() method to accomplish anything useful.
     """
+
+    @property
+    def name(self):
+        # py3 does this for us already; add it for py2
+        return self.logger.name
+
     def get_metric_name(self, metric):
         # subclasses may override this method to annotate the metric name
         return metric
@@ -2110,6 +1619,7 @@ class PrefixLoggerAdapter(SwiftLoggerAdapter):
     Adds an optional prefix to all its log messages. When the prefix has not
     been set, messages are unchanged.
     """
+
     def set_prefix(self, prefix):
         self.extra['prefix'] = prefix
 
@@ -2129,6 +1639,7 @@ class MetricsPrefixLoggerAdapter(SwiftLoggerAdapter):
     """
     Adds a prefix to all Statsd metrics' names.
     """
+
     def __init__(self, logger, extra, metric_prefix):
         """
         :param logger: an instance of logging.Logger
@@ -2244,31 +1755,36 @@ class LogAdapter(logging.LoggerAdapter, object):
         _junk, exc, _junk = sys.exc_info()
         call = self.error
         emsg = ''
-        if isinstance(exc, (
+        if isinstance(exc, (http_client.BadStatusLine,
+                            green_http_client.BadStatusLine)):
+            # Use error(); not really exceptional
+            emsg = repr(exc)
+        # Note that on py3, we've seen a RemoteDisconnected error getting
+        # raised, which inherits from *both* BadStatusLine and OSError;
+        # we want it getting caught here
+        elif isinstance(exc, (OSError, socket.error)):
             if exc.errno in (errno.EIO, errno.ENOSPC):
                 emsg = str(exc)
             elif exc.errno == errno.ECONNREFUSED:
-                emsg =
+                emsg = 'Connection refused'
             elif exc.errno == errno.ECONNRESET:
-                emsg =
+                emsg = 'Connection reset'
             elif exc.errno == errno.EHOSTUNREACH:
-                emsg =
+                emsg = 'Host unreachable'
             elif exc.errno == errno.ENETUNREACH:
-                emsg =
+                emsg = 'Network unreachable'
             elif exc.errno == errno.ETIMEDOUT:
-                emsg =
+                emsg = 'Connection timeout'
             elif exc.errno == errno.EPIPE:
-                emsg =
+                emsg = 'Broken pipe'
             else:
                 call = self._exception
-        elif isinstance(exc, (http_client.BadStatusLine,
-                              green_http_client.BadStatusLine)):
-            # Use error(); not really exceptional
-            emsg = '%s: %s' % (exc.__class__.__name__, exc.line)
         elif isinstance(exc, eventlet.Timeout):
             emsg = exc.__class__.__name__
-
-
+            detail = '%ss' % exc.seconds
+            if hasattr(exc, 'created_at'):
+                detail += ' after %0.2fs' % (time.time() - exc.created_at)
+            emsg += ' (%s)' % detail
         if isinstance(exc, swift.common.exceptions.MessageTimeout):
             if exc.msg:
                 emsg += ' %s' % exc.msg
@@ -2305,13 +1821,13 @@ class LogAdapter(logging.LoggerAdapter, object):
 
         :param statsd_func_name: the name of a method on StatsdClient.
         """
-
        func = getattr(StatsdClient, statsd_func_name)
 
        @functools.wraps(func)
        def wrapped(self, *a, **kw):
            if getattr(self.logger, 'statsd_client'):
-
+                func = getattr(self.logger.statsd_client, statsd_func_name)
+                return func(*a, **kw)
        return wrapped
 
    update_stats = statsd_delegate('update_stats')
@@ -2382,6 +1898,7 @@ class LogLevelFilter(object):
         (DEBUG < INFO < WARN < ERROR < CRITICAL|FATAL)
     Default: DEBUG
     """
+
     def __init__(self, level=logging.DEBUG):
         self.level = level
 
@@ -2618,7 +2135,7 @@ def capture_stdio(logger, **kwargs):
     """
     # log uncaught exceptions
     sys.excepthook = lambda * exc_info: \
-        logger.critical(
+        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
 
     # collect stdio file desc not in use for logging
     stdio_files = [sys.stdin, sys.stdout, sys.stderr]
@@ -2647,11 +2164,13 @@ def capture_stdio(logger, **kwargs):
         sys.stderr = LoggerFileObject(logger, 'STDERR')
 
 
-def parse_options(parser=None, once=False, test_args=None):
+def parse_options(parser=None, once=False, test_config=False, test_args=None):
     """Parse standard swift server/daemon options with optparse.OptionParser.
 
     :param parser: OptionParser to use. If not sent one will be created.
     :param once: Boolean indicating the "once" option is available
+    :param test_config: Boolean indicating the "test-config" option is
+                        available
     :param test_args: Override sys.argv; used in testing
 
     :returns: Tuple of (config, options); config is an absolute path to the
@@ -2666,18 +2185,23 @@ def parse_options(parser=None, once=False, test_args=None):
     if once:
         parser.add_option("-o", "--once", default=False, action="store_true",
                           help="only run one pass of daemon")
+    if test_config:
+        parser.add_option("-t", "--test-config",
+                          default=False, action="store_true",
+                          help="exit after loading and validating config; "
+                               "do not run the daemon")
 
     # if test_args is None, optparse will use sys.argv[:1]
     options, args = parser.parse_args(args=test_args)
 
     if not args:
         parser.print_usage()
-        print(
+        print("Error: missing config path argument")
         sys.exit(1)
     config = os.path.abspath(args.pop(0))
     if not os.path.exists(config):
         parser.print_usage()
-        print(
+        print("Error: unable to locate %s" % config)
         sys.exit(1)
 
     extra_args = []
@@ -2694,125 +2218,6 @@ def parse_options(parser=None, once=False, test_args=None):
     return config, options
 
 
-def is_valid_ip(ip):
-    """
-    Return True if the provided ip is a valid IP-address
-    """
-    return is_valid_ipv4(ip) or is_valid_ipv6(ip)
-
-
-def is_valid_ipv4(ip):
-    """
-    Return True if the provided ip is a valid IPv4-address
-    """
-    try:
-        socket.inet_pton(socket.AF_INET, ip)
-    except socket.error:  # not a valid IPv4 address
-        return False
-    return True
-
-
-def is_valid_ipv6(ip):
-    """
-    Returns True if the provided ip is a valid IPv6-address
-    """
-    try:
-        socket.inet_pton(socket.AF_INET6, ip)
-    except socket.error:  # not a valid IPv6 address
-        return False
-    return True
-
-
-def expand_ipv6(address):
-    """
-    Expand ipv6 address.
-    :param address: a string indicating valid ipv6 address
-    :returns: a string indicating fully expanded ipv6 address
-
-    """
-    packed_ip = socket.inet_pton(socket.AF_INET6, address)
-    return socket.inet_ntop(socket.AF_INET6, packed_ip)
-
-
-def whataremyips(ring_ip=None):
-    """
-    Get "our" IP addresses ("us" being the set of services configured by
-    one `*.conf` file). If our REST listens on a specific address, return it.
-    Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including
-    the loopback.
-
-    :param str ring_ip: Optional ring_ip/bind_ip from a config file; may be
-                        IP address or hostname.
-    :returns: list of Strings of ip addresses
-    """
-    if ring_ip:
-        # See if bind_ip is '0.0.0.0'/'::'
-        try:
-            _, _, _, _, sockaddr = socket.getaddrinfo(
-                ring_ip, None, 0, socket.SOCK_STREAM, 0,
-                socket.AI_NUMERICHOST)[0]
-            if sockaddr[0] not in ('0.0.0.0', '::'):
-                return [ring_ip]
-        except socket.gaierror:
-            pass
-
-    addresses = []
-    for interface in netifaces.interfaces():
-        try:
-            iface_data = netifaces.ifaddresses(interface)
-            for family in iface_data:
-                if family not in (netifaces.AF_INET, netifaces.AF_INET6):
-                    continue
-                for address in iface_data[family]:
-                    addr = address['addr']
-
-                    # If we have an ipv6 address remove the
-                    # %ether_interface at the end
-                    if family == netifaces.AF_INET6:
-                        addr = expand_ipv6(addr.split('%')[0])
-                    addresses.append(addr)
-        except ValueError:
-            pass
-    return addresses
-
-
-def parse_socket_string(socket_string, default_port):
-    """
-    Given a string representing a socket, returns a tuple of (host, port).
-    Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
-    optional port. If an IPv6 address is specified it **must** be enclosed in
-    [], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
-    for `IPv6 host literals`_.
-
-    Examples::
-
-        server.org
-        server.org:1337
-        127.0.0.1:1337
-        [::1]:1337
-        [::1]
-
-    .. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
-    """
-    port = default_port
-    # IPv6 addresses must be between '[]'
-    if socket_string.startswith('['):
-        match = IPV6_RE.match(socket_string)
-        if not match:
-            raise ValueError("Invalid IPv6 address: %s" % socket_string)
-        host = match.group('address')
-        port = match.group('port') or port
-    else:
-        if ':' in socket_string:
-            tokens = socket_string.split(':')
-            if len(tokens) > 2:
-                raise ValueError("IPv6 addresses must be between '[]'")
-            host, port = tokens
-        else:
-            host = socket_string
-    return (host, port)
-
-
 def select_ip_port(node_dict, use_replication=False):
     """
     Get the ip address and port that should be used for the given
@@ -3197,6 +2602,7 @@ def readconf(conf_path, section_name=None, log_name=None, defaults=None,
         # values like "1%" (which we want to support for
         # fallocate_reserve).
         c = ConfigParser(defaults, interpolation=NicerInterpolation())
+    c.optionxform = str  # Don't lower-case keys
 
     if hasattr(conf_path, 'readline'):
         if hasattr(conf_path, 'seek'):
@@ -3212,14 +2618,14 @@ def readconf(conf_path, section_name=None, log_name=None, defaults=None,
     else:
         success = c.read(conf_path)
     if not success:
-        raise IOError(
+        raise IOError("Unable to read config from %s" %
                       conf_path)
     if section_name:
         if c.has_section(section_name):
             conf = dict(c.items(section_name))
         else:
             raise ValueError(
-
+                "Unable to find %(section)s config section in %(conf)s" %
                 {'section': section_name, 'conf': conf_path})
     if "log_name" not in conf:
         if log_name is not None:
@@ -3449,7 +2855,7 @@ def audit_location_generator(devices, datadir, suffix='',
             error_counter['unmounted'].append(device)
             if logger:
                 logger.warning(
-
+                    'Skipping %s as it is not mounted', device)
             continue
         if hook_pre_device:
             hook_pre_device(os.path.join(devices, device))
@@ -3462,7 +2868,7 @@ def audit_location_generator(devices, datadir, suffix='',
             error_counter.setdefault('unlistable_partitions', [])
             error_counter['unlistable_partitions'].append(datadir_path)
             if logger:
-                logger.warning(
+                logger.warning('Skipping %(datadir)s because %(err)s',
                                {'datadir': datadir_path, 'err': e})
             continue
         if partitions_filter:
@@ -3660,6 +3066,9 @@ class ContextPool(GreenPool):
         return self
 
     def __exit__(self, type, value, traceback):
+        self.close()
+
+    def close(self):
         for coro in list(self.coroutines_running):
             coro.kill()
 
@@ -3682,6 +3091,7 @@ class GreenAsyncPile(object):
 
     Correlating results with jobs (if necessary) is left to the caller.
     """
+
     def __init__(self, size_or_pool):
         """
         :param size_or_pool: thread pool size or a pool to use
@@ -3775,6 +3185,7 @@ class StreamingPile(GreenAsyncPile):
     When used as a context manager, has the same worker-killing properties as
     :class:`ContextPool`.
     """
+
     def __init__(self, size):
         """:param size: number of worker threads to use"""
         self.pool = ContextPool(size)
@@ -3840,16 +3251,16 @@ def validate_sync_to(value, allowed_sync_hosts, realms_conf):
         data = value[2:].split('/')
         if len(data) != 4:
             return (
-
+                'Invalid X-Container-Sync-To format %r' % orig_value,
                 None, None, None)
         realm, cluster, account, container = data
         realm_key = realms_conf.key(realm)
         if not realm_key:
-            return (
+            return ('No realm key for %r' % realm, None, None, None)
         endpoint = realms_conf.endpoint(realm, cluster)
         if not endpoint:
             return (
-
+                'No cluster endpoint for %(realm)r %(cluster)r'
                 % {'realm': realm, 'cluster': cluster},
                 None, None, None)
         return (
@@ -3859,19 +3270,19 @@ def validate_sync_to(value, allowed_sync_hosts, realms_conf):
     p = urlparse(value)
     if p.scheme not in ('http', 'https'):
         return (
-
-
+            'Invalid scheme %r in X-Container-Sync-To, must be "//", '
+            '"http", or "https".' % p.scheme,
             None, None, None)
     if not p.path:
-        return (
+        return ('Path required in X-Container-Sync-To', None, None, None)
     if p.params or p.query or p.fragment:
         return (
-
-
+            'Params, queries, and fragments not allowed in '
+            'X-Container-Sync-To',
             None, None, None)
     if p.hostname not in allowed_sync_hosts:
         return (
-
+            'Invalid host %r in X-Container-Sync-To' % p.hostname,
             None, None, None)
     return (None, value, None, None)
 
@@ -4266,6 +3677,7 @@ class Everything(object):
     A container that contains everything. If "e" is an instance of
     Everything, then "x in e" is true for all x.
     """
+
     def __contains__(self, element):
         return True
 
@@ -4297,6 +3709,7 @@ class CloseableChain(object):
     Like itertools.chain, but with a close method that will attempt to invoke
     its sub-iterators' close methods, if any.
     """
+
     def __init__(self, *iterables):
         self.iterables = iterables
         self.chained_iter = itertools.chain(*self.iterables)
@@ -4340,6 +3753,7 @@ class InputProxy(object):
     File-like object that counts bytes read.
     To be swapped in for wsgi.input for accounting purposes.
     """
+
     def __init__(self, wsgi_input):
         """
         :param wsgi_input: file-like object to wrap the functionality of
@@ -4481,6 +3895,7 @@ class Spliterator(object):
     "l"  # shorter than requested; this can happen with the last iterator
 
     """
+
     def __init__(self, source_iterable):
         self.input_iterator = iter(source_iterable)
         self.leftovers = None
@@ -4722,7 +4137,7 @@ def override_bytes_from_content_type(listing_dict, logger=None):
|
|
4722
4137
|
listing_dict['bytes'] = int(swift_bytes)
|
4723
4138
|
except ValueError:
|
4724
4139
|
if logger:
|
4725
|
-
logger.exception(
|
4140
|
+
logger.exception("Invalid swift_bytes")
|
4726
4141
|
|
4727
4142
|
|
4728
4143
|
def clean_content_type(value):
|
@@ -5041,7 +4456,7 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
|
|
5041
4456
|
pass
|
5042
4457
|
else:
|
5043
4458
|
logger.warning(
|
5044
|
-
|
4459
|
+
"More than one part in a single-part response?")
|
5045
4460
|
|
5046
4461
|
return string_along(response_body_iter, ranges_iter, logger)
|
5047
4462
|
|
@@ -5093,87 +4508,6 @@ def parse_content_disposition(header):
     return header, attributes


-class sockaddr_alg(ctypes.Structure):
-    _fields_ = [("salg_family", ctypes.c_ushort),
-                ("salg_type", ctypes.c_ubyte * 14),
-                ("salg_feat", ctypes.c_uint),
-                ("salg_mask", ctypes.c_uint),
-                ("salg_name", ctypes.c_ubyte * 64)]
-
-
-_bound_md5_sockfd = None
-
-
-def get_md5_socket():
-    """
-    Get an MD5 socket file descriptor. One can MD5 data with it by writing it
-    to the socket with os.write, then os.read the 16 bytes of the checksum out
-    later.
-
-    NOTE: It is the caller's responsibility to ensure that os.close() is
-    called on the returned file descriptor. This is a bare file descriptor,
-    not a Python object. It doesn't close itself.
-    """
-
-    # Linux's AF_ALG sockets work like this:
-    #
-    # First, initialize a socket with socket() and bind(). This tells the
-    # socket what algorithm to use, as well as setting up any necessary bits
-    # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
-    # algorithm name.
-    #
-    # Second, to hash some data, get a second socket by calling accept() on
-    # the first socket. Write data to the socket, then when finished, read the
-    # checksum from the socket and close it. This lets you checksum multiple
-    # things without repeating all the setup code each time.
-    #
-    # Since we only need to bind() one socket, we do that here and save it for
-    # future re-use. That way, we only use one file descriptor to get an MD5
-    # socket instead of two, and we also get to save some syscalls.
-
-    global _bound_md5_sockfd
-    global _libc_socket
-    global _libc_bind
-    global _libc_accept
-
-    if _libc_accept is None:
-        _libc_accept = load_libc_function('accept', fail_if_missing=True)
-    if _libc_socket is None:
-        _libc_socket = load_libc_function('socket', fail_if_missing=True)
-    if _libc_bind is None:
-        _libc_bind = load_libc_function('bind', fail_if_missing=True)
-
-    # Do this at first call rather than at import time so that we don't use a
-    # file descriptor on systems that aren't using any MD5 sockets.
-    if _bound_md5_sockfd is None:
-        sockaddr_setup = sockaddr_alg(
-            AF_ALG,
-            (ord('h'), ord('a'), ord('s'), ord('h'), 0),
-            0, 0,
-            (ord('m'), ord('d'), ord('5'), 0))
-        hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
-                                   ctypes.c_int(socket.SOCK_SEQPACKET),
-                                   ctypes.c_int(0))
-        if hash_sockfd < 0:
-            raise IOError(ctypes.get_errno(),
-                          "Failed to initialize MD5 socket")
-
-        bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
-                                 ctypes.pointer(sockaddr_setup),
-                                 ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
-        if bind_result < 0:
-            os.close(hash_sockfd)
-            raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
-
-        _bound_md5_sockfd = hash_sockfd
-
-    md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
-    if md5_sockfd < 0:
-        raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
-
-    return md5_sockfd
-
-
 try:
     _test_md5 = hashlib.md5(usedforsecurity=False)  # nosec

@@ -5195,19 +4529,19 @@ except TypeError:
         return hashlib.md5(string)  # nosec


-class
+class NamespaceOuterBound(object):
     """
     A custom singleton type to be subclassed for the outer bounds of
-
+    Namespaces.
     """
     _singleton = None

     def __new__(cls):
-        if cls is
-            raise TypeError('
+        if cls is NamespaceOuterBound:
+            raise TypeError('NamespaceOuterBound is an abstract class; '
                             'only subclasses should be instantiated')
         if cls._singleton is None:
-            cls._singleton = super(
+            cls._singleton = super(NamespaceOuterBound, cls).__new__(cls)
         return cls._singleton

     def __str__(self):
@@ -5222,47 +4556,360 @@ class ShardRangeOuterBound(object):
     __nonzero__ = __bool__


-
+@functools.total_ordering
+class Namespace(object):
     """
-
+    A Namespace encapsulates parameters that define a range of the object
+    namespace.

-
-
+    :param name: the name of the ``Namespace``.
+    :param lower: the lower bound of object names contained in the namespace;
+        the lower bound *is not* included in the namespace.
+    :param upper: the upper bound of object names contained in the namespace;
+        the upper bound *is* included in the namespace.
+    """
+    __slots__ = ('_lower', '_upper', 'name')

-
+    @functools.total_ordering
+    class MaxBound(NamespaceOuterBound):
+        # singleton for maximum bound
+        def __ge__(self, other):
+            return True

-
+    @functools.total_ordering
+    class MinBound(NamespaceOuterBound):
+        # singleton for minimum bound
+        def __le__(self, other):
+            return True

-
-
-    root container's own shard range will have a name format of
-    <account>/<root_container> which will raise ValueError if passed to parse.
-    """
-    def __init__(self, account, root_container,
-                 parent_container_hash,
-                 timestamp,
-                 index):
-        self.account = self._validate(account)
-        self.root_container = self._validate(root_container)
-        self.parent_container_hash = self._validate(parent_container_hash)
-        self.timestamp = Timestamp(timestamp)
-        self.index = int(index)
+    MIN = MinBound()
+    MAX = MaxBound()

-
-
-
-
-
+    def __init__(self, name, lower, upper):
+        self._lower = Namespace.MIN
+        self._upper = Namespace.MAX
+        self.lower = lower
+        self.upper = upper
+        self.name = name

-    def
-
-
-
-                                   self.timestamp.internal,
-                                   self.index)
+    def __iter__(self):
+        yield 'name', str(self.name)
+        yield 'lower', self.lower_str
+        yield 'upper', self.upper_str

-
-
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(
+            '%s=%r' % prop for prop in self))
+
+    def __lt__(self, other):
+        # a Namespace is less than other if its entire namespace is less than
+        # other; if other is another Namespace that implies that this
+        # Namespace's upper must be less than or equal to the other
+        # Namespace's lower
+        if self.upper == Namespace.MAX:
+            return False
+        if isinstance(other, Namespace):
+            return self.upper <= other.lower
+        elif other is None:
+            return True
+        else:
+            return self.upper < self._encode(other)
+
+    def __gt__(self, other):
+        # a Namespace is greater than other if its entire namespace is greater
+        # than other; if other is another Namespace that implies that this
+        # Namespace's lower must be less greater than or equal to the other
+        # Namespace's upper
+        if self.lower == Namespace.MIN:
+            return False
+        if isinstance(other, Namespace):
+            return self.lower >= other.upper
+        elif other is None:
+            return False
+        else:
+            return self.lower >= self._encode(other)
+
+    def __eq__(self, other):
+        # test for equality of range bounds only
+        if not isinstance(other, Namespace):
+            return False
+        return self.lower == other.lower and self.upper == other.upper
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __contains__(self, item):
+        # test if the given item is within the namespace
+        if item == '':
+            return False
+        item = self._encode_bound(item)
+        return self.lower < item <= self.upper
+
+    @classmethod
+    def _encode(cls, value):
+        if six.PY2 and isinstance(value, six.text_type):
+            return value.encode('utf-8')
+        if six.PY3 and isinstance(value, six.binary_type):
+            # This should never fail -- the value should always be coming from
+            # valid swift paths, which means UTF-8
+            return value.decode('utf-8')
+        return value
+
+    def _encode_bound(self, bound):
+        if isinstance(bound, NamespaceOuterBound):
+            return bound
+        if not (isinstance(bound, six.text_type) or
+                isinstance(bound, six.binary_type)):
+            raise TypeError('must be a string type')
+        return self._encode(bound)
+
+    @property
+    def lower(self):
+        return self._lower
+
+    @property
+    def lower_str(self):
+        return str(self.lower)
+
+    @lower.setter
+    def lower(self, value):
+        if value is None or (value == b"" if isinstance(value, bytes) else
+                             value == u""):
+            value = Namespace.MIN
+        try:
+            value = self._encode_bound(value)
+        except TypeError as err:
+            raise TypeError('lower %s' % err)
+        if value > self._upper:
+            raise ValueError(
+                'lower (%r) must be less than or equal to upper (%r)' %
+                (value, self.upper))
+        self._lower = value
+
+    @property
+    def upper(self):
+        return self._upper
+
+    @property
+    def upper_str(self):
+        return str(self.upper)
+
+    @upper.setter
+    def upper(self, value):
+        if value is None or (value == b"" if isinstance(value, bytes) else
+                             value == u""):
+            value = Namespace.MAX
+        try:
+            value = self._encode_bound(value)
+        except TypeError as err:
+            raise TypeError('upper %s' % err)
+        if value < self._lower:
+            raise ValueError(
+                'upper (%r) must be greater than or equal to lower (%r)' %
+                (value, self.lower))
+        self._upper = value
+
+    @property
+    def end_marker(self):
+        return self.upper_str + '\x00' if self.upper else ''
+
+    def entire_namespace(self):
+        """
+        Returns True if this namespace includes the entire namespace, False
+        otherwise.
+        """
+        return (self.lower == Namespace.MIN and
+                self.upper == Namespace.MAX)
+
+    def overlaps(self, other):
+        """
+        Returns True if this namespace overlaps with the other namespace.
+
+        :param other: an instance of :class:`~swift.common.utils.Namespace`
+        """
+        if not isinstance(other, Namespace):
+            return False
+        return max(self.lower, other.lower) < min(self.upper, other.upper)
+
+    def includes(self, other):
+        """
+        Returns True if this namespace includes the whole of the other
+        namespace, False otherwise.
+
+        :param other: an instance of :class:`~swift.common.utils.Namespace`
+        """
+        return (self.lower <= other.lower) and (other.upper <= self.upper)
+
+    def expand(self, donors):
+        """
+        Expands the bounds as necessary to match the minimum and maximum bounds
+        of the given donors.
+
+        :param donors: A list of :class:`~swift.common.utils.Namespace`
+        :return: True if the bounds have been modified, False otherwise.
+        """
+        modified = False
+        new_lower = self.lower
+        new_upper = self.upper
+        for donor in donors:
+            new_lower = min(new_lower, donor.lower)
+            new_upper = max(new_upper, donor.upper)
+        if self.lower > new_lower or self.upper < new_upper:
+            self.lower = new_lower
+            self.upper = new_upper
+            modified = True
+        return modified
+
+
+class NamespaceBoundList(object):
+    def __init__(self, bounds):
+        """
+        Encapsulate a compact representation of namespaces. Each item in the
+        list is a list [lower bound, name].
+
+        :param bounds: a list of lists ``[lower bound, name]``. The list
+            should be ordered by ``lower bound``.
+        """
+        self.bounds = [] if bounds is None else bounds
+
+    def __eq__(self, other):
+        # test for equality of NamespaceBoundList objects only
+        if not isinstance(other, NamespaceBoundList):
+            return False
+        return self.bounds == other.bounds
+
+    @classmethod
+    def parse(cls, namespaces):
+        """
+        Create a NamespaceBoundList object by parsing a list of Namespaces or
+        shard ranges and only storing the compact bounds list.
+
+        Each Namespace in the given list of ``namespaces`` provides the next
+        [lower bound, name] list to append to the NamespaceBoundList. The
+        given ``namespaces`` should be contiguous because the
+        NamespaceBoundList only stores lower bounds; if ``namespaces`` has
+        overlaps then at least one of the overlapping namespaces may be
+        ignored; similarly, gaps between namespaces are not represented in the
+        NamespaceBoundList.
+
+        :param namespaces: A list of Namespace instances. The list should be
+            ordered by namespace bounds.
+        :return: a NamespaceBoundList.
+        """
+        if not namespaces:
+            return None
+        bounds = []
+        upper = namespaces[0].lower
+        for ns in namespaces:
+            if ns.lower < upper:
+                # Discard overlapping namespace.
+                # Overlapping namespaces are expected in lists of shard ranges
+                # fetched from the backend. For example, while a parent
+                # container is in the process of sharding, the parent shard
+                # range and its children shard ranges may be returned in the
+                # list of shard ranges. However, the backend sorts the list by
+                # (upper, state, lower, name) such that the children precede
+                # the parent, and it is the children that we prefer to retain
+                # in the NamespaceBoundList. For example, these namespaces:
+                #   (a-b, "child1"), (b-c, "child2"), (a-c, "parent")
+                # would result in a NamespaceBoundList:
+                #   (a, "child1"), (b, "child2")
+                # Unexpected overlaps or gaps may result in namespaces being
+                # 'extended' because only lower bounds are stored. For example,
+                # these namespaces:
+                #   (a-b, "ns1"), (d-e, "ns2")
+                # would result in a NamespaceBoundList:
+                #   (a, "ns1"), (d, "ns2")
+                # When used to find a target namespace for an object update
+                # that lies in a gap, the NamespaceBoundList will map the
+                # object name to the preceding namespace. In the example, an
+                # object named "c" would be mapped to "ns1". (In previous
+                # versions, an object update lying in a gap would have been
+                # mapped to the root container.)
+                continue
+            bounds.append([ns.lower_str, str(ns.name)])
+            upper = ns.upper
+        return cls(bounds)
+
+    def get_namespace(self, item):
+        """
+        Get a Namespace instance that contains ``item`` by bisecting on the
+        lower bounds directly. This function is used for performance sensitive
+        path, for example, '_get_update_shard' in proxy object controller. For
+        normal paths, convert NamespaceBoundList to a list of Namespaces, and
+        use `~swift.common.utils.find_namespace` or
+        `~swift.common.utils.filter_namespaces`.
+
+        :param item: The item for a which a Namespace is to be found.
+        :return: the Namespace that contains ``item``.
+        """
+        pos = bisect.bisect(self.bounds, [item]) - 1
+        lower, name = self.bounds[pos]
+        upper = ('' if pos + 1 == len(self.bounds)
+                 else self.bounds[pos + 1][0])
+        return Namespace(name, lower, upper)
+
+    def get_namespaces(self):
+        """
+        Get the contained namespaces as a list of contiguous Namespaces ordered
+        by lower bound.
+
+        :return: A list of Namespace objects which are ordered by
+            ``lower bound``.
+        """
+        if not self.bounds:
+            return []
+        namespaces = []
+        num_ns = len(self.bounds)
+        for i in range(num_ns):
+            lower, name = self.bounds[i]
+            upper = ('' if i + 1 == num_ns else self.bounds[i + 1][0])
+            namespaces.append(Namespace(name, lower, upper))
+        return namespaces
+
+
+class ShardName(object):
+    """
+    Encapsulates the components of a shard name.
+
+    Instances of this class would typically be constructed via the create() or
+    parse() class methods.
+
+    Shard names have the form:
+
+        <account>/<root_container>-<parent_container_hash>-<timestamp>-<index>
+
+    Note: some instances of :class:`~swift.common.utils.ShardRange` have names
+    that will NOT parse as a :class:`~swift.common.utils.ShardName`; e.g. a
+    root container's own shard range will have a name format of
+    <account>/<root_container> which will raise ValueError if passed to parse.
+    """
+
+    def __init__(self, account, root_container,
+                 parent_container_hash,
+                 timestamp,
+                 index):
+        self.account = self._validate(account)
+        self.root_container = self._validate(root_container)
+        self.parent_container_hash = self._validate(parent_container_hash)
+        self.timestamp = Timestamp(timestamp)
+        self.index = int(index)
+
+    @classmethod
+    def _validate(cls, arg):
+        if arg is None:
+            raise ValueError('arg must not be None')
+        return arg
+
+    def __str__(self):
+        return '%s/%s-%s-%s-%s' % (self.account,
+                                   self.root_container,
+                                   self.parent_container_hash,
+                                   self.timestamp.internal,
+                                   self.index)
+
+    @classmethod
+    def hash_container_name(cls, container_name):
         """
         Calculates the hash of a container name.

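The new Namespace class generalizes the bound handling that previously lived on ShardRange, and NamespaceBoundList keeps only ``[lower bound, name]`` pairs and bisects on them at lookup time. A minimal usage sketch (not from the package source; the container names and bound values below are made up for illustration):

    # illustrative only
    from swift.common.utils import Namespace, NamespaceBoundList

    namespaces = [
        Namespace('.shards_a/c_child1', '', 'cat'),
        Namespace('.shards_a/c_child2', 'cat', 'giraffe'),
        Namespace('.shards_a/c_child3', 'giraffe', ''),
    ]
    # the lower bound is excluded, the upper bound is included
    assert 'dog' in namespaces[1]
    assert 'cat' not in namespaces[1]

    # compact form used on performance-sensitive paths: only the
    # [lower bound, name] pairs are stored, and get_namespace() bisects
    # on the lower bounds to find the containing namespace
    bounds = NamespaceBoundList.parse(namespaces)
    assert bounds.get_namespace('dog').name == '.shards_a/c_child2'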
@@ -5329,7 +4976,7 @@ class ShardName(object):
            raise ValueError('invalid name: %s' % name)


-class ShardRange(
+class ShardRange(Namespace):
     """
     A ShardRange encapsulates sharding state related to a container including
     lower and upper bounds that define the object namespace for which the
@@ -5398,41 +5045,25 @@ class ShardRange(object):
     SHARDING_STATES = (SHARDING, SHARDED)
     CLEAVING_STATES = SHRINKING_STATES + SHARDING_STATES

-    @functools.total_ordering
-    class MaxBound(ShardRangeOuterBound):
-        # singleton for maximum bound
-        def __ge__(self, other):
-            return True
-
-    @functools.total_ordering
-    class MinBound(ShardRangeOuterBound):
-        # singleton for minimum bound
-        def __le__(self, other):
-            return True
-
-    MIN = MinBound()
-    MAX = MaxBound()
     __slots__ = (
         'account', 'container',
         '_timestamp', '_meta_timestamp', '_state_timestamp', '_epoch',
-        '
+        '_deleted', '_state', '_count', '_bytes',
         '_tombstones', '_reported')

-    def __init__(self, name, timestamp
+    def __init__(self, name, timestamp=0,
+                 lower=Namespace.MIN, upper=Namespace.MAX,
                  object_count=0, bytes_used=0, meta_timestamp=None,
                  deleted=False, state=None, state_timestamp=None, epoch=None,
-                 reported=False, tombstones=-1):
+                 reported=False, tombstones=-1, **kwargs):
+        super(ShardRange, self).__init__(name=name, lower=lower, upper=upper)
         self.account = self.container = self._timestamp = \
             self._meta_timestamp = self._state_timestamp = self._epoch = None
-        self._lower = ShardRange.MIN
-        self._upper = ShardRange.MAX
         self._deleted = False
         self._state = None

         self.name = name
         self.timestamp = timestamp
-        self.lower = lower
-        self.upper = upper
         self.deleted = deleted
         self.object_count = object_count
         self.bytes_used = bytes_used
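With this change a ShardRange is itself a Namespace, so the bound semantics shown earlier apply to it unchanged and the MIN/MAX singletons, comparison operators and bound setters are inherited rather than duplicated. A small sketch (illustrative only; the name, timestamp and bounds are made up):

    # illustrative only
    from swift.common.utils import Namespace, ShardRange

    sr = ShardRange('.shards_a/c_1', timestamp=0, lower='cat', upper='giraffe')
    assert isinstance(sr, Namespace)
    assert 'dog' in sr
    assert sr.end_marker == 'giraffe\x00'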
@@ -5447,27 +5078,10 @@ class ShardRange(object):
     def sort_key(cls, sr):
         # defines the sort order for shard ranges
         # note if this ever changes to *not* sort by upper first then it breaks
-        # a key assumption for bisect, which is used by utils.
+        # a key assumption for bisect, which is used by utils.find_namespace
+        # with shard ranges.
         return sr.upper, sr.state, sr.lower, sr.name

-    @classmethod
-    def _encode(cls, value):
-        if six.PY2 and isinstance(value, six.text_type):
-            return value.encode('utf-8')
-        if six.PY3 and isinstance(value, six.binary_type):
-            # This should never fail -- the value should always be coming from
-            # valid swift paths, which means UTF-8
-            return value.decode('utf-8')
-        return value
-
-    def _encode_bound(self, bound):
-        if isinstance(bound, ShardRangeOuterBound):
-            return bound
-        if not (isinstance(bound, six.text_type) or
-                isinstance(bound, six.binary_type)):
-            raise TypeError('must be a string type')
-        return self._encode(bound)
-
     def is_child_of(self, parent):
         """
         Test if this shard range is a child of another shard range. The
@@ -5638,56 +5252,6 @@ class ShardRange(object):
     def meta_timestamp(self, ts):
         self._meta_timestamp = self._to_timestamp(ts)

-    @property
-    def lower(self):
-        return self._lower
-
-    @property
-    def lower_str(self):
-        return str(self.lower)
-
-    @lower.setter
-    def lower(self, value):
-        if value is None or (value == b"" if isinstance(value, bytes) else
-                             value == u""):
-            value = ShardRange.MIN
-        try:
-            value = self._encode_bound(value)
-        except TypeError as err:
-            raise TypeError('lower %s' % err)
-        if value > self._upper:
-            raise ValueError(
-                'lower (%r) must be less than or equal to upper (%r)' %
-                (value, self.upper))
-        self._lower = value
-
-    @property
-    def end_marker(self):
-        return self.upper_str + '\x00' if self.upper else ''
-
-    @property
-    def upper(self):
-        return self._upper
-
-    @property
-    def upper_str(self):
-        return str(self.upper)
-
-    @upper.setter
-    def upper(self, value):
-        if value is None or (value == b"" if isinstance(value, bytes) else
-                             value == u""):
-            value = ShardRange.MAX
-        try:
-            value = self._encode_bound(value)
-        except TypeError as err:
-            raise TypeError('upper %s' % err)
-        if value < self._lower:
-            raise ValueError(
-                'upper (%r) must be greater than or equal to lower (%r)' %
-                (value, self.lower))
-        self._upper = value
-
     @property
     def object_count(self):
         return self._count
@@ -5895,56 +5459,12 @@ class ShardRange(object):
         self.timestamp = timestamp or Timestamp.now()
         return True

-    def __contains__(self, item):
-        # test if the given item is within the namespace
-        if item == '':
-            return False
-        item = self._encode_bound(item)
-        return self.lower < item <= self.upper
-
-    def __lt__(self, other):
-        # a ShardRange is less than other if its entire namespace is less than
-        # other; if other is another ShardRange that implies that this
-        # ShardRange's upper must be less than or equal to the other
-        # ShardRange's lower
-        if self.upper == ShardRange.MAX:
-            return False
-        if isinstance(other, ShardRange):
-            return self.upper <= other.lower
-        elif other is None:
-            return True
-        else:
-            return self.upper < self._encode(other)
-
-    def __gt__(self, other):
-        # a ShardRange is greater than other if its entire namespace is greater
-        # than other; if other is another ShardRange that implies that this
-        # ShardRange's lower must be less greater than or equal to the other
-        # ShardRange's upper
-        if self.lower == ShardRange.MIN:
-            return False
-        if isinstance(other, ShardRange):
-            return self.lower >= other.upper
-        elif other is None:
-            return False
-        else:
-            return self.lower >= self._encode(other)
-
-    def __eq__(self, other):
-        # test for equality of range bounds only
-        if not isinstance(other, ShardRange):
-            return False
-        return self.lower == other.lower and self.upper == other.upper
-
     # A by-the-book implementation should probably hash the value, which
     # in our case would be account+container+lower+upper (+timestamp ?).
     # But we seem to be okay with just the identity.
     def __hash__(self):
         return id(self)

-    def __ne__(self, other):
-        return not (self == other)
-
     def __repr__(self):
         return '%s<%r to %r as of %s, (%d, %d) as of %s, %s as of %s>' % (
             self.__class__.__name__, self.lower, self.upper,
@@ -5952,34 +5472,6 @@ class ShardRange(object):
             self.meta_timestamp.internal, self.state_text,
             self.state_timestamp.internal)

-    def entire_namespace(self):
-        """
-        Returns True if the ShardRange includes the entire namespace, False
-        otherwise.
-        """
-        return (self.lower == ShardRange.MIN and
-                self.upper == ShardRange.MAX)
-
-    def overlaps(self, other):
-        """
-        Returns True if the ShardRange namespace overlaps with the other
-        ShardRange's namespace.
-
-        :param other: an instance of :class:`~swift.common.utils.ShardRange`
-        """
-        if not isinstance(other, ShardRange):
-            return False
-        return max(self.lower, other.lower) < min(self.upper, other.upper)
-
-    def includes(self, other):
-        """
-        Returns True if this namespace includes the whole of the other
-        namespace, False otherwise.
-
-        :param other: an instance of :class:`~swift.common.utils.ShardRange`
-        """
-        return (self.lower <= other.lower) and (other.upper <= self.upper)
-
     def __iter__(self):
         yield 'name', self.name
         yield 'timestamp', self.timestamp.internal
@@ -6028,26 +5520,6 @@ class ShardRange(object):
             params['state_timestamp'], params['epoch'],
             params.get('reported', 0), params.get('tombstones', -1))

-    def expand(self, donors):
-        """
-        Expands the bounds as necessary to match the minimum and maximum bounds
-        of the given donors.
-
-        :param donors: A list of :class:`~swift.common.utils.ShardRange`
-        :return: True if the bounds have been modified, False otherwise.
-        """
-        modified = False
-        new_lower = self.lower
-        new_upper = self.upper
-        for donor in donors:
-            new_lower = min(new_lower, donor.lower)
-            new_upper = max(new_upper, donor.upper)
-        if self.lower > new_lower or self.upper < new_upper:
-            self.lower = new_lower
-            self.upper = new_upper
-            modified = True
-        return modified
-

 class ShardRangeList(UserList):
     """
@@ -6057,6 +5529,7 @@ class ShardRangeList(UserList):
     This class does not enforce ordering or continuity of the list items:
     callers should ensure that items are added in order as appropriate.
     """
+
     def __getitem__(self, index):
         # workaround for py3 - not needed for py2.7,py3.8
         result = self.data[index]
@@ -6069,27 +5542,27 @@ class ShardRangeList(UserList):
         only be equal to the lowest bound of all items in the list if the list
         contents has been sorted.

-        :return: lower bound of first item in the list, or
+        :return: lower bound of first item in the list, or Namespace.MIN
             if the list is empty.
         """
         if not self:
             # empty list has range MIN->MIN
-            return
+            return Namespace.MIN
         return self[0].lower

     @property
     def upper(self):
         """
-        Returns the upper bound of the
+        Returns the upper bound of the last item in the list. Note: this will
         only be equal to the uppermost bound of all items in the list if the
         list has previously been sorted.

-        :return: upper bound of
+        :return: upper bound of last item in the list, or Namespace.MIN
             if the list is empty.
         """
         if not self:
             # empty list has range MIN->MIN
-            return
+            return Namespace.MIN
         return self[-1].upper

     @property
@@ -6162,7 +5635,7 @@ class ShardRangeList(UserList):
         containing the filtered shard ranges.
         """
         return ShardRangeList(
-
+            filter_namespaces(self, includes, marker, end_marker))

     def find_lower(self, condition):
         """
@@ -6183,44 +5656,45 @@ class ShardRangeList(UserList):
         return self.upper


-def
+def find_namespace(item, namespaces):
     """
-    Find a ShardRange in given list of ``
+    Find a Namespace/ShardRange in given list of ``namespaces`` whose namespace
     contains ``item``.

-    :param item: The item for a which a
-    :param ranges: a sorted list of
-    :return: the ShardRange whose namespace contains ``item``, or
-        no suitable
+    :param item: The item for a which a Namespace is to be found.
+    :param ranges: a sorted list of Namespaces.
+    :return: the Namespace/ShardRange whose namespace contains ``item``, or
+        None if no suitable Namespace is found.
     """
-    index = bisect.bisect_left(
-    if index != len(
-        return
+    index = bisect.bisect_left(namespaces, item)
+    if index != len(namespaces) and item in namespaces[index]:
+        return namespaces[index]
     return None


-def
+def filter_namespaces(namespaces, includes, marker, end_marker):
     """
-    Filter the given
-    ``includes`` name or any part of the namespace between ``marker`` and
+    Filter the given Namespaces/ShardRanges to those whose namespace includes
+    the ``includes`` name or any part of the namespace between ``marker`` and
     ``end_marker``. If none of ``includes``, ``marker`` or ``end_marker`` are
-    specified then all
+    specified then all Namespaces will be returned.

-    :param
-
-
-
+    :param namespaces: A list of :class:`~swift.common.utils.Namespace` or
+        :class:`~swift.common.utils.ShardRange`.
+    :param includes: a string; if not empty then only the Namespace,
+        if any, whose namespace includes this string will be returned,
+        ``marker`` and ``end_marker`` will be ignored.
     :param marker: if specified then only shard ranges whose upper bound is
         greater than this value will be returned.
     :param end_marker: if specified then only shard ranges whose lower bound is
         less than this value will be returned.
-    :return: A filtered list of :class:`~swift.common.utils.
+    :return: A filtered list of :class:`~swift.common.utils.Namespace`.
     """
     if includes:
-
-        return [
+        namespace = find_namespace(includes, namespaces)
+        return [namespace] if namespace else []

-    def
+    def namespace_filter(sr):
         end = start = True
         if end_marker:
             end = end_marker > sr.lower
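The former shard-range helpers are generalized here to operate on plain Namespaces as well as ShardRanges. A short sketch of the renamed helpers (not from the package source; the names and bounds are invented, and the marker/end_marker behaviour shown assumes the usual "upper > marker and lower < end_marker" filter):

    # illustrative only
    from swift.common.utils import Namespace, find_namespace, filter_namespaces

    namespaces = [
        Namespace('ns1', '', 'm'),
        Namespace('ns2', 'm', ''),
    ]
    # bisect on the sorted list, then confirm containment
    assert find_namespace('apple', namespaces).name == 'ns1'
    assert find_namespace('zebra', namespaces).name == 'ns2'

    # marker/end_marker keep every namespace that overlaps the interval
    assert [ns.name
            for ns in filter_namespaces(namespaces, None, 'a', 'c')] == ['ns1']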
@@ -6229,79 +5703,13 @@ def filter_shard_ranges(shard_ranges, includes, marker, end_marker):
         return start and end

     if marker or end_marker:
-        return list(filter(
+        return list(filter(namespace_filter, namespaces))

-    if marker ==
-        # MIN and MAX are both Falsy so not handled by
+    if marker == Namespace.MAX or end_marker == Namespace.MIN:
+        # MIN and MAX are both Falsy so not handled by namespace_filter
         return []

-    return
-
-
-def modify_priority(conf, logger):
-    """
-    Modify priority by nice and ionice.
-    """
-
-    global _libc_setpriority
-    if _libc_setpriority is None:
-        _libc_setpriority = load_libc_function('setpriority',
-                                               errcheck=True)
-
-    def _setpriority(nice_priority):
-        """
-        setpriority for this pid
-
-        :param nice_priority: valid values are -19 to 20
-        """
-        try:
-            _libc_setpriority(PRIO_PROCESS, os.getpid(),
-                              int(nice_priority))
-        except (ValueError, OSError):
-            print(_("WARNING: Unable to modify scheduling priority of process."
-                    " Keeping unchanged! Check logs for more info. "))
-            logger.exception('Unable to modify nice priority')
-        else:
-            logger.debug('set nice priority to %s' % nice_priority)
-
-    nice_priority = conf.get('nice_priority')
-    if nice_priority is not None:
-        _setpriority(nice_priority)
-
-    global _posix_syscall
-    if _posix_syscall is None:
-        _posix_syscall = load_libc_function('syscall', errcheck=True)
-
-    def _ioprio_set(io_class, io_priority):
-        """
-        ioprio_set for this process
-
-        :param io_class: the I/O class component, can be
-                         IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
-                         or IOPRIO_CLASS_IDLE
-        :param io_priority: priority value in the I/O class
-        """
-        try:
-            io_class = IO_CLASS_ENUM[io_class]
-            io_priority = int(io_priority)
-            _posix_syscall(NR_ioprio_set(),
-                           IOPRIO_WHO_PROCESS,
-                           os.getpid(),
-                           IOPRIO_PRIO_VALUE(io_class, io_priority))
-        except (KeyError, ValueError, OSError):
-            print(_("WARNING: Unable to modify I/O scheduling class "
-                    "and priority of process. Keeping unchanged! "
-                    "Check logs for more info."))
-            logger.exception("Unable to modify ionice priority")
-        else:
-            logger.debug('set ionice class %s priority %s',
-                         io_class, io_priority)
-
-    io_class = conf.get("ionice_class")
-    if io_class is None:
-        return
-    io_priority = conf.get("ionice_priority", 0)
-    _ioprio_set(io_class, io_priority)
+    return namespaces


 def o_tmpfile_in_path_supported(dirpath):
@@ -6369,6 +5777,15 @@ def strict_b64decode(value, allow_line_breaks=False):
         raise ValueError


+def cap_length(value, max_length):
+    if value and len(value) > max_length:
+        if isinstance(value, bytes):
+            return value[:max_length] + b'...'
+        else:
+            return value[:max_length] + '...'
+    return value
+
+
 MD5_BLOCK_READ_BYTES = 4096

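The new cap_length() helper truncates over-long values and appends an ellipsis, handling both bytes and text. A quick sketch of its behaviour as written above:

    # illustrative only
    from swift.common.utils import cap_length

    assert cap_length('abcdef', 3) == 'abc...'
    assert cap_length(b'abcdef', 3) == b'abc...'
    assert cap_length('ab', 3) == 'ab'      # short values pass through
    assert cap_length(None, 3) is None      # falsy values pass through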
@@ -6590,6 +6007,7 @@ class NoopMutex(object):
     of which have the message-interleaving trouble you'd expect from TCP or
     file handlers.
     """
+
     def __init__(self):
         # Usually, it's an error to have multiple greenthreads all waiting
         # to write to the same file descriptor. It's often a sign of inadequate
@@ -6857,6 +6275,7 @@ class Watchdog(object):
    => the exception is raised, then the greenlet watchdog sleep(3) to
       wake up for the 1st timeout expiration
    """
+
    def __init__(self):
        # key => (timeout, timeout_at, caller_greenthread, exception)
        self._timeouts = dict()
@@ -6870,14 +6289,15 @@ class Watchdog(object):

         :param timeout: duration before the timeout expires
         :param exc: exception to throw when the timeout expire, must inherit
-            from eventlet.
+            from eventlet.Timeout
         :param timeout_at: allow to force the expiration timestamp
         :return: id of the scheduled timeout, needed to cancel it
         """
+        now = time.time()
         if not timeout_at:
-            timeout_at =
+            timeout_at = now + timeout
         gth = eventlet.greenthread.getcurrent()
-        timeout_definition = (timeout, timeout_at, gth, exc)
+        timeout_definition = (timeout, timeout_at, gth, exc, now)
         key = id(timeout_definition)
         self._timeouts[key] = timeout_definition

@@ -6900,8 +6320,7 @@ class Watchdog(object):
         :param key: timeout id, as returned by start()
         """
         try:
-
-                del(self._timeouts[key])
+            del(self._timeouts[key])
         except KeyError:
             pass

@@ -6921,15 +6340,14 @@ class Watchdog(object):
         self._next_expiration = None
         if self._evt.ready():
             self._evt.reset()
-        for k, (timeout, timeout_at, gth, exc
+        for k, (timeout, timeout_at, gth, exc,
+                created_at) in list(self._timeouts.items()):
             if timeout_at <= now:
-
-                if k in self._timeouts:
-                    del(self._timeouts[k])
-                except KeyError:
-                    pass
+                self.stop(k)
                 e = exc()
+                # set this after __init__ to keep it off the eventlet scheduler
                 e.seconds = timeout
+                e.created_at = created_at
                 eventlet.hubs.get_hub().schedule_call_global(0, gth.throw, e)
             else:
                 if (self._next_expiration is None
@@ -6946,6 +6364,7 @@ class WatchdogTimeout(object):
     """
     Context manager to schedule a timeout in a Watchdog instance
     """
+
     def __init__(self, watchdog, timeout, exc, timeout_at=None):
         """
         Schedule a timeout in a Watchdog instance
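Watchdog.start() now records the creation time in the timeout definition, and the exception thrown on expiry carries a ``created_at`` attribute in addition to ``seconds``; expired entries are removed via stop() rather than inline. A hedged usage sketch (not from the diff; it assumes the Watchdog greenthread is started with ``spawn()`` and that ChunkReadTimeout inherits from eventlet's Timeout, and the read helper is made up):

    # illustrative only
    from swift.common.exceptions import ChunkReadTimeout
    from swift.common.utils import Watchdog, WatchdogTimeout

    watchdog = Watchdog()
    watchdog.spawn()   # start the single timeout-checker greenthread

    def read_chunk(source):
        # if the read takes longer than 60s, ChunkReadTimeout is thrown into
        # this greenthread; in 2.32.1 it also carries ``created_at``
        with WatchdogTimeout(watchdog, 60, ChunkReadTimeout):
            return source.read(65536)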
@@ -6964,3 +6383,48 @@ class WatchdogTimeout(object):

     def __exit__(self, type, value, traceback):
         self.watchdog.stop(self.key)
+
+
+class CooperativeIterator(object):
+    """
+    Wrapper to make a deliberate periodic call to ``sleep()`` while iterating
+    over wrapped iterator, providing an opportunity to switch greenthreads.
+
+    This is for fairness; if the network is outpacing the CPU, we'll always be
+    able to read and write data without encountering an EWOULDBLOCK, and so
+    eventlet will not switch greenthreads on its own. We do it manually so that
+    clients don't starve.
+
+    The number 5 here was chosen by making stuff up. It's not every single
+    chunk, but it's not too big either, so it seemed like it would probably be
+    an okay choice.
+
+    Note that we may trampoline to other greenthreads more often than once
+    every 5 chunks, depending on how blocking our network IO is; the explicit
+    sleep here simply provides a lower bound on the rate of trampolining.
+
+    :param iterable: iterator to wrap.
+    :param period: number of items yielded from this iterator between calls to
+        ``sleep()``.
+    """
+    __slots__ = ('period', 'count', 'wrapped_iter')
+
+    def __init__(self, iterable, period=5):
+        self.wrapped_iter = iterable
+        self.count = 0
+        self.period = period
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        if self.count >= self.period:
+            self.count = 0
+            sleep()
+        self.count += 1
+        return next(self.wrapped_iter)
+
+    __next__ = next
+
+    def close(self):
+        close_if_possible(self.wrapped_iter)