swift 2.33.0__py2.py3-none-any.whl → 2.34.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/account/auditor.py +11 -0
- swift/account/reaper.py +11 -1
- swift/account/replicator.py +22 -0
- swift/account/server.py +12 -1
- swift-2.33.0.data/scripts/swift-account-audit → swift/cli/account_audit.py +6 -2
- swift-2.33.0.data/scripts/swift-config → swift/cli/config.py +1 -1
- swift-2.33.0.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +6 -2
- swift-2.33.0.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +12 -3
- swift-2.33.0.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +6 -2
- swift/cli/info.py +103 -2
- swift-2.33.0.data/scripts/swift-oldies → swift/cli/oldies.py +6 -3
- swift-2.33.0.data/scripts/swift-orphans → swift/cli/orphans.py +7 -2
- swift/cli/recon_cron.py +5 -5
- swift-2.33.0.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +1 -1
- swift/cli/ringbuilder.py +24 -0
- swift/common/db.py +2 -1
- swift/common/db_auditor.py +2 -2
- swift/common/db_replicator.py +6 -0
- swift/common/exceptions.py +12 -0
- swift/common/manager.py +102 -0
- swift/common/memcached.py +6 -13
- swift/common/middleware/account_quotas.py +144 -43
- swift/common/middleware/backend_ratelimit.py +166 -24
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +3 -5
- swift/common/middleware/container_sync.py +6 -10
- swift/common/middleware/crypto/crypto_utils.py +4 -5
- swift/common/middleware/crypto/decrypter.py +4 -5
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/proxy_logging.py +22 -16
- swift/common/middleware/ratelimit.py +6 -7
- swift/common/middleware/recon.py +6 -7
- swift/common/middleware/s3api/acl_handlers.py +9 -0
- swift/common/middleware/s3api/controllers/multi_upload.py +1 -9
- swift/common/middleware/s3api/controllers/obj.py +20 -1
- swift/common/middleware/s3api/s3api.py +2 -0
- swift/common/middleware/s3api/s3request.py +171 -62
- swift/common/middleware/s3api/s3response.py +35 -6
- swift/common/middleware/s3api/s3token.py +2 -2
- swift/common/middleware/s3api/utils.py +1 -0
- swift/common/middleware/slo.py +153 -52
- swift/common/middleware/tempauth.py +6 -4
- swift/common/middleware/tempurl.py +2 -2
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +9 -10
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +1 -2
- swift/common/request_helpers.py +69 -0
- swift/common/statsd_client.py +207 -0
- swift/common/utils/__init__.py +97 -1635
- swift/common/utils/base.py +138 -0
- swift/common/utils/config.py +443 -0
- swift/common/utils/logs.py +999 -0
- swift/common/wsgi.py +11 -3
- swift/container/auditor.py +11 -0
- swift/container/backend.py +10 -10
- swift/container/reconciler.py +11 -2
- swift/container/replicator.py +22 -1
- swift/container/server.py +12 -1
- swift/container/sharder.py +36 -12
- swift/container/sync.py +11 -1
- swift/container/updater.py +11 -2
- swift/obj/auditor.py +18 -2
- swift/obj/diskfile.py +8 -6
- swift/obj/expirer.py +155 -36
- swift/obj/reconstructor.py +28 -4
- swift/obj/replicator.py +61 -22
- swift/obj/server.py +64 -36
- swift/obj/updater.py +11 -2
- swift/proxy/controllers/base.py +38 -22
- swift/proxy/controllers/obj.py +23 -26
- swift/proxy/server.py +15 -1
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/AUTHORS +11 -3
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/METADATA +6 -5
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/RECORD +81 -107
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/entry_points.txt +38 -0
- swift-2.34.0.dist-info/pbr.json +1 -0
- swift-2.33.0.data/scripts/swift-account-auditor +0 -23
- swift-2.33.0.data/scripts/swift-account-info +0 -52
- swift-2.33.0.data/scripts/swift-account-reaper +0 -23
- swift-2.33.0.data/scripts/swift-account-replicator +0 -34
- swift-2.33.0.data/scripts/swift-account-server +0 -23
- swift-2.33.0.data/scripts/swift-container-auditor +0 -23
- swift-2.33.0.data/scripts/swift-container-info +0 -59
- swift-2.33.0.data/scripts/swift-container-reconciler +0 -21
- swift-2.33.0.data/scripts/swift-container-replicator +0 -34
- swift-2.33.0.data/scripts/swift-container-server +0 -23
- swift-2.33.0.data/scripts/swift-container-sharder +0 -37
- swift-2.33.0.data/scripts/swift-container-sync +0 -23
- swift-2.33.0.data/scripts/swift-container-updater +0 -23
- swift-2.33.0.data/scripts/swift-dispersion-report +0 -24
- swift-2.33.0.data/scripts/swift-form-signature +0 -20
- swift-2.33.0.data/scripts/swift-init +0 -119
- swift-2.33.0.data/scripts/swift-object-auditor +0 -29
- swift-2.33.0.data/scripts/swift-object-expirer +0 -33
- swift-2.33.0.data/scripts/swift-object-info +0 -60
- swift-2.33.0.data/scripts/swift-object-reconstructor +0 -33
- swift-2.33.0.data/scripts/swift-object-relinker +0 -23
- swift-2.33.0.data/scripts/swift-object-replicator +0 -37
- swift-2.33.0.data/scripts/swift-object-server +0 -27
- swift-2.33.0.data/scripts/swift-object-updater +0 -23
- swift-2.33.0.data/scripts/swift-proxy-server +0 -23
- swift-2.33.0.data/scripts/swift-recon +0 -24
- swift-2.33.0.data/scripts/swift-recon-cron +0 -24
- swift-2.33.0.data/scripts/swift-ring-builder +0 -37
- swift-2.33.0.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.33.0.data/scripts/swift-ring-composer +0 -22
- swift-2.33.0.dist-info/pbr.json +0 -1
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/LICENSE +0 -0
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/WHEEL +0 -0
- {swift-2.33.0.dist-info → swift-2.34.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,999 @@
|
|
1
|
+
# Copyright (c) 2010-2012 OpenStack Foundation
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
12
|
+
# implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
|
16
|
+
from __future__ import print_function
|
17
|
+
|
18
|
+
import errno
|
19
|
+
import hashlib
|
20
|
+
import logging
|
21
|
+
from logging.handlers import SysLogHandler
|
22
|
+
import os
|
23
|
+
import socket
|
24
|
+
import stat
|
25
|
+
import string
|
26
|
+
import sys
|
27
|
+
import functools
|
28
|
+
import time
|
29
|
+
import warnings
|
30
|
+
import fcntl
|
31
|
+
import eventlet
|
32
|
+
import six
|
33
|
+
import datetime
|
34
|
+
|
35
|
+
from swift.common.utils.base import md5, quote, split_path
|
36
|
+
from swift.common.utils.timestamp import UTC
|
37
|
+
from swift.common.utils.config import config_true_value
|
38
|
+
from swift.common import statsd_client
|
39
|
+
# common.utils imports a fully qualified common.exceptions so that
|
40
|
+
# common.exceptions can import common.utils with out a circular import error
|
41
|
+
# (if we only make reference to attributes of a module w/i our function/method
|
42
|
+
# bodies fully qualifed module names can have their attributes lazily
|
43
|
+
# evaluated); as the only other module with-in utils that imports exceptions:
|
44
|
+
# we do the same here
|
45
|
+
import swift.common.exceptions
|
46
|
+
|
47
|
+
if six.PY2:
|
48
|
+
from eventlet.green import httplib as green_http_client
|
49
|
+
else:
|
50
|
+
from eventlet.green.http import client as green_http_client
|
51
|
+
from six.moves import http_client
|
52
|
+
from eventlet.green import threading
|
53
|
+
|
54
|
+
|
55
|
+
# Custom log level between INFO (20) and WARNING (30); mapped to the syslog
# LOG_NOTICE priority by logging_monkey_patch() below.
NOTICE = 25

# Default access-log line template, rendered with str.format(). The {time.*}
# lookups presumably expect an object exposing strftime-style single-letter
# attributes (d/b/Y/H/M/S) -- TODO confirm against the caller that builds it.
LOG_LINE_DEFAULT_FORMAT = '{remote_addr} - - [{time.d}/{time.b}/{time.Y}' \
                          ':{time.H}:{time.M}:{time.S} +0000] ' \
                          '"{method} {path}" {status} {content_length} ' \
                          '"{referer}" "{txn_id}" "{user_agent}" ' \
                          '{trans_time:.4f} "{additional_info}" {pid} ' \
                          '{policy_index}'
|
63
|
+
|
64
|
+
|
65
|
+
def logging_monkey_patch():
    """
    Apply process-wide adjustments to the stdlib ``logging`` module so it
    plays well with eventlet and syslog.
    """
    # explicitly patch the logging lock; logging.threading is the threading
    # module as seen by logging, so this picks up the (possibly green) RLock
    logging._lock = logging.threading.RLock()
    # setup notice level logging
    logging.addLevelName(NOTICE, 'NOTICE')
    SysLogHandler.priority_map['NOTICE'] = 'notice'
    # Trying to log threads while monkey-patched can lead to deadlocks; see
    # https://bugs.launchpad.net/swift/+bug/1895739
    logging.logThreads = 0
|
74
|
+
|
75
|
+
|
76
|
+
class PipeMutex(object):
    """
    Mutex using a pipe. Works across both greenlets and real threads, even
    at the same time.

    The pipe holds exactly one byte while the mutex is unlocked: acquiring
    reads the byte out of the pipe, releasing writes one back in.
    """

    def __init__(self):
        self.rfd, self.wfd = os.pipe()

        # You can't create a pipe in non-blocking mode; you must set it
        # later.
        rflags = fcntl.fcntl(self.rfd, fcntl.F_GETFL)
        fcntl.fcntl(self.rfd, fcntl.F_SETFL, rflags | os.O_NONBLOCK)
        os.write(self.wfd, b'-')  # start unlocked

        # id() of the greenthread currently holding the mutex, or None
        self.owner = None
        self.recursion_depth = 0

        # Usually, it's an error to have multiple greenthreads all waiting
        # to read the same file descriptor. It's often a sign of inadequate
        # concurrency control; for example, if you have two greenthreads
        # trying to use the same memcache connection, they'll end up writing
        # interleaved garbage to the socket or stealing part of each others'
        # responses.
        #
        # In this case, we have multiple greenthreads waiting on the same
        # file descriptor by design. This lets greenthreads in real thread A
        # wait with greenthreads in real thread B for the same mutex.
        # Therefore, we must turn off eventlet's multiple-reader detection.
        #
        # It would be better to turn off multiple-reader detection for only
        # our calls to trampoline(), but eventlet does not support that.
        eventlet.debug.hub_prevent_multiple_readers(False)

    def acquire(self, blocking=True):
        """
        Acquire the mutex.

        If called with blocking=False, returns True if the mutex was
        acquired and False if it wasn't. Otherwise, blocks until the mutex
        is acquired and returns True.

        This lock is recursive; the same greenthread may acquire it as many
        times as it wants to, though it must then release it that many times
        too.
        """
        current_greenthread_id = id(eventlet.greenthread.getcurrent())
        if self.owner == current_greenthread_id:
            self.recursion_depth += 1
            return True

        while True:
            try:
                # If there is a byte available, this will read it and remove
                # it from the pipe. If not, this will raise OSError with
                # errno=EAGAIN.
                os.read(self.rfd, 1)
                self.owner = current_greenthread_id
                return True
            except OSError as err:
                if err.errno != errno.EAGAIN:
                    raise

                if not blocking:
                    return False

                # Tell eventlet to suspend the current greenthread until
                # self.rfd becomes readable. This will happen when someone
                # else writes to self.wfd.
                eventlet.hubs.trampoline(self.rfd, read=True)

    def release(self):
        """
        Release the mutex.

        Raises RuntimeError when called by a greenthread that is not the
        current owner.
        """
        current_greenthread_id = id(eventlet.greenthread.getcurrent())
        if self.owner != current_greenthread_id:
            raise RuntimeError("cannot release un-acquired lock")

        if self.recursion_depth > 0:
            self.recursion_depth -= 1
            return

        self.owner = None
        os.write(self.wfd, b'X')

    def close(self):
        """
        Close the mutex. This releases its file descriptors.

        You can't use a mutex after it's been closed.
        """
        if self.wfd is not None:
            os.close(self.rfd)
            self.rfd = None
            os.close(self.wfd)
            self.wfd = None
        self.owner = None
        self.recursion_depth = 0

    def __del__(self):
        # We need this so we don't leak file descriptors. Otherwise, if you
        # call get_logger() and don't explicitly dispose of it by calling
        # logger.logger.handlers[0].lock.close() [1], the pipe file
        # descriptors are leaked.
        #
        # This only really comes up in tests. Swift processes tend to call
        # get_logger() once and then hang on to it until they exit, but the
        # test suite calls get_logger() a lot.
        #
        # [1] and that's a completely ridiculous thing to expect callers to
        # do, so nobody does it and that's okay.
        self.close()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, *args):
        self.release()
|
196
|
+
|
197
|
+
|
198
|
+
class NoopMutex(object):
    """
    A do-nothing stand-in for a mutex.

    Swift only allows its syslog logging to be configured via UDS or UDP,
    neither of which has the message-interleaving trouble you'd expect from
    TCP or file handlers, so no real locking is required.
    """

    def __init__(self):
        # Multiple greenthreads will be waiting on the same (logging) file
        # descriptor by design -- much like with PipeMutex -- which eventlet
        # would normally flag as a concurrency bug (e.g. two greenthreads
        # sharing one memcache connection and corrupting each other's
        # traffic). Disable its multiple-waiter detection here; eventlet
        # offers no way to disable it for just the logging fd.
        eventlet.debug.hub_prevent_multiple_readers(False)

    def acquire(self, blocking=True):
        """Pretend to take the lock; succeeds immediately."""
        pass

    def release(self):
        """Pretend to drop the lock."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass
|
234
|
+
|
235
|
+
|
236
|
+
class ThreadSafeSysLogHandler(SysLogHandler):
    # SysLogHandler whose handler-level lock is safe under eventlet.

    def createLock(self):
        # Default to the lock-free NoopMutex (safe because our syslog
        # transports are datagram-based; see NoopMutex). Operators may set
        # SWIFT_NOOP_LOGGING_MUTEX to a false-y value to get the heavier
        # PipeMutex, which locks across greenlets *and* real threads.
        if config_true_value(os.environ.get(
                'SWIFT_NOOP_LOGGING_MUTEX') or 'true'):
            self.lock = NoopMutex()
        else:
            self.lock = PipeMutex()
|
243
|
+
|
244
|
+
|
245
|
+
# double inheritance to support property with setter
class LogAdapter(logging.LoggerAdapter, object):
    """
    A Logger like object which performs some reformatting on calls to
    :meth:`exception`. Can be used to store a threadlocal transaction id and
    client ip.
    """

    # Greenthread-local storage shared by *all* LogAdapter instances, so a
    # txn_id/client_ip set through one adapter is visible through every
    # adapter in the same greenthread.
    _cls_thread_local = threading.local()

    def __init__(self, logger, server):
        """
        :param logger: the stdlib logger to wrap
        :param server: value stored on the adapter and injected into each
                       record's ``extra`` dict by :meth:`process`
        """
        logging.LoggerAdapter.__init__(self, logger, {})
        self.server = server
        # alias so callers may use .warn() as well as .warning()
        self.warn = self.warning

    # There are a few properties needed for py35; see
    # - https://bugs.python.org/issue31457
    # - https://github.com/python/cpython/commit/1bbd482
    # - https://github.com/python/cpython/commit/0b6a118
    # - https://github.com/python/cpython/commit/ce9e625
    def _log(self, level, msg, args, exc_info=None, extra=None,
             stack_info=False):
        """
        Low-level log implementation, proxied to allow nested logger adapters.
        """
        return self.logger._log(
            level,
            msg,
            args,
            exc_info=exc_info,
            extra=extra,
            stack_info=stack_info,
        )

    @property
    def manager(self):
        return self.logger.manager

    @manager.setter
    def manager(self, value):
        self.logger.manager = value

    @property
    def name(self):
        return self.logger.name

    @property
    def txn_id(self):
        # implicitly returns None when no txn id has been set on this
        # greenthread
        if hasattr(self._cls_thread_local, 'txn_id'):
            return self._cls_thread_local.txn_id

    @txn_id.setter
    def txn_id(self, value):
        self._cls_thread_local.txn_id = value

    @property
    def client_ip(self):
        # implicitly returns None when no client ip has been set on this
        # greenthread
        if hasattr(self._cls_thread_local, 'client_ip'):
            return self._cls_thread_local.client_ip

    @client_ip.setter
    def client_ip(self, value):
        self._cls_thread_local.client_ip = value

    @property
    def thread_locals(self):
        # (txn_id, client_ip) pair, convenient for handing request context
        # from one greenthread to another via the setter
        return (self.txn_id, self.client_ip)

    @thread_locals.setter
    def thread_locals(self, value):
        self.txn_id, self.client_ip = value

    def getEffectiveLevel(self):
        return self.logger.getEffectiveLevel()

    def process(self, msg, kwargs):
        """
        Add extra info to message
        """
        kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
                           'client_ip': self.client_ip}
        return msg, kwargs

    def notice(self, msg, *args, **kwargs):
        """
        Convenience function for syslog priority LOG_NOTICE. The python
        logging lvl is set to 25, just above info. SysLogHandler is
        monkey patched to map this log lvl to the LOG_NOTICE syslog
        priority.
        """
        self.log(NOTICE, msg, *args, **kwargs)

    def _exception(self, msg, *args, **kwargs):
        # stock LoggerAdapter behavior: log the message with the traceback
        logging.LoggerAdapter.exception(self, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """
        Log well-known, expected errors (connection trouble, timeouts) as
        concise one-line error() messages; anything unrecognized falls back
        to the stock exception() behavior with a full traceback.
        """
        _junk, exc, _junk = sys.exc_info()
        call = self.error
        emsg = ''
        if isinstance(exc, (http_client.BadStatusLine,
                            green_http_client.BadStatusLine)):
            # Use error(); not really exceptional
            emsg = repr(exc)
            # Note that on py3, we've seen a RemoteDisconnected error getting
            # raised, which inherits from *both* BadStatusLine and OSError;
            # we want it getting caught here
        elif isinstance(exc, (OSError, socket.error)):
            if exc.errno in (errno.EIO, errno.ENOSPC):
                emsg = str(exc)
            elif exc.errno == errno.ECONNREFUSED:
                emsg = 'Connection refused'
            elif exc.errno == errno.ECONNRESET:
                emsg = 'Connection reset'
            elif exc.errno == errno.EHOSTUNREACH:
                emsg = 'Host unreachable'
            elif exc.errno == errno.ENETUNREACH:
                emsg = 'Network unreachable'
            elif exc.errno == errno.ETIMEDOUT:
                emsg = 'Connection timeout'
            elif exc.errno == errno.EPIPE:
                emsg = 'Broken pipe'
            else:
                # unrecognized errno: keep the traceback
                call = self._exception
        elif isinstance(exc, eventlet.Timeout):
            emsg = exc.__class__.__name__
            detail = '%ss' % exc.seconds
            if hasattr(exc, 'created_at'):
                detail += ' after %0.2fs' % (time.time() - exc.created_at)
            emsg += ' (%s)' % detail
            if isinstance(exc, swift.common.exceptions.MessageTimeout):
                if exc.msg:
                    emsg += ' %s' % exc.msg
        else:
            # not an exception type we know how to summarize
            call = self._exception
        call('%s: %s' % (msg, emsg), *args, **kwargs)

    def set_statsd_prefix(self, prefix):
        """
        This method is deprecated. Callers should use the
        ``statsd_tail_prefix`` argument of ``get_logger`` when instantiating a
        logger.

        The StatsD client prefix defaults to the "name" of the logger. This
        method may override that default with a specific value. Currently used
        in the proxy-server to differentiate the Account, Container, and Object
        controllers.
        """
        warnings.warn(
            'set_statsd_prefix() is deprecated; use the '
            '``statsd_tail_prefix`` argument to ``get_logger`` instead.',
            DeprecationWarning, stacklevel=2
        )
        if self.logger.statsd_client:
            self.logger.statsd_client._set_prefix(prefix)

    def statsd_delegate(statsd_func_name):
        """
        Factory to create methods which delegate to methods on
        self.logger.statsd_client (an instance of StatsdClient). The
        created methods conditionally delegate to a method whose name is given
        in 'statsd_func_name'. The created delegate methods are a no-op when
        StatsD logging is not configured.

        :param statsd_func_name: the name of a method on StatsdClient.
        """
        # NOTE: this is a plain function executed in the class body (not a
        # method); it runs once per delegated name at class-definition time.
        func = getattr(statsd_client.StatsdClient, statsd_func_name)

        @functools.wraps(func)
        def wrapped(self, *a, **kw):
            if getattr(self.logger, 'statsd_client'):
                func = getattr(self.logger.statsd_client, statsd_func_name)
                return func(*a, **kw)
        return wrapped

    update_stats = statsd_delegate('update_stats')
    increment = statsd_delegate('increment')
    decrement = statsd_delegate('decrement')
    timing = statsd_delegate('timing')
    timing_since = statsd_delegate('timing_since')
    transfer_rate = statsd_delegate('transfer_rate')
|
425
|
+
|
426
|
+
|
427
|
+
class SwiftLogFormatter(logging.Formatter):
    """
    Custom logging.Formatter for swift's syslog-oriented output.

    On top of the stock formatting it:

    * folds newlines (in the message and any traceback) into ``#012`` so
      every record stays on a single syslog line;
    * appends the record's ``txn_id`` -- and, for non-INFO records, its
      ``client_ip`` -- when set and not already part of the message;
    * optionally shortens overly long lines.
    """

    def __init__(self, fmt=None, datefmt=None, max_line_length=0):
        logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
        # 0 (the default) disables line-length enforcement entirely
        self.max_line_length = max_line_length

    def format(self, record):
        # Records that did not originate in swift (for example, the keystone
        # auth middleware) lack the 'server' attribute our format strings
        # expect; substitute the logger name.
        if not hasattr(record, 'server'):
            record.server = record.name

        # Mirror Python's logging.Formatter.format(), altered slightly to
        # replace \n with #012.
        record.message = record.getMessage()
        if self._fmt.find('%(asctime)') >= 0:
            record.asctime = self.formatTime(record, self.datefmt)
        line = (self._fmt % record.__dict__).replace('\n', '#012')

        if record.exc_info and not record.exc_text:
            # Render the traceback once and cache it on the record (it's
            # constant anyway and other handlers may reuse it).
            record.exc_text = self.formatException(
                record.exc_info).replace('\n', '#012')
        if record.exc_text:
            if not line.endswith('#012'):
                line += '#012'
            line += record.exc_text

        txn_id = getattr(record, 'txn_id', None)
        if txn_id and txn_id not in line:
            line = "%s (txn: %s)" % (line, txn_id)
        client_ip = getattr(record, 'client_ip', None)
        if (client_ip and record.levelno != logging.INFO and
                client_ip not in line):
            line = "%s (client_ip: %s)" % (line, client_ip)

        limit = self.max_line_length
        if 0 < limit < len(line):
            if limit < 7:
                # No room for a " ... " marker; hard truncate.
                line = line[:limit]
            else:
                keep = (limit - 5) // 2
                line = line[:keep] + " ... " + line[-keep:]
        return line
|
475
|
+
|
476
|
+
|
477
|
+
class LoggerFileObject(object):
    """
    File-like object that forwards writes to a logger at ERROR level.

    Installed in place of sys.stdout/sys.stderr so stray output lands in
    the swift log. Reads are unsupported and raise IOError(EBADF).
    """

    # Note: this is greenthread-local storage
    _cls_thread_local = threading.local()

    def __init__(self, logger, log_type='STDOUT'):
        self.logger = logger
        self.log_type = log_type

    def write(self, value):
        # Guard against unbounded recursion when syslog itself is broken:
        # (A) someone logs; (B) sending to /dev/log raises; (C) logging
        # writes that exception to stderr (logging.Handler.handleError);
        # (D) stderr is this object, whose write() logs again... so if we
        # are already inside write() on this greenthread, drop the message.
        if getattr(self._cls_thread_local, 'already_called_write', False):
            return

        self._cls_thread_local.already_called_write = True
        try:
            stripped = value.strip()
            if not stripped:
                return
            if 'Connection reset by peer' in stripped:
                self.logger.error(
                    '%s: Connection reset by peer', self.log_type)
            else:
                self.logger.error('%(type)s: %(value)s',
                                  {'type': self.log_type,
                                   'value': stripped})
        finally:
            self._cls_thread_local.already_called_write = False

    def writelines(self, values):
        # Same re-entrancy guard as write(), tracked with its own flag.
        if getattr(self._cls_thread_local, 'already_called_writelines', False):
            return

        self._cls_thread_local.already_called_writelines = True
        try:
            self.logger.error('%(type)s: %(value)s',
                              {'type': self.log_type,
                               'value': '#012'.join(values)})
        finally:
            self._cls_thread_local.already_called_writelines = False

    def close(self):
        pass

    def flush(self):
        pass

    def __iter__(self):
        return self

    def next(self):
        raise IOError(errno.EBADF, 'Bad file descriptor')
    __next__ = next

    def read(self, size=-1):
        raise IOError(errno.EBADF, 'Bad file descriptor')

    def readline(self, size=-1):
        raise IOError(errno.EBADF, 'Bad file descriptor')

    def tell(self):
        return 0

    def xreadlines(self):
        return self
|
557
|
+
|
558
|
+
|
559
|
+
class SwiftLoggerAdapter(logging.LoggerAdapter):
    """
    A logging.LoggerAdapter subclass that also passes through StatsD method
    calls.

    Like logging.LoggerAdapter, you have to subclass this and override the
    process() method to accomplish anything useful.
    """

    @property
    def name(self):
        # py3's LoggerAdapter already exposes this; keep it for py2
        return self.logger.name

    def update_stats(self, *args, **kwargs):
        return self.logger.update_stats(*args, **kwargs)

    def increment(self, *args, **kwargs):
        return self.logger.increment(*args, **kwargs)

    def decrement(self, *args, **kwargs):
        return self.logger.decrement(*args, **kwargs)

    def timing(self, *args, **kwargs):
        return self.logger.timing(*args, **kwargs)

    def timing_since(self, *args, **kwargs):
        return self.logger.timing_since(*args, **kwargs)

    def transfer_rate(self, *args, **kwargs):
        return self.logger.transfer_rate(*args, **kwargs)

    @property
    def thread_locals(self):
        return self.logger.thread_locals

    @thread_locals.setter
    def thread_locals(self, thread_locals):
        self.logger.thread_locals = thread_locals

    def exception(self, msg, *args, **kwargs):
        # Up-call to the wrapped logger's exception() where stdlib would use
        # error(), so LogAdapter's traceback suppression still applies.
        self.logger.exception(msg, *args, **kwargs)
|
603
|
+
|
604
|
+
|
605
|
+
class PrefixLoggerAdapter(SwiftLoggerAdapter):
    """
    Adds an optional prefix to all its log messages. When the prefix has not
    been set, messages are unchanged.
    """

    def set_prefix(self, prefix):
        # Kept in the adapter's ``extra`` dict so process() sees it.
        self.extra['prefix'] = prefix

    def _apply_prefix(self, msg):
        # Single place for the "prepend the prefix if one is configured"
        # rule; membership test (not .get()) preserves behavior if the
        # prefix was explicitly set to None.
        if 'prefix' in self.extra:
            return self.extra['prefix'] + msg
        return msg

    def exception(self, msg, *args, **kwargs):
        super(PrefixLoggerAdapter, self).exception(
            self._apply_prefix(msg), *args, **kwargs)

    def process(self, msg, kwargs):
        msg, kwargs = super(PrefixLoggerAdapter, self).process(msg, kwargs)
        return (self._apply_prefix(msg), kwargs)
|
624
|
+
|
625
|
+
|
626
|
+
class LogLevelFilter(object):
    """
    Drop messages for the logger based on level.

    This is useful when dependencies log too much information.

    :param level: All messages at or below this level are dropped
                  (DEBUG < INFO < WARN < ERROR < CRITICAL|FATAL)
                  Default: DEBUG
    """

    def __init__(self, level=logging.DEBUG):
        self.level = level

    def filter(self, record):
        # logging treats a falsy return value as "drop this record"
        return 0 if record.levelno <= self.level else 1
|
644
|
+
|
645
|
+
|
646
|
+
def get_logger(conf, name=None, log_to_console=False, log_route=None,
               fmt="%(server)s: %(message)s", statsd_tail_prefix=None):
    """
    Get the current system logger using config settings.

    **Log config and defaults**::

        log_facility = LOG_LOCAL0
        log_level = INFO
        log_name = swift
        log_max_line_length = 0
        log_udp_host = (disabled)
        log_udp_port = logging.handlers.SYSLOG_UDP_PORT
        log_address = /dev/log

    :param conf: Configuration dict to read settings from
    :param name: This value is used to populate the ``server`` field in the log
                 format, as the prefix for statsd messages, and as the default
                 value for ``log_route``; defaults to the ``log_name`` value in
                 ``conf``, if it exists, or to 'swift'.
    :param log_to_console: Add handler which writes to console on stderr
    :param log_route: Route for the logging, not emitted to the log, just used
                      to separate logging configurations; defaults to the value
                      of ``name`` or whatever ``name`` defaults to. This value
                      is used as the name attribute of the
                      ``logging.LogAdapter`` that is returned.
    :param fmt: Override log format
    :param statsd_tail_prefix: tail prefix to pass to statsd client; if None
        then the tail prefix defaults to the value of ``name``.
    :return: an instance of ``LogAdapter``
    """
    # note: log_name is typically specified in conf (i.e. defined by
    # operators), whereas log_route is typically hard-coded in callers of
    # get_logger (i.e. defined by developers)
    if not conf:
        conf = {}
    if name is None:
        name = conf.get('log_name', 'swift')
    if not log_route:
        log_route = name
    logger = logging.getLogger(log_route)
    # don't pass records up to the root logger's handlers
    logger.propagate = False
    # all new handlers will get the same formatter
    formatter = SwiftLogFormatter(
        fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))

    # get_logger will only ever add one SysLog Handler to a logger
    if not hasattr(get_logger, 'handler4logger'):
        get_logger.handler4logger = {}
    if logger in get_logger.handler4logger:
        logger.removeHandler(get_logger.handler4logger[logger])

    # facility for this logger will be set by last call wins
    facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
                       SysLogHandler.LOG_LOCAL0)
    udp_host = conf.get('log_udp_host')
    if udp_host:
        # operator explicitly asked for UDP syslog
        udp_port = int(conf.get('log_udp_port',
                                logging.handlers.SYSLOG_UDP_PORT))
        handler = ThreadSafeSysLogHandler(address=(udp_host, udp_port),
                                          facility=facility)
    else:
        # prefer the local syslog socket; fall back to UDP if it's absent
        log_address = conf.get('log_address', '/dev/log')
        handler = None
        try:
            mode = os.stat(log_address).st_mode
            if stat.S_ISSOCK(mode):
                handler = ThreadSafeSysLogHandler(address=log_address,
                                                  facility=facility)
        except (OSError, socket.error) as e:
            # If either /dev/log isn't a UNIX socket or it does not exist at
            # all then py2 would raise an error
            if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
                raise
        if handler is None:
            # fallback to default UDP
            handler = ThreadSafeSysLogHandler(facility=facility)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # remember the handler so a later get_logger() call can swap it out
    get_logger.handler4logger[logger] = handler

    # setup console logging
    if log_to_console or hasattr(get_logger, 'console_handler4logger'):
        # remove pre-existing console handler for this logger
        if not hasattr(get_logger, 'console_handler4logger'):
            get_logger.console_handler4logger = {}
        if logger in get_logger.console_handler4logger:
            logger.removeHandler(get_logger.console_handler4logger[logger])

        console_handler = logging.StreamHandler(sys.__stderr__)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        get_logger.console_handler4logger[logger] = console_handler

    # set the level for the logger
    logger.setLevel(
        getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))

    # Setup logger with a StatsD client if so configured
    if statsd_tail_prefix is None:
        statsd_tail_prefix = name
    logger.statsd_client = statsd_client.get_statsd_client(
        conf, statsd_tail_prefix, logger)

    adapted_logger = LogAdapter(logger, name)
    other_handlers = conf.get('log_custom_handlers', None)
    if other_handlers:
        log_custom_handlers = [s.strip() for s in other_handlers.split(',')
                               if s.strip()]
        for hook in log_custom_handlers:
            try:
                # each hook is a dotted path to a callable invoked with
                # (conf, name, log_to_console, log_route, fmt, logger,
                # adapted_logger)
                mod, fnc = hook.rsplit('.', 1)
                logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
                logger_hook(conf, name, log_to_console, log_route, fmt,
                            logger, adapted_logger)
            except (AttributeError, ImportError):
                print('Error calling custom handler [%s]' % hook,
                      file=sys.stderr)
            except ValueError:
                print('Invalid custom handler format [%s]' % hook,
                      file=sys.stderr)

    return adapted_logger
|
769
|
+
|
770
|
+
|
771
|
+
class NullLogger(object):
    """A no-op logger for eventlet wsgi."""

    def _discard(self, *args):
        # Shared sink: accept anything, do nothing, return None.
        pass

    # Every logging entry point simply discards its arguments.
    write = _discard
    exception = _discard
    critical = _discard
    error = _discard
    warning = _discard
    info = _discard
    debug = _discard
    log = _discard
|
798
|
+
|
799
|
+
|
800
|
+
def capture_stdio(logger, **kwargs):
    """
    Log unhandled exceptions, close stdio, capture stdout and stderr.

    :param logger: Logger object to use
    :param kwargs: optional keywords ``capture_stdout`` and
        ``capture_stderr`` (both default True) control whether
        ``sys.stdout`` / ``sys.stderr`` are replaced with
        ``LoggerFileObject`` wrappers around ``logger``
    """
    # log uncaught exceptions
    sys.excepthook = lambda * exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)

    # collect stdio file desc not in use for logging
    stdio_files = [sys.stdin, sys.stdout, sys.stderr]
    console_fds = [h.stream.fileno() for _junk, h in getattr(
        get_logger, 'console_handler4logger', {}).items()]
    # keep any stdio fd that a console log handler is writing to
    stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]

    with open(os.devnull, 'r+b') as nullfile:
        # close stdio (excludes fds open for logging)
        for f in stdio_files:
            # some platforms throw an error when attempting an stdin flush
            try:
                f.flush()
            except IOError:
                pass

            # repoint the fd at /dev/null; ignore fds that can't be duped
            try:
                os.dup2(nullfile.fileno(), f.fileno())
            except OSError:
                pass

    # redirect stdio
    if kwargs.pop('capture_stdout', True):
        sys.stdout = LoggerFileObject(logger)
    if kwargs.pop('capture_stderr', True):
        sys.stderr = LoggerFileObject(logger, 'STDERR')
|
835
|
+
|
836
|
+
|
837
|
+
class StrAnonymizer(str):
    """
    Class that permits to get a string anonymized or simply quoted.
    """

    def __new__(cls, data, method, salt):
        method = method.lower()
        # Only hashes guaranteed to exist on this interpreter are accepted.
        available = (hashlib.algorithms if six.PY2 else
                     hashlib.algorithms_guaranteed)
        if method not in available:
            raise ValueError('Unsupported hashing method: %r' % method)
        obj = str.__new__(cls, data or '')
        obj.method = method
        obj.salt = salt
        return obj

    @property
    def anonymized(self):
        """Return a salted-hash rendering of this string ('' stays '')."""
        if not self:
            return self
        if self.method == 'md5':
            # md5 needs the usedforsecurity flag for FIPS environments
            hasher = md5(usedforsecurity=False)
        else:
            hasher = getattr(hashlib, self.method)()
        if self.salt:
            hasher.update(six.b(self.salt))
        hasher.update(six.b(self))
        marker = 'S' if self.salt else ''
        return '{%s%s}%s' % (marker, self.method.upper(), hasher.hexdigest())
|
866
|
+
|
867
|
+
|
868
|
+
class StrFormatTime(object):
    """
    Class that permits to get formats or parts of a time.
    """

    def __init__(self, ts):
        # Keep both the raw float timestamp and its UTC broken-down form.
        self.time = ts
        self.time_struct = time.gmtime(ts)

    def __str__(self):
        # Fixed nine fractional digits; the ms/us/ns properties rely on it.
        return "%.9f" % self.time

    def __getattr__(self, attr):
        # Unknown single-letter attributes are treated as time.strftime
        # directives rendered against the (UTC) timestamp.
        if attr not in {'a', 'A', 'b', 'B', 'c', 'd', 'H',
                        'I', 'j', 'm', 'M', 'p', 'S', 'U',
                        'w', 'W', 'x', 'X', 'y', 'Y', 'Z'}:
            raise ValueError(("The attribute %s is not a correct directive "
                              "for time.strftime formater.") % attr)
        return datetime.datetime(*self.time_struct[:-2],
                                 tzinfo=UTC).strftime('%' + attr)

    def _fraction(self, digits):
        # Fractional part of the timestamp truncated to ``digits`` places.
        return self.__str__().split('.')[1][:digits]

    @property
    def asctime(self):
        return time.asctime(self.time_struct)

    @property
    def datetime(self):
        return time.strftime('%d/%b/%Y/%H/%M/%S', self.time_struct)

    @property
    def iso8601(self):
        return time.strftime('%Y-%m-%dT%H:%M:%S', self.time_struct)

    @property
    def ms(self):
        return self._fraction(3)

    @property
    def us(self):
        return self._fraction(6)

    @property
    def ns(self):
        return self._fraction(9)

    @property
    def s(self):
        return self.__str__().split('.')[0]
|
916
|
+
|
917
|
+
|
918
|
+
def get_log_line(req, res, trans_time, additional_info, fmt,
                 anonymization_method, anonymization_salt):
    """
    Make a line for logging that matches the documented log line format
    for backend servers.

    :param req: the request.
    :param res: the response.
    :param trans_time: the time the request took to complete, a float.
    :param additional_info: a string to log at the end of the line

    :returns: a properly formatted line for logging.
    """
    def anon(value):
        # Wrap a field so the formatter can anonymize it on demand.
        return StrAnonymizer(value, anonymization_method, anonymization_salt)

    policy_index = get_policy_index(req.headers, res.headers)
    if req.path.startswith('/'):
        disk, partition, account, container, obj = split_path(
            req.path, 0, 5, True)
    else:
        # not a backend-style path; leave the path components unset
        disk = partition = account = container = obj = None
    replacements = {
        'remote_addr': anon(req.remote_addr),
        'time': StrFormatTime(time.time()),
        'method': req.method,
        'path': anon(req.path),
        'disk': disk,
        'partition': partition,
        'account': anon(account),
        'container': anon(container),
        'object': anon(obj),
        'status': res.status.split()[0],
        'content_length': res.content_length,
        'referer': anon(req.referer),
        'txn_id': req.headers.get('x-trans-id'),
        'user_agent': anon(req.user_agent),
        'trans_time': trans_time,
        'additional_info': additional_info,
        'pid': os.getpid(),
        'policy_index': policy_index,
    }
    # missing/empty fields render as '-'
    return LogStringFormatter(default='-').format(fmt, **replacements)
|
966
|
+
|
967
|
+
|
968
|
+
def get_policy_index(req_headers, res_headers):
    """
    Returns the appropriate index of the storage policy for the request from
    a proxy server

    :param req_headers: dict of the request headers.
    :param res_headers: dict of the response headers.

    :returns: string index of storage policy, or None
    """
    header = 'X-Backend-Storage-Policy-Index'
    # the response header takes precedence over the request header
    policy_index = res_headers.get(header, req_headers.get(header))
    if policy_index is None:
        return None
    # on py3 the backend may hand us bytes; normalize to text first
    if not six.PY2 and isinstance(policy_index, six.binary_type):
        policy_index = policy_index.decode('ascii')
    return str(policy_index)
|
983
|
+
|
984
|
+
|
985
|
+
class LogStringFormatter(string.Formatter):
    """
    string.Formatter that renders falsy field values as ``default`` and
    can optionally URL-quote the formatted output.
    """

    def __init__(self, default='', quote=False):
        super(LogStringFormatter, self).__init__()
        self.default = default
        self.quote = quote

    def format_field(self, value, spec):
        # Falsy values (None, '', 0, ...) collapse to the default marker.
        if not value:
            return self.default
        rendered = super(LogStringFormatter, self).format_field(value, spec)
        return quote(rendered, ':/{}') if self.quote else rendered
|