s3_cmd_bin 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +17 -0
- data/Gemfile +3 -0
- data/LICENSE.txt +22 -0
- data/README.md +28 -0
- data/Rakefile +1 -0
- data/lib/s3_cmd_bin/version.rb +3 -0
- data/lib/s3_cmd_bin.rb +15 -0
- data/resources/ChangeLog +1462 -0
- data/resources/INSTALL +97 -0
- data/resources/LICENSE +339 -0
- data/resources/MANIFEST.in +2 -0
- data/resources/Makefile +4 -0
- data/resources/NEWS +234 -0
- data/resources/README +342 -0
- data/resources/S3/ACL.py +224 -0
- data/resources/S3/ACL.pyc +0 -0
- data/resources/S3/AccessLog.py +92 -0
- data/resources/S3/AccessLog.pyc +0 -0
- data/resources/S3/BidirMap.py +42 -0
- data/resources/S3/BidirMap.pyc +0 -0
- data/resources/S3/CloudFront.py +773 -0
- data/resources/S3/CloudFront.pyc +0 -0
- data/resources/S3/Config.py +294 -0
- data/resources/S3/Config.pyc +0 -0
- data/resources/S3/ConnMan.py +71 -0
- data/resources/S3/ConnMan.pyc +0 -0
- data/resources/S3/Exceptions.py +88 -0
- data/resources/S3/Exceptions.pyc +0 -0
- data/resources/S3/FileDict.py +53 -0
- data/resources/S3/FileDict.pyc +0 -0
- data/resources/S3/FileLists.py +517 -0
- data/resources/S3/FileLists.pyc +0 -0
- data/resources/S3/HashCache.py +53 -0
- data/resources/S3/HashCache.pyc +0 -0
- data/resources/S3/MultiPart.py +137 -0
- data/resources/S3/MultiPart.pyc +0 -0
- data/resources/S3/PkgInfo.py +14 -0
- data/resources/S3/PkgInfo.pyc +0 -0
- data/resources/S3/Progress.py +173 -0
- data/resources/S3/Progress.pyc +0 -0
- data/resources/S3/S3.py +979 -0
- data/resources/S3/S3.pyc +0 -0
- data/resources/S3/S3Uri.py +223 -0
- data/resources/S3/S3Uri.pyc +0 -0
- data/resources/S3/SimpleDB.py +178 -0
- data/resources/S3/SortedDict.py +66 -0
- data/resources/S3/SortedDict.pyc +0 -0
- data/resources/S3/Utils.py +462 -0
- data/resources/S3/Utils.pyc +0 -0
- data/resources/S3/__init__.py +0 -0
- data/resources/S3/__init__.pyc +0 -0
- data/resources/TODO +52 -0
- data/resources/artwork/AtomicClockRadio.ttf +0 -0
- data/resources/artwork/TypeRa.ttf +0 -0
- data/resources/artwork/site-top-full-size.xcf +0 -0
- data/resources/artwork/site-top-label-download.png +0 -0
- data/resources/artwork/site-top-label-s3cmd.png +0 -0
- data/resources/artwork/site-top-label-s3sync.png +0 -0
- data/resources/artwork/site-top-s3tools-logo.png +0 -0
- data/resources/artwork/site-top.jpg +0 -0
- data/resources/artwork/site-top.png +0 -0
- data/resources/artwork/site-top.xcf +0 -0
- data/resources/format-manpage.pl +196 -0
- data/resources/magic +63 -0
- data/resources/run-tests.py +537 -0
- data/resources/s3cmd +2116 -0
- data/resources/s3cmd.1 +435 -0
- data/resources/s3db +55 -0
- data/resources/setup.cfg +2 -0
- data/resources/setup.py +80 -0
- data/resources/testsuite.tar.gz +0 -0
- data/resources/upload-to-sf.sh +7 -0
- data/s3_cmd_bin.gemspec +23 -0
- metadata +152 -0
@@ -0,0 +1,462 @@
|
|
1
|
+
## Amazon S3 manager
|
2
|
+
## Author: Michal Ludvig <michal@logix.cz>
|
3
|
+
## http://www.logix.cz/michal
|
4
|
+
## License: GPL Version 2
|
5
|
+
|
6
|
+
import datetime
|
7
|
+
import os
|
8
|
+
import sys
|
9
|
+
import time
|
10
|
+
import re
|
11
|
+
import string
|
12
|
+
import random
|
13
|
+
import rfc822
|
14
|
+
import hmac
|
15
|
+
import base64
|
16
|
+
import errno
|
17
|
+
import urllib
|
18
|
+
|
19
|
+
from logging import debug, info, warning, error
|
20
|
+
|
21
|
+
|
22
|
+
import Config
|
23
|
+
import Exceptions
|
24
|
+
|
25
|
+
# hashlib backported to python 2.4 / 2.5 is not compatible with hmac!
|
26
|
+
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
|
27
|
+
from md5 import md5
|
28
|
+
import sha as sha1
|
29
|
+
else:
|
30
|
+
from hashlib import md5, sha1
|
31
|
+
|
32
|
+
try:
|
33
|
+
import xml.etree.ElementTree as ET
|
34
|
+
except ImportError:
|
35
|
+
import elementtree.ElementTree as ET
|
36
|
+
from xml.parsers.expat import ExpatError
|
37
|
+
|
38
|
+
__all__ = []
|
39
|
+
def parseNodes(nodes):
    """
    Convert a list of XML Elements into a list of dicts, one dict per
    node, keyed by the tags of the node's children.

    ## WARNING: Ignores text nodes from mixed xml/text.
    ## For instance <tag1>some text<tag2>other text</tag2></tag1>
    ## will ignore the "some text" node.
    """
    retval = []
    for node in nodes:
        retval_item = {}
        for child in node.getchildren():
            name = child.tag
            if child.getchildren():
                # Nested element: recurse; the value becomes a list of dicts.
                retval_item[name] = parseNodes([child])
            else:
                # NOTE(review): findtext(".//tag") returns the text of the
                # FIRST matching descendant, so repeated child tags all map
                # to the first occurrence's text -- confirm intended.
                retval_item[name] = node.findtext(".//%s" % child.tag)
        retval.append(retval_item)
    return retval
|
54
|
+
__all__.append("parseNodes")
|
55
|
+
|
56
|
+
def stripNameSpace(xml):
    """
    Strip the top-level xmlns="http://..." declaration from 'xml'.

    Returns a (xml, xmlns) tuple: the document without the namespace
    attribute, and the namespace URI itself (None when the input carried
    no namespace declaration).
    """
    pattern = re.compile(r'^(<?[^>]+?>\s?)(<\w+) xmlns=[\'"](http://[^\'"]+)[\'"](.*)', re.MULTILINE)
    match = pattern.match(xml)
    if match is None:
        return xml, None
    xmlns = match.group(3)
    stripped = pattern.sub("\\1\\2\\4", xml)
    return stripped, xmlns
|
67
|
+
__all__.append("stripNameSpace")
|
68
|
+
|
69
|
+
def getTreeFromXml(xml):
    """
    Parse 'xml' into an ElementTree Element.

    The top-level AWS namespace is stripped first; when one was present
    its URI is stored back on the root element as an 'xmlns' attribute.

    Raises Exceptions.ParameterError when expat cannot parse the input.
    """
    xml, xmlns = stripNameSpace(xml)
    try:
        tree = ET.fromstring(xml)
        if xmlns:
            # Remember the stripped namespace for callers that need it.
            tree.attrib['xmlns'] = xmlns
        return tree
    except ExpatError, e:
        error(e)
        raise Exceptions.ParameterError("Bucket contains invalid filenames. Please run: s3cmd fixbucket s3://your-bucket/")
|
79
|
+
__all__.append("getTreeFromXml")
|
80
|
+
|
81
|
+
def getListFromXml(xml, node):
    """Find every <node> element in 'xml' and return them parsed via parseNodes()."""
    matches = getTreeFromXml(xml).findall('.//%s' % (node))
    return parseNodes(matches)
|
85
|
+
__all__.append("getListFromXml")
|
86
|
+
|
87
|
+
def getDictFromTree(tree):
    """
    Recursively convert an ElementTree Element into a dict.

    Leaf children map tag -> text (empty string for empty elements);
    nested children map tag -> sub-dict. When a tag occurs more than
    once its values are collected into a list.
    """
    ret_dict = {}
    # Iterating the element directly and testing len() replaces the
    # deprecated getchildren()/has_key() calls and works with both the
    # py2 and py3 ElementTree APIs.
    for child in tree:
        if len(child):
            ## Complex-type child. Recurse
            content = getDictFromTree(child)
        else:
            content = child.text
        if child.tag in ret_dict:
            # Repeated tag: promote the existing value to a list.
            if not type(ret_dict[child.tag]) == list:
                ret_dict[child.tag] = [ret_dict[child.tag]]
            ret_dict[child.tag].append(content or "")
        else:
            ret_dict[child.tag] = content or ""
    return ret_dict
|
102
|
+
__all__.append("getDictFromTree")
|
103
|
+
|
104
|
+
def getTextFromXml(xml, xpath):
    """
    Return the text of the first element matching 'xpath' in 'xml',
    or the root element's own text when the root tag itself matches.
    """
    tree = getTreeFromXml(xml)
    if tree.tag.endswith(xpath):
        return tree.text
    else:
        return tree.findtext(xpath)
|
110
|
+
__all__.append("getTextFromXml")
|
111
|
+
|
112
|
+
def getRootTagName(xml):
    """Return the tag name of the root element of the given XML document."""
    return getTreeFromXml(xml).tag
|
115
|
+
__all__.append("getRootTagName")
|
116
|
+
|
117
|
+
def xmlTextNode(tag_name, text):
    """Create and return a new <tag_name> Element whose text is unicode(text)."""
    el = ET.Element(tag_name)
    el.text = unicode(text)
    return el
|
121
|
+
__all__.append("xmlTextNode")
|
122
|
+
|
123
|
+
def appendXmlTextNode(tag_name, text, parent):
    """
    Create a new <tag_name> node containing 'text', append it to
    'parent' and return the newly created node.
    """
    node = xmlTextNode(tag_name, text)
    parent.append(node)
    return node
|
133
|
+
__all__.append("appendXmlTextNode")
|
134
|
+
|
135
|
+
def dateS3toPython(date):
    """
    Parse an S3 timestamp ("2011-04-05T06:07:08.000Z" style) into a
    time.struct_time. Any fractional-second part (or bare 'Z') is
    normalised to '.000Z' before parsing.
    """
    normalized = re.sub("(\.\d*)?Z", ".000Z", date)
    return time.strptime(normalized, "%Y-%m-%dT%H:%M:%S.000Z")
|
138
|
+
__all__.append("dateS3toPython")
|
139
|
+
|
140
|
+
def dateS3toUnix(date):
    """
    Convert an S3 timestamp string to UNIX epoch time.

    ## FIXME: This should be timezone-aware.
    ## Currently the argument to strptime() is GMT but mktime()
    ## treats it as "localtime". Anyway...
    """
    parsed = dateS3toPython(date)
    return time.mktime(parsed)
|
145
|
+
__all__.append("dateS3toUnix")
|
146
|
+
|
147
|
+
def dateRFC822toPython(date):
    """Parse an RFC 822 date string into a time 9-tuple (None when unparseable)."""
    return rfc822.parsedate(date)
|
149
|
+
__all__.append("dateRFC822toPython")
|
150
|
+
|
151
|
+
def dateRFC822toUnix(date):
    """Convert an RFC 822 date string to UNIX epoch time (via local-time mktime)."""
    return time.mktime(dateRFC822toPython(date))
|
153
|
+
__all__.append("dateRFC822toUnix")
|
154
|
+
|
155
|
+
def formatSize(size, human_readable = False, floating_point = False):
    """
    Normalise 'size' and return a (value, suffix) pair.

    With human_readable the value is repeatedly divided by 1024 while it
    exceeds 2048 and the matching suffix ('k', 'M', 'G', 'T') is returned
    alongside it; otherwise the suffix is always "".
    """
    size = floating_point and float(size) or int(size)
    if not human_readable:
        return (size, "")
    suffixes = ['k', 'M', 'G', 'T']
    coeff = ""
    while size > 2048:
        size /= 1024
        coeff = suffixes.pop(0)
    return (size, coeff)
|
166
|
+
__all__.append("formatSize")
|
167
|
+
|
168
|
+
def formatDateTime(s3timestamp):
    """
    Format an S3 timestamp string as "YYYY-MM-DD HH:MM".

    When the optional 'pytz' module is importable the UTC timestamp is
    converted into the timezone named by $TZ (default 'UTC'); otherwise
    the timestamp is formatted as-is, without timezone conversion.
    """
    try:
        import pytz
        timezone = pytz.timezone(os.environ.get('TZ', 'UTC'))
        tz = pytz.timezone('UTC')
        ## Can't unpack args and follow that with kwargs in python 2.5
        ## So we pass them all as kwargs
        params = zip(('year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo'),
                     dateS3toPython(s3timestamp)[0:6] + (tz,))
        params = dict(params)
        utc_dt = datetime.datetime(**params)
        dt_object = utc_dt.astimezone(timezone)
    except ImportError:
        # pytz not installed -- fall back to a naive datetime (UTC values).
        dt_object = datetime.datetime(*dateS3toPython(s3timestamp)[0:6])
    return dt_object.strftime("%Y-%m-%d %H:%M")
|
183
|
+
__all__.append("formatDateTime")
|
184
|
+
|
185
|
+
def convertTupleListToDict(list):
    """
    Convert a list of (key, value, ...) tuples into a {key: value} dict.

    Only the first two items of each tuple are used; later duplicates of
    a key overwrite earlier ones. (The parameter keeps its historical
    name 'list' for interface compatibility.)
    """
    retval = {}
    # 'item' instead of the original loop variable that shadowed the
    # builtin 'tuple'.
    for item in list:
        retval[item[0]] = item[1]
    return retval
|
190
|
+
__all__.append("convertTupleListToDict")
|
191
|
+
|
192
|
+
_rnd_chars = string.ascii_letters+string.digits
_rnd_chars_len = len(_rnd_chars)
def rndstr(len):
    """Return a string of 'len' random alphanumeric characters."""
    # The parameter deliberately keeps its historical name 'len'
    # (it shadows the builtin inside this function only).
    chars = []
    count = len
    while count > 0:
        chars.append(_rnd_chars[random.randint(0, _rnd_chars_len-1)])
        count -= 1
    return "".join(chars)
|
200
|
+
__all__.append("rndstr")
|
201
|
+
|
202
|
+
def mktmpsomething(prefix, randchars, createfunc):
    """
    Create a uniquely-named temporary entry via 'createfunc'.

    Tries up to 5 names of the form prefix + <randchars random chars>;
    an EEXIST collision is retried, any other OSError is re-raised.
    The umask is temporarily set to 0077 so the created entry is only
    accessible to the current user.

    Returns the name that was created.
    """
    old_umask = os.umask(0077)
    tries = 5
    while tries > 0:
        dirname = prefix + rndstr(randchars)
        try:
            createfunc(dirname)
            break
        except OSError, e:
            if e.errno != errno.EEXIST:
                # Unexpected failure: restore umask before propagating.
                os.umask(old_umask)
                raise
            tries -= 1
    # NOTE(review): if all 5 attempts hit EEXIST, the last candidate name
    # is returned without having been created -- confirm intended.

    os.umask(old_umask)
    return dirname
|
218
|
+
__all__.append("mktmpsomething")
|
219
|
+
|
220
|
+
def mktmpdir(prefix = "/tmp/tmpdir-", randchars = 10):
    """Create a private temporary directory with a random suffix; returns its path."""
    return mktmpsomething(prefix, randchars, os.mkdir)
|
222
|
+
__all__.append("mktmpdir")
|
223
|
+
|
224
|
+
def mktmpfile(prefix = "/tmp/tmpfile-", randchars = 20):
    """Create a private empty temporary file with a random suffix; returns its path."""
    # O_CREAT|O_EXCL guarantees the file did not exist before this call.
    createfunc = lambda filename : os.close(os.open(filename, os.O_CREAT | os.O_EXCL))
    return mktmpsomething(prefix, randchars, createfunc)
|
227
|
+
__all__.append("mktmpfile")
|
228
|
+
|
229
|
+
def hash_file_md5(filename):
    """
    Return the hex MD5 digest of the file's contents.

    The file is read in 32 kB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    h = md5()
    f = open(filename, "rb")
    try:
        while True:
            # Hash 32kB chunks
            data = f.read(32*1024)
            if not data:
                break
            h.update(data)
    finally:
        # Close the handle even when read() raises (the original leaked it).
        f.close()
    return h.hexdigest()
|
240
|
+
__all__.append("hash_file_md5")
|
241
|
+
|
242
|
+
def mkdir_with_parents(dir_name):
    """
    mkdir_with_parents(dst_dir)

    Create directory 'dir_name' with all parent directories

    Returns True on success, False otherwise.
    """
    pathmembers = dir_name.split(os.sep)
    tmp_stack = []
    # Walk upwards until an existing ancestor directory is found...
    while pathmembers and not os.path.isdir(os.sep.join(pathmembers)):
        tmp_stack.append(pathmembers.pop())
    # ...then create the missing components one level at a time.
    while tmp_stack:
        pathmembers.append(tmp_stack.pop())
        cur_dir = os.sep.join(pathmembers)
        try:
            debug("mkdir(%s)" % cur_dir)
            os.mkdir(cur_dir)
        except (OSError, IOError), e:
            # Permission denied, read-only filesystem, ... -- report and give up.
            warning("%s: can not make directory: %s" % (cur_dir, e.strerror))
            return False
        except Exception, e:
            warning("%s: %s" % (cur_dir, e))
            return False
    return True
|
267
|
+
__all__.append("mkdir_with_parents")
|
268
|
+
|
269
|
+
def unicodise(string, encoding = None, errors = "replace"):
    """
    Convert 'string' to Unicode or raise an exception.

    'encoding' defaults to the configured Config().encoding; 'errors'
    is passed straight to decode() (default: replace invalid bytes).
    Unicode input is returned unchanged.
    """

    if not encoding:
        encoding = Config.Config().encoding

    if type(string) == unicode:
        return string
    debug("Unicodising %r using %s" % (string, encoding))
    try:
        return string.decode(encoding, errors)
    except UnicodeDecodeError:
        # NOTE(review): with the default errors="replace", decode() should
        # not raise; and UnicodeDecodeError normally takes 5 args, so this
        # re-raise looks untested -- confirm this path.
        raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
|
284
|
+
__all__.append("unicodise")
|
285
|
+
|
286
|
+
def deunicodise(string, encoding = None, errors = "replace"):
    """
    Convert unicode 'string' to <type str>, by default replacing
    all invalid characters with '?' or raise an exception.

    'encoding' defaults to the configured Config().encoding.
    Non-unicode input is returned as str(string).
    """

    if not encoding:
        encoding = Config.Config().encoding

    if type(string) != unicode:
        return str(string)
    debug("DeUnicodising %r using %s" % (string, encoding))
    try:
        return string.encode(encoding, errors)
    except UnicodeEncodeError:
        # NOTE(review): with the default errors="replace", encode() should
        # not raise; and UnicodeEncodeError normally takes 5 args, so this
        # re-raise looks untested -- confirm this path.
        raise UnicodeEncodeError("Conversion from unicode failed: %r" % string)
|
302
|
+
__all__.append("deunicodise")
|
303
|
+
|
304
|
+
def unicodise_safe(string, encoding = None):
    """
    Convert 'string' to Unicode according to current encoding
    and replace all invalid characters with '?'
    """

    # Round-trip through the byte encoding; U+FFFD replacement markers
    # produced along the way are rewritten to plain '?'.
    return unicodise(deunicodise(string, encoding), encoding).replace(u'\ufffd', '?')
|
311
|
+
__all__.append("unicodise_safe")
|
312
|
+
|
313
|
+
def replace_nonprintables(string):
    """
    replace_nonprintables(string)

    Replaces all non-printable characters 'ch' in 'string'
    where ord(ch) <= 26 with ^@, ^A, ... ^Z
    (in fact all control chars <= 31, plus DEL/127 as ^?).
    """
    # Build the result in a list -- repeated str += is quadratic.
    out = []
    modified = 0
    for c in string:
        o = ord(c)
        if (o <= 31):
            out.append("^" + chr(ord('@') + o))
            modified += 1
        elif (o == 127):
            out.append("^?")
            modified += 1
        else:
            out.append(c)
    new_string = "".join(out)
    if modified and Config.Config().urlencoding_mode != "fixbucket":
        warning("%d non-printable characters replaced in: %s" % (modified, new_string))
    return new_string
|
335
|
+
__all__.append("replace_nonprintables")
|
336
|
+
|
337
|
+
def sign_string(string_to_sign):
    """Sign a string with the secret key, returning base64 encoded results.
    By default the configured secret key is used, but may be overridden as
    an argument.

    Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
    """
    # NOTE(review): despite the docstring, no override argument exists --
    # the key always comes from Config().secret_key.
    signature = base64.encodestring(hmac.new(Config.Config().secret_key, string_to_sign, sha1).digest()).strip()
    return signature
|
346
|
+
__all__.append("sign_string")
|
347
|
+
|
348
|
+
def sign_url(url_to_sign, expiry):
    """Sign a URL in s3://bucket/object form with the given expiry
    time. The object will be accessible via the signed URL until the
    AWS key and secret are revoked or the expiry time is reached, even
    if the object is otherwise private.

    See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
    """
    # 'url_to_sign' is expected to provide bucket() and object()
    # accessors (an S3Uri-style object).
    return sign_url_base(
        bucket = url_to_sign.bucket(),
        object = url_to_sign.object(),
        expiry = expiry
    )
|
361
|
+
__all__.append("sign_url")
|
362
|
+
|
363
|
+
def sign_url_base(**parms):
    """Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
    parms['expiry']=time_to_epoch(parms['expiry'])
    parms['access_key']=Config.Config().access_key
    debug("Expiry interpreted as epoch time %s", parms['expiry'])
    # Canonical string-to-sign for a header-less GET (S3 REST auth scheme).
    signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms
    debug("Signing plaintext: %r", signtext)
    parms['sig'] = urllib.quote_plus(sign_string(signtext))
    debug("Urlencoded signature: %s", parms['sig'])
    return "http://%(bucket)s.s3.amazonaws.com/%(object)s?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s" % parms
|
373
|
+
|
374
|
+
def time_to_epoch(t):
    """Convert time specified in a variety of forms into UNIX epoch time.
    Accepts datetime.datetime, int, anything that has a strftime() method, and standard time 9-tuples

    Raises Exceptions.ParameterError when the value cannot be converted.
    """
    if isinstance(t, int):
        # Already an int
        return t
    elif isinstance(t, tuple) or isinstance(t, time.struct_time):
        # Assume it's a time 9-tuple
        return int(time.mktime(t))
    elif hasattr(t, 'timetuple'):
        # Looks like a datetime object or compatible
        # BUGFIX: the original called ex.timetuple() on an undefined
        # name 'ex'; the argument 't' is what must be converted.
        return int(time.mktime(t.timetuple()))
    elif hasattr(t, 'strftime'):
        # Looks like the object supports standard srftime()
        return int(t.strftime('%s'))
    elif isinstance(t, str) or isinstance(t, unicode):
        # See if it's a string representation of an epoch
        try:
            return int(t)
        except ValueError:
            # Try to parse it as a timestamp string
            try:
                # BUGFIX: the original returned the raw struct_time here,
                # although every other branch returns an epoch int (and
                # sign_url_base formats the result with %d).
                return int(time.mktime(time.strptime(t)))
            except ValueError:
                # Parsing failed -- fall through to ParameterError below.
                # (bare 'except ValueError' keeps this valid on py2.4+)
                debug("Failed to parse date with strptime: %s", t)
                pass
    raise Exceptions.ParameterError('Unable to convert %r to an epoch time. Pass an epoch time. Try `date -d \'now + 1 year\' +%%s` (shell) or time.mktime (Python).' % t)
|
403
|
+
|
404
|
+
|
405
|
+
def check_bucket_name(bucket, dns_strict = True):
    """
    Validate an S3 bucket name, raising Exceptions.ParameterError with a
    descriptive message on the first rule violated; returns True when the
    name is acceptable.

    With dns_strict the stricter DNS-compatible rules apply: lowercase
    letters/digits/dot/hyphen only, at most 63 characters, no '-.' or
    '..' sequences, and the name must start and end with a letter or
    digit.
    """
    if dns_strict:
        invalid = re.search(r"([^a-z0-9\.-])", bucket)
        if invalid:
            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: lowercase us-ascii letters (a-z), digits (0-9), dot (.) and hyphen (-)." % (bucket, invalid.groups()[0]))
    else:
        invalid = re.search(r"([^A-Za-z0-9\._-])", bucket)
        if invalid:
            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: us-ascii letters (a-z, A-Z), digits (0-9), dot (.), hyphen (-) and underscore (_)." % (bucket, invalid.groups()[0]))

    if len(bucket) < 3:
        raise Exceptions.ParameterError("Bucket name '%s' is too short (min 3 characters)" % bucket)
    if len(bucket) > 255:
        raise Exceptions.ParameterError("Bucket name '%s' is too long (max 255 characters)" % bucket)
    if dns_strict:
        if len(bucket) > 63:
            raise Exceptions.ParameterError("Bucket name '%s' is too long (max 63 characters)" % bucket)
        if re.search(r"-\.", bucket):
            raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '-.' for DNS compatibility" % bucket)
        if re.search(r"\.\.", bucket):
            raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '..' for DNS compatibility" % bucket)
        if not re.search(r"^[0-9a-z]", bucket):
            raise Exceptions.ParameterError("Bucket name '%s' must start with a letter or a digit" % bucket)
        if not re.search(r"[0-9a-z]$", bucket):
            raise Exceptions.ParameterError("Bucket name '%s' must end with a letter or a digit" % bucket)
    return True
|
431
|
+
__all__.append("check_bucket_name")
|
432
|
+
|
433
|
+
def check_bucket_name_dns_conformity(bucket):
    """Return True when 'bucket' passes the strict DNS naming rules, False otherwise (never raises)."""
    try:
        return check_bucket_name(bucket, dns_strict = True)
    except Exceptions.ParameterError:
        return False
|
438
|
+
__all__.append("check_bucket_name_dns_conformity")
|
439
|
+
|
440
|
+
def getBucketFromHostname(hostname):
    """
    bucket, success = getBucketFromHostname(hostname)

    Only works for hostnames derived from bucket names
    using Config.host_bucket pattern.

    Returns bucket name and a boolean success flag.
    """

    # Create RE pattern from Config.host_bucket
    pattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }
    m = re.match(pattern, hostname)
    if not m:
        # Hostname is not derived from a bucket name -- return it as-is.
        return (hostname, False)
    return m.groups()[0], True
|
456
|
+
__all__.append("getBucketFromHostname")
|
457
|
+
|
458
|
+
def getHostnameFromBucket(bucket):
    """Expand the Config.host_bucket '%(bucket)s' pattern into the bucket's hostname."""
    return Config.Config().host_bucket % { 'bucket' : bucket }
|
460
|
+
__all__.append("getHostnameFromBucket")
|
461
|
+
|
462
|
+
# vim:et:ts=4:sts=4:ai
|
Binary file
|
File without changes
|
Binary file
|
data/resources/TODO
ADDED
@@ -0,0 +1,52 @@
|
|
1
|
+
TODO list for s3cmd project
|
2
|
+
===========================
|
3
|
+
|
4
|
+
- Before 1.0.0 (or asap after 1.0.0)
|
5
|
+
- Make 'sync s3://bkt/some-filename local/other-filename' work
|
6
|
+
(at the moment it'll always download).
|
7
|
+
- Enable --exclude for [ls].
|
8
|
+
- Allow changing the /tmp working directory to somewhere else
|
9
|
+
- With --guess-mime use 'magic' module if available.
|
10
|
+
- Support --preserve for [put] and [get]. Update manpage.
|
11
|
+
- Don't let --continue fail if the file is already fully downloaded.
|
12
|
+
- Option --mime-type should set mime type with 'cp' and 'mv'.
|
13
|
+
If possible --guess-mime-type should do as well.
|
14
|
+
- Make upload throttling configurable.
|
15
|
+
- Allow removing 'DefaultRootObject' from CloudFront distributions.
|
16
|
+
- Get s3://bucket/non-existent creates empty local file 'non-existent'
|
17
|
+
- Add 'geturl' command, both Unicode and urlencoded output.
|
18
|
+
- Add a command for generating "Query String Authentication" URLs.
|
19
|
+
- Support --acl-grant (together with --acl-public/private) for [put] and [sync]
|
20
|
+
- Filter 's3cmd ls' output by --bucket-location=
|
21
|
+
|
22
|
+
- After 1.0.0
|
23
|
+
- Sync must backup non-files as well. At least directories,
|
24
|
+
symlinks and device nodes.
|
25
|
+
- Speed up upload / download with multiple threads.
|
26
|
+
(see http://blog.50projects.com/p/s3cmd-modifications.html)
|
27
|
+
- Sync should be able to update metadata (UID, timestamps, etc)
|
28
|
+
if only these change (i.e. same content, different metainfo).
|
29
|
+
- If GPG fails error() and exit. If un-GPG fails save the
|
30
|
+
file with .gpg extension.
|
31
|
+
- Keep backup files remotely on put/sync-to if requested
|
32
|
+
(move the old 'object' to e.g. 'object~' and only then upload
|
33
|
+
the new one). Could be more advanced to keep, say, last 5
|
34
|
+
copies, etc.
|
35
|
+
- Memory consumption on very large upload sets is terribly high.
|
36
|
+
- Implement per-bucket (or per-regexp?) default settings. For
|
37
|
+
example regarding ACLs, encryption, etc.
|
38
|
+
|
39
|
+
- Implement GPG for sync
|
40
|
+
(it's not that easy since it won't be easy to compare
|
41
|
+
the encrypted-remote-object size with local file.
|
42
|
+
either we can store the metadata in a dedicated file
|
43
|
+
where we face a risk of inconsistencies, or we'll store
|
44
|
+
the metadata encrypted in each object header where we'll
|
45
|
+
have to do large number for object/HEAD requests. tough
|
46
|
+
call).
|
47
|
+
Or we can only compare local timestamps with remote object
|
48
|
+
timestamps. If the local one is older we'll *assume* it
|
49
|
+
hasn't been changed. But what to do about remote2local sync?
|
50
|
+
|
51
|
+
- Keep man page up to date and write some more documentation
|
52
|
+
- Yeah, right ;-)
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|