s3_cmd_bin 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. data/lib/s3_cmd_bin/version.rb +1 -1
  2. data/resources/ChangeLog +0 -0
  3. data/resources/INSTALL +0 -0
  4. data/resources/MANIFEST.in +1 -0
  5. data/resources/NEWS +1 -40
  6. data/resources/README +0 -0
  7. data/resources/S3/ACL.py +0 -0
  8. data/resources/S3/AccessLog.py +0 -0
  9. data/resources/S3/BidirMap.py +0 -0
  10. data/resources/S3/CloudFront.py +8 -37
  11. data/resources/S3/Config.py +1 -88
  12. data/resources/S3/Exceptions.py +1 -1
  13. data/resources/S3/FileLists.py +100 -272
  14. data/resources/S3/MultiPart.py +21 -45
  15. data/resources/S3/PkgInfo.py +1 -1
  16. data/resources/S3/Progress.py +0 -17
  17. data/resources/S3/S3.py +52 -148
  18. data/resources/S3/S3Uri.py +2 -3
  19. data/resources/S3/SimpleDB.py +0 -3
  20. data/resources/S3/SortedDict.py +0 -3
  21. data/resources/S3/Utils.py +3 -80
  22. data/resources/S3/__init__.py +0 -0
  23. data/resources/TODO +0 -0
  24. data/resources/artwork/AtomicClockRadio.ttf +0 -0
  25. data/resources/artwork/TypeRa.ttf +0 -0
  26. data/resources/artwork/site-top-full-size.xcf +0 -0
  27. data/resources/artwork/site-top-label-download.png +0 -0
  28. data/resources/artwork/site-top-label-s3cmd.png +0 -0
  29. data/resources/artwork/site-top-label-s3sync.png +0 -0
  30. data/resources/artwork/site-top-s3tools-logo.png +0 -0
  31. data/resources/artwork/site-top.jpg +0 -0
  32. data/resources/artwork/site-top.png +0 -0
  33. data/resources/artwork/site-top.xcf +0 -0
  34. data/resources/run-tests.py +2 -2
  35. data/resources/s3cmd +306 -600
  36. data/resources/s3cmd.1 +97 -84
  37. data/resources/setup.cfg +0 -0
  38. data/resources/setup.py +0 -0
  39. data/resources/testsuite.tar.gz +0 -0
  40. metadata +2 -26
  41. data/resources/LICENSE +0 -339
  42. data/resources/Makefile +0 -4
  43. data/resources/S3/ACL.pyc +0 -0
  44. data/resources/S3/AccessLog.pyc +0 -0
  45. data/resources/S3/BidirMap.pyc +0 -0
  46. data/resources/S3/CloudFront.pyc +0 -0
  47. data/resources/S3/Config.pyc +0 -0
  48. data/resources/S3/ConnMan.py +0 -71
  49. data/resources/S3/ConnMan.pyc +0 -0
  50. data/resources/S3/Exceptions.pyc +0 -0
  51. data/resources/S3/FileDict.py +0 -53
  52. data/resources/S3/FileDict.pyc +0 -0
  53. data/resources/S3/FileLists.pyc +0 -0
  54. data/resources/S3/HashCache.py +0 -53
  55. data/resources/S3/HashCache.pyc +0 -0
  56. data/resources/S3/MultiPart.pyc +0 -0
  57. data/resources/S3/PkgInfo.pyc +0 -0
  58. data/resources/S3/Progress.pyc +0 -0
  59. data/resources/S3/S3.pyc +0 -0
  60. data/resources/S3/S3Uri.pyc +0 -0
  61. data/resources/S3/SortedDict.pyc +0 -0
  62. data/resources/S3/Utils.pyc +0 -0
  63. data/resources/S3/__init__.pyc +0 -0
  64. data/resources/magic +0 -63
@@ -10,7 +10,6 @@ from BidirMap import BidirMap
  from logging import debug
  import S3
  from Utils import unicodise, check_bucket_name_dns_conformity
- import Config

  class S3Uri(object):
  type = None
@@ -81,9 +80,9 @@ class S3UriS3(S3Uri):

  def public_url(self):
  if self.is_dns_compatible():
- return "http://%s.%s/%s" % (self._bucket, Config.Config().host_base, self._object)
+ return "http://%s.s3.amazonaws.com/%s" % (self._bucket, self._object)
  else:
- return "http://%s/%s/%s" % (self._bucket, Config.Config().host_base, self._object)
+ return "http://s3.amazonaws.com/%s/%s" % (self._bucket, self._object)

  def host_name(self):
  if self.is_dns_compatible():
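The hunk above swaps the Config-driven host_base for a hardcoded s3.amazonaws.com endpoint when building public URLs. A minimal standalone sketch (Python 3, not s3cmd's own code) of the two URL styles involved, virtual-hosted for DNS-compatible bucket names and path-style otherwise; the DNS check here is only a rough approximation of s3cmd's check_bucket_name_dns_conformity():

import re

def public_url(bucket, obj, host_base="s3.amazonaws.com"):
    # rough DNS-compatibility check (approximation, not s3cmd's exact rule)
    dns_ok = bool(re.match(r"^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$", bucket)) and ".." not in bucket
    if dns_ok:
        # virtual-hosted style: the bucket becomes a subdomain
        return "http://%s.%s/%s" % (bucket, host_base, obj)
    # path-style: the bucket stays in the path
    return "http://%s/%s/%s" % (host_base, bucket, obj)

print(public_url("my-bucket", "photos/cat.png"))
# http://my-bucket.s3.amazonaws.com/photos/cat.png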
@@ -131,9 +131,6 @@ class SimpleDB(object):
  def create_request(self, Action, DomainName, parameters = None):
  if not parameters:
  parameters = SortedDict()
- if len(self.config.access_token) > 0:
- self.config.refresh_role()
- parameters['Signature']=self.config.access_token
  parameters['AWSAccessKeyId'] = self.config.access_key
  parameters['Version'] = self.Version
  parameters['SignatureVersion'] = self.SignatureVersion
@@ -4,7 +4,6 @@
  ## License: GPL Version 2

  from BidirMap import BidirMap
- import Utils

  class SortedDictIterator(object):
  def __init__(self, sorted_dict, keys):
@@ -46,8 +45,6 @@ class SortedDict(dict):
  def __iter__(self):
  return SortedDictIterator(self, self.keys())

-
-
  if __name__ == "__main__":
  d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
  sd = SortedDict(d)
@@ -3,7 +3,6 @@
  ## http://www.logix.cz/michal
  ## License: GPL Version 2

- import datetime
  import os
  import sys
  import time
@@ -14,11 +13,9 @@ import rfc822
  import hmac
  import base64
  import errno
- import urllib

  from logging import debug, info, warning, error

-
  import Config
  import Exceptions

@@ -166,20 +163,7 @@ def formatSize(size, human_readable = False, floating_point = False):
  __all__.append("formatSize")

  def formatDateTime(s3timestamp):
- try:
- import pytz
- timezone = pytz.timezone(os.environ.get('TZ', 'UTC'))
- tz = pytz.timezone('UTC')
- ## Can't unpack args and follow that with kwargs in python 2.5
- ## So we pass them all as kwargs
- params = zip(('year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo'),
- dateS3toPython(s3timestamp)[0:6] + (tz,))
- params = dict(params)
- utc_dt = datetime.datetime(**params)
- dt_object = utc_dt.astimezone(timezone)
- except ImportError:
- dt_object = datetime.datetime(*dateS3toPython(s3timestamp)[0:6])
- return dt_object.strftime("%Y-%m-%d %H:%M")
+ return time.strftime("%Y-%m-%d %H:%M", dateS3toPython(s3timestamp))
  __all__.append("formatDateTime")

  def convertTupleListToDict(list):
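The retained formatDateTime() drops the optional pytz-based conversion to the local timezone and simply renders the parsed S3 timestamp. A rough standalone equivalent (Python 3, not s3cmd's code); the exact timestamp format handled by dateS3toPython() is an assumption here:

import time

def date_s3_to_python(s3timestamp):
    # S3 listing timestamps typically look like "2012-06-06T09:50:37.000Z" (assumed format)
    return time.strptime(s3timestamp, "%Y-%m-%dT%H:%M:%S.000Z")

def format_datetime(s3timestamp):
    return time.strftime("%Y-%m-%d %H:%M", date_s3_to_python(s3timestamp))

print(format_datetime("2012-06-06T09:50:37.000Z"))  # -> 2012-06-06 09:50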
@@ -335,73 +319,12 @@ def replace_nonprintables(string):
  __all__.append("replace_nonprintables")

  def sign_string(string_to_sign):
- """Sign a string with the secret key, returning base64 encoded results.
- By default the configured secret key is used, but may be overridden as
- an argument.
-
- Useful for REST authentication. See http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
- """
+ #debug("string_to_sign: %s" % string_to_sign)
  signature = base64.encodestring(hmac.new(Config.Config().secret_key, string_to_sign, sha1).digest()).strip()
+ #debug("signature: %s" % signature)
  return signature
  __all__.append("sign_string")

- def sign_url(url_to_sign, expiry):
- """Sign a URL in s3://bucket/object form with the given expiry
- time. The object will be accessible via the signed URL until the
- AWS key and secret are revoked or the expiry time is reached, even
- if the object is otherwise private.
-
- See: http://s3.amazonaws.com/doc/s3-developer-guide/RESTAuthentication.html
- """
- return sign_url_base(
- bucket = url_to_sign.bucket(),
- object = url_to_sign.object(),
- expiry = expiry
- )
- __all__.append("sign_url")
-
- def sign_url_base(**parms):
- """Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
- parms['expiry']=time_to_epoch(parms['expiry'])
- parms['access_key']=Config.Config().access_key
- debug("Expiry interpreted as epoch time %s", parms['expiry'])
- signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms
- debug("Signing plaintext: %r", signtext)
- parms['sig'] = urllib.quote_plus(sign_string(signtext))
- debug("Urlencoded signature: %s", parms['sig'])
- return "http://%(bucket)s.s3.amazonaws.com/%(object)s?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s" % parms
-
- def time_to_epoch(t):
- """Convert time specified in a variety of forms into UNIX epoch time.
- Accepts datetime.datetime, int, anything that has a strftime() method, and standard time 9-tuples
- """
- if isinstance(t, int):
- # Already an int
- return t
- elif isinstance(t, tuple) or isinstance(t, time.struct_time):
- # Assume it's a time 9-tuple
- return int(time.mktime(t))
- elif hasattr(t, 'timetuple'):
- # Looks like a datetime object or compatible
- return int(time.mktime(ex.timetuple()))
- elif hasattr(t, 'strftime'):
- # Looks like the object supports standard srftime()
- return int(t.strftime('%s'))
- elif isinstance(t, str) or isinstance(t, unicode):
- # See if it's a string representation of an epoch
- try:
- return int(t)
- except ValueError:
- # Try to parse it as a timestamp string
- try:
- return time.strptime(t)
- except ValueError, ex:
- # Will fall through
- debug("Failed to parse date with strptime: %s", ex)
- pass
- raise Exceptions.ParameterError('Unable to convert %r to an epoch time. Pass an epoch time. Try `date -d \'now + 1 year\' +%%s` (shell) or time.mktime (Python).' % t)
-
-
  def check_bucket_name(bucket, dns_strict = True):
  if dns_strict:
  invalid = re.search("([^a-z0-9\.-])", bucket)
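The removed sign_url() / sign_url_base() implement AWS's signature-v2 query-string authentication ("pre-signed" URLs). A compact standalone sketch (Python 3) of the same construction, mirroring the removed code rather than current AWS best practice; the key values are placeholders:

import base64, hashlib, hmac, time, urllib.parse

ACCESS_KEY = "AKIAEXAMPLE"   # placeholder
SECRET_KEY = b"secret"       # placeholder

def sign_url(bucket, obj, expiry_epoch):
    # HMAC-SHA1 over "GET\n\n\n<expiry>\n/<bucket>/<object>", as in sign_url_base()
    string_to_sign = "GET\n\n\n%d\n/%s/%s" % (expiry_epoch, bucket, obj)
    digest = hmac.new(SECRET_KEY, string_to_sign.encode("utf-8"), hashlib.sha1).digest()
    signature = urllib.parse.quote_plus(base64.b64encode(digest).decode("ascii"))
    return ("http://%s.s3.amazonaws.com/%s?AWSAccessKeyId=%s&Expires=%d&Signature=%s"
            % (bucket, obj, ACCESS_KEY, expiry_epoch, signature))

print(sign_url("my-bucket", "private/report.pdf", int(time.time()) + 3600))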
@@ -471,7 +471,7 @@ test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/
  ## ====== Sync between buckets
  test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
  must_find = [ "File %s/xyz/demo/dir1/file1-1.txt copied to %s/copy/demo/dir1/file1-1.txt" % (pbucket(1), pbucket(2)),
- "remote copy: etc/logo.png -> etc2/Logo.PNG",
+ "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
  "deleted: '%s/copy/etc/logo.png'" % pbucket(2) ],
  must_not_find = [ "blah.txt" ])

@@ -485,7 +485,7 @@ test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/x

  ## ====== Sync symbolic links
  test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ],
- must_find = ["remote copy: etc2/Logo.PNG -> etc/linked.png"],
+ must_find = ["File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png'" % pbucket(1)],
  # Don't want to recursively copy linked directories!
  must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
  "etc/brokenlink.png"],
@@ -23,9 +23,6 @@ import locale
  import subprocess
  import htmlentitydefs
  import socket
- import shutil
- import tempfile
- import S3.Exceptions

  from copy import copy
  from optparse import OptionParser, Option, OptionValueError, IndentedHelpFormatter
@@ -34,7 +31,6 @@ from distutils.spawn import find_executable

  def output(message):
  sys.stdout.write(message + "\n")
- sys.stdout.flush()

  def check_args_type(args, type, verbose_type):
  for arg in args:
@@ -69,28 +65,18 @@ def subcmd_bucket_usage(s3, uri):

  if object.endswith('*'):
  object = object[:-1]
-
+ try:
+ response = s3.bucket_list(bucket, prefix = object, recursive = True)
+ except S3Error, e:
+ if S3.codes.has_key(e.info["Code"]):
+ error(S3.codes[e.info["Code"]] % bucket)
+ return
+ else:
+ raise
  bucket_size = 0
- # iterate and store directories to traverse, while summing objects:
- dirs = [object]
- while dirs:
- try:
- response = s3.bucket_list(bucket, prefix=dirs.pop())
- except S3Error, e:
- if S3.codes.has_key(e.info["Code"]):
- error(S3.codes[e.info["Code"]] % bucket)
- return
- else:
- raise
-
- # objects in the current scope:
- for obj in response["list"]:
- bucket_size += int(obj["Size"])
-
- # directories found in current scope:
- for obj in response["common_prefixes"]:
- dirs.append(obj["Prefix"])
-
+ for object in response["list"]:
+ size, size_coeff = formatSize(object["Size"], False)
+ bucket_size += size
  total_size, size_coeff = formatSize(bucket_size, Config().human_readable_sizes)
  total_size_str = str(total_size) + size_coeff
  output(u"%s %s" % (total_size_str.ljust(8), uri))
@@ -280,11 +266,7 @@ def cmd_object_put(args):
  info(u"Summary: %d local files to upload" % local_count)

  if local_count > 0:
- if not single_file_local and '-' in local_list.keys():
- raise ParameterError("Cannot specify multiple local files if uploading from '-' (ie stdin)")
- elif single_file_local and local_list.keys()[0] == "-" and destination_base.endswith("/"):
- raise ParameterError("Destination S3 URI must not end with '/' when uploading from stdin.")
- elif not destination_base.endswith("/"):
+ if not destination_base.endswith("/"):
  if not single_file_local:
  raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
  local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
@@ -296,13 +278,9 @@ def cmd_object_put(args):
  for key in exclude_list:
  output(u"exclude: %s" % unicodise(key))
  for key in local_list:
- if key != "-":
- nicekey = local_list[key]['full_name_unicode']
- else:
- nicekey = "<stdin>"
- output(u"upload: %s -> %s" % (nicekey, local_list[key]['remote_uri']))
+ output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))

- warning(u"Exiting now because of --dry-run")
+ warning(u"Exitting now because of --dry-run")
  return

  seq = 0
@@ -415,7 +393,7 @@ def cmd_object_get(args):
  for key in remote_list:
  output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))

- warning(u"Exiting now because of --dry-run")
+ warning(u"Exitting now because of --dry-run")
  return

  seq = 0
@@ -462,14 +440,7 @@ def cmd_object_get(args):
  except IOError, e:
  error(u"Skipping %s: %s" % (destination, e.strerror))
  continue
- try:
- response = s3.object_get(uri, dst_stream, start_position = start_position, extra_label = seq_label)
- except S3Error, e:
- if not file_exists: # Delete, only if file didn't exist before!
- debug(u"object_get failed for '%s', deleting..." % (destination,))
- os.unlink(destination)
- raise
-
+ response = s3.object_get(uri, dst_stream, start_position = start_position, extra_label = seq_label)
  if response["headers"].has_key("x-amz-meta-s3tools-gpgenc"):
  gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
  response["size"] = os.stat(destination)[6]
@@ -477,9 +448,6 @@ def cmd_object_get(args):
  speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
  output(u"File %s saved as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" %
  (uri, destination, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1]))
- if Config().delete_after_fetch:
- s3.object_delete(uri)
- output(u"File %s removed after fetch" % (uri))

  def cmd_object_del(args):
  for uri_str in args:
@@ -512,7 +480,7 @@ def subcmd_object_del_uri(uri_str, recursive = None):
  for key in remote_list:
  output(u"delete: %s" % remote_list[key]['object_uri_str'])

- warning(u"Exiting now because of --dry-run")
+ warning(u"Exitting now because of --dry-run")
  return

  for key in remote_list:
@@ -553,7 +521,7 @@ def subcmd_cp_mv(args, process_fce, action_str, message):
  for key in remote_list:
  output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name']))

- warning(u"Exiting now because of --dry-run")
+ warning(u"Exitting now because of --dry-run")
  return

  seq = 0
@@ -602,18 +570,10 @@ def cmd_info(args):
  output(u" Location: %s" % info['bucket-location'])
  acl = s3.get_acl(uri)
  acl_grant_list = acl.getGrantList()
-
- try:
- policy = s3.get_policy(uri)
- output(u" policy: %s" % policy)
- except:
- output(u" policy: none")
-
  for grant in acl_grant_list:
  output(u" ACL: %s: %s" % (grant['grantee'], grant['permission']))
  if acl.isAnonRead():
  output(u" URL: %s" % uri.public_url())
-
  except S3Error, e:
  if S3.codes.has_key(e.info["Code"]):
  error(S3.codes[e.info["Code"]] % uri.bucket())
@@ -622,17 +582,6 @@ def cmd_info(args):
  raise

  def cmd_sync_remote2remote(args):
- def _do_deletes(s3, dst_list):
- # Delete items in destination that are not in source
- if cfg.dry_run:
- for key in dst_list:
- output(u"delete: %s" % dst_list[key]['object_uri_str'])
- else:
- for key in dst_list:
- uri = S3Uri(dst_list[key]['object_uri_str'])
- s3.object_delete(uri)
- output(u"deleted: '%s'" % uri)
-
  s3 = S3(Config())

  # Normalise s3://uri (e.g. assert trailing slash)
@@ -648,19 +597,17 @@ def cmd_sync_remote2remote(args):

  src_list, exclude_list = filter_exclude_include(src_list)

- src_list, dst_list, update_list, copy_pairs = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True, delay_updates = cfg.delay_updates)
+ src_list, dst_list, existing_list = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True)

  src_count = len(src_list)
- update_count = len(update_list)
  dst_count = len(dst_list)

  print(u"Summary: %d source files to copy, %d files at destination to delete" % (src_count, dst_count))

- ### Populate 'target_uri' only if we've got something to sync from src to dst
- for key in src_list:
- src_list[key]['target_uri'] = destination_base + key
- for key in update_list:
- update_list[key]['target_uri'] = destination_base + key
+ if src_count > 0:
+ ### Populate 'remote_uri' only if we've got something to sync from src to dst
+ for key in src_list:
+ src_list[key]['target_uri'] = destination_base + key

  if cfg.dry_run:
  for key in exclude_list:
@@ -670,42 +617,37 @@ def cmd_sync_remote2remote(args):
670
617
  output(u"delete: %s" % dst_list[key]['object_uri_str'])
671
618
  for key in src_list:
672
619
  output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri']))
673
- warning(u"Exiting now because of --dry-run")
620
+ warning(u"Exitting now because of --dry-run")
674
621
  return
675
622
 
676
- # if there are copy pairs, we can't do delete_before, on the chance
677
- # we need one of the to-be-deleted files as a copy source.
678
- if len(copy_pairs) > 0:
679
- cfg.delete_after = True
680
-
681
623
  # Delete items in destination that are not in source
682
- if cfg.delete_removed and not cfg.delete_after:
683
- _do_deletes(s3, dst_list)
684
-
685
- def _upload(src_list, seq, src_count):
686
- file_list = src_list.keys()
687
- file_list.sort()
688
- for file in file_list:
689
- seq += 1
690
- item = src_list[file]
691
- src_uri = S3Uri(item['object_uri_str'])
692
- dst_uri = S3Uri(item['target_uri'])
693
- seq_label = "[%d of %d]" % (seq, src_count)
694
- extra_headers = copy(cfg.extra_headers)
695
- try:
696
- response = s3.object_copy(src_uri, dst_uri, extra_headers)
697
- output("File %(src)s copied to %(dst)s" % { "src" : src_uri, "dst" : dst_uri })
698
- except S3Error, e:
699
- error("File %(src)s could not be copied: %(e)s" % { "src" : src_uri, "e" : e })
700
- return seq
624
+ if cfg.delete_removed:
625
+ if cfg.dry_run:
626
+ for key in dst_list:
627
+ output(u"delete: %s" % dst_list[key]['object_uri_str'])
628
+ else:
629
+ for key in dst_list:
630
+ uri = S3Uri(dst_list[key]['object_uri_str'])
631
+ s3.object_delete(uri)
632
+ output(u"deleted: '%s'" % uri)
701
633
 
702
634
  # Perform the synchronization of files
703
635
  timestamp_start = time.time()
704
636
  seq = 0
705
- seq = _upload(src_list, seq, src_count + update_count)
706
- seq = _upload(update_list, seq, src_count + update_count)
707
- n_copied, bytes_saved = remote_copy(s3, copy_pairs, destination_base)
708
-
637
+ file_list = src_list.keys()
638
+ file_list.sort()
639
+ for file in file_list:
640
+ seq += 1
641
+ item = src_list[file]
642
+ src_uri = S3Uri(item['object_uri_str'])
643
+ dst_uri = S3Uri(item['target_uri'])
644
+ seq_label = "[%d of %d]" % (seq, src_count)
645
+ extra_headers = copy(cfg.extra_headers)
646
+ try:
647
+ response = s3.object_copy(src_uri, dst_uri, extra_headers)
648
+ output("File %(src)s copied to %(dst)s" % { "src" : src_uri, "dst" : dst_uri })
649
+ except S3Error, e:
650
+ error("File %(src)s could not be copied: %(e)s" % { "src" : src_uri, "e" : e })
709
651
  total_elapsed = time.time() - timestamp_start
710
652
  outstr = "Done. Copied %d files in %0.1f seconds, %0.2f files/s" % (seq, total_elapsed, seq/total_elapsed)
711
653
  if seq > 0:
@@ -713,15 +655,13 @@ def cmd_sync_remote2remote(args):
713
655
  else:
714
656
  info(outstr)
715
657
 
716
- # Delete items in destination that are not in source
717
- if cfg.delete_removed and cfg.delete_after:
718
- _do_deletes(s3, dst_list)
719
-
720
658
  def cmd_sync_remote2local(args):
721
- def _do_deletes(local_list):
722
- for key in local_list:
723
- os.unlink(local_list[key]['full_name'])
724
- output(u"deleted: %s" % local_list[key]['full_name_unicode'])
659
+ def _parse_attrs_header(attrs_header):
660
+ attrs = {}
661
+ for attr in attrs_header.split("/"):
662
+ key, val = attr.split(":")
663
+ attrs[key] = val
664
+ return attrs
725
665
 
726
666
  s3 = S3(Config())
727
667
 
@@ -736,34 +676,26 @@ def cmd_sync_remote2local(args):
736
676
 
737
677
  remote_list, exclude_list = filter_exclude_include(remote_list)
738
678
 
739
- remote_list, local_list, update_list, copy_pairs = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False, delay_updates = cfg.delay_updates)
679
+ remote_list, local_list, existing_list = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False)
740
680
 
741
681
  local_count = len(local_list)
742
682
  remote_count = len(remote_list)
743
- update_count = len(update_list)
744
- copy_pairs_count = len(copy_pairs)
745
-
746
- info(u"Summary: %d remote files to download, %d local files to delete, %d local files to hardlink" % (remote_count + update_count, local_count, copy_pairs_count))
747
683
 
748
- def _set_local_filename(remote_list, destination_base):
749
- if len(remote_list) == 0:
750
- return
751
- if not os.path.isdir(destination_base):
752
- ## We were either given a file name (existing or not) or want STDOUT
753
- if len(remote_list) > 1:
754
- raise ParameterError("Destination must be a directory when downloading multiple sources.")
755
- remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
756
- else:
757
- if destination_base[-1] != os.path.sep:
758
- destination_base += os.path.sep
759
- for key in remote_list:
760
- local_filename = destination_base + key
761
- if os.path.sep != "/":
762
- local_filename = os.path.sep.join(local_filename.split("/"))
763
- remote_list[key]['local_filename'] = deunicodise(local_filename)
684
+ info(u"Summary: %d remote files to download, %d local files to delete" % (remote_count, local_count))
764
685
 
765
- _set_local_filename(remote_list, destination_base)
766
- _set_local_filename(update_list, destination_base)
686
+ if not os.path.isdir(destination_base):
687
+ ## We were either given a file name (existing or not) or want STDOUT
688
+ if remote_count > 1:
689
+ raise ParameterError("Destination must be a directory when downloading multiple sources.")
690
+ remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
691
+ else:
692
+ if destination_base[-1] != os.path.sep:
693
+ destination_base += os.path.sep
694
+ for key in remote_list:
695
+ local_filename = destination_base + key
696
+ if os.path.sep != "/":
697
+ local_filename = os.path.sep.join(local_filename.split("/"))
698
+ remote_list[key]['local_filename'] = deunicodise(local_filename)
767
699
 
768
700
  if cfg.dry_run:
769
701
  for key in exclude_list:
@@ -772,128 +704,94 @@ def cmd_sync_remote2local(args):
772
704
  for key in local_list:
773
705
  output(u"delete: %s" % local_list[key]['full_name_unicode'])
774
706
  for key in remote_list:
775
- output(u"download: %s -> %s" % (unicodise(remote_list[key]['object_uri_str']), unicodise(remote_list[key]['local_filename'])))
776
- for key in update_list:
777
- output(u"download: %s -> %s" % (update_list[key]['object_uri_str'], update_list[key]['local_filename']))
707
+ output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
778
708
 
779
- warning(u"Exiting now because of --dry-run")
709
+ warning(u"Exitting now because of --dry-run")
780
710
  return
781
711
 
782
- # if there are copy pairs, we can't do delete_before, on the chance
783
- # we need one of the to-be-deleted files as a copy source.
784
- if len(copy_pairs) > 0:
785
- cfg.delete_after = True
786
-
787
- if cfg.delete_removed and not cfg.delete_after:
788
- _do_deletes(local_list)
789
-
790
- def _download(remote_list, seq, total, total_size, dir_cache):
791
- file_list = remote_list.keys()
792
- file_list.sort()
793
- for file in file_list:
794
- seq += 1
795
- item = remote_list[file]
796
- uri = S3Uri(item['object_uri_str'])
797
- dst_file = item['local_filename']
798
- seq_label = "[%d of %d]" % (seq, total)
799
- try:
800
- dst_dir = os.path.dirname(dst_file)
801
- if not dir_cache.has_key(dst_dir):
802
- dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
803
- if dir_cache[dst_dir] == False:
804
- warning(u"%s: destination directory not writable: %s" % (file, dst_dir))
805
- continue
806
- try:
807
- debug(u"dst_file=%s" % unicodise(dst_file))
808
- # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory
809
- # for downloading and then rename once downloaded
810
- chkptfd, chkptfname = tempfile.mkstemp(".tmp",".s3cmd.",os.path.dirname(dst_file))
811
- debug(u"created chkptfname=%s" % unicodise(chkptfname))
812
- dst_stream = os.fdopen(chkptfd, "wb")
813
- response = s3.object_get(uri, dst_stream, extra_label = seq_label)
814
- dst_stream.close()
815
- # download completed, rename the file to destination
816
- os.rename(chkptfname, dst_file)
817
-
818
- # set permissions on destination file
819
- original_umask = os.umask(0);
820
- os.umask(original_umask);
821
- mode = 0777 - original_umask;
822
- debug(u"mode=%s" % oct(mode))
823
-
824
- os.chmod(dst_file, mode);
825
-
826
- debug(u"renamed chkptfname=%s to dst_file=%s" % (unicodise(chkptfname), unicodise(dst_file)))
827
- if response['headers'].has_key('x-amz-meta-s3cmd-attrs') and cfg.preserve_attrs:
828
- attrs = parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
829
- if attrs.has_key('mode'):
830
- os.chmod(dst_file, int(attrs['mode']))
831
- if attrs.has_key('mtime') or attrs.has_key('atime'):
832
- mtime = attrs.has_key('mtime') and int(attrs['mtime']) or int(time.time())
833
- atime = attrs.has_key('atime') and int(attrs['atime']) or int(time.time())
834
- os.utime(dst_file, (atime, mtime))
835
- ## FIXME: uid/gid / uname/gname handling comes here! TODO
836
- except OSError, e:
837
- try:
838
- dst_stream.close()
839
- os.remove(chkptfname)
840
- except: pass
841
- if e.errno == errno.EEXIST:
842
- warning(u"%s exists - not overwriting" % (dst_file))
843
- continue
844
- if e.errno in (errno.EPERM, errno.EACCES):
845
- warning(u"%s not writable: %s" % (dst_file, e.strerror))
846
- continue
847
- if e.errno == errno.EISDIR:
848
- warning(u"%s is a directory - skipping over" % dst_file)
849
- continue
850
- raise e
851
- except KeyboardInterrupt:
852
- try:
853
- dst_stream.close()
854
- os.remove(chkptfname)
855
- except: pass
856
- warning(u"Exiting after keyboard interrupt")
857
- return
858
- except Exception, e:
859
- try:
860
- dst_stream.close()
861
- os.remove(chkptfname)
862
- except: pass
863
- error(u"%s: %s" % (file, e))
864
- continue
865
- # We have to keep repeating this call because
866
- # Python 2.4 doesn't support try/except/finally
867
- # construction :-(
868
- try:
869
- dst_stream.close()
870
- os.remove(chkptfname)
871
- except: pass
872
- except S3DownloadError, e:
873
- error(u"%s: download failed too many times. Skipping that file." % file)
874
- continue
875
- speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
876
- if not Config().progress_meter:
877
- output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
878
- (uri, unicodise(dst_file), response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
879
- seq_label))
880
- total_size += response["size"]
881
- if Config().delete_after_fetch:
882
- s3.object_delete(uri)
883
- output(u"File '%s' removed after syncing" % (uri))
884
- return seq, total_size
712
+ if cfg.delete_removed:
713
+ for key in local_list:
714
+ os.unlink(local_list[key]['full_name'])
715
+ output(u"deleted: %s" % local_list[key]['full_name_unicode'])
885
716
 
886
717
  total_size = 0
887
718
  total_elapsed = 0.0
888
719
  timestamp_start = time.time()
889
- dir_cache = {}
890
720
  seq = 0
891
- seq, total_size = _download(remote_list, seq, remote_count + update_count, total_size, dir_cache)
892
- seq, total_size = _download(update_list, seq, remote_count + update_count, total_size, dir_cache)
893
-
894
- failed_copy_list = local_copy(copy_pairs, destination_base)
895
- _set_local_filename(failed_copy_list, destination_base)
896
- seq, total_size = _download(failed_copy_list, seq, len(failed_copy_list) + remote_count + update_count, total_size, dir_cache)
721
+ dir_cache = {}
722
+ file_list = remote_list.keys()
723
+ file_list.sort()
724
+ for file in file_list:
725
+ seq += 1
726
+ item = remote_list[file]
727
+ uri = S3Uri(item['object_uri_str'])
728
+ dst_file = item['local_filename']
729
+ seq_label = "[%d of %d]" % (seq, remote_count)
730
+ try:
731
+ dst_dir = os.path.dirname(dst_file)
732
+ if not dir_cache.has_key(dst_dir):
733
+ dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
734
+ if dir_cache[dst_dir] == False:
735
+ warning(u"%s: destination directory not writable: %s" % (file, dst_dir))
736
+ continue
737
+ try:
738
+ open_flags = os.O_CREAT
739
+ open_flags |= os.O_TRUNC
740
+ # open_flags |= os.O_EXCL
741
+
742
+ debug(u"dst_file=%s" % unicodise(dst_file))
743
+ # This will have failed should the file exist
744
+ os.close(os.open(dst_file, open_flags))
745
+ # Yeah I know there is a race condition here. Sadly I don't know how to open() in exclusive mode.
746
+ dst_stream = open(dst_file, "wb")
747
+ response = s3.object_get(uri, dst_stream, extra_label = seq_label)
748
+ dst_stream.close()
749
+ if response['headers'].has_key('x-amz-meta-s3cmd-attrs') and cfg.preserve_attrs:
750
+ attrs = _parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
751
+ if attrs.has_key('mode'):
752
+ os.chmod(dst_file, int(attrs['mode']))
753
+ if attrs.has_key('mtime') or attrs.has_key('atime'):
754
+ mtime = attrs.has_key('mtime') and int(attrs['mtime']) or int(time.time())
755
+ atime = attrs.has_key('atime') and int(attrs['atime']) or int(time.time())
756
+ os.utime(dst_file, (atime, mtime))
757
+ ## FIXME: uid/gid / uname/gname handling comes here! TODO
758
+ except OSError, e:
759
+ try: dst_stream.close()
760
+ except: pass
761
+ if e.errno == errno.EEXIST:
762
+ warning(u"%s exists - not overwriting" % (dst_file))
763
+ continue
764
+ if e.errno in (errno.EPERM, errno.EACCES):
765
+ warning(u"%s not writable: %s" % (dst_file, e.strerror))
766
+ continue
767
+ if e.errno == errno.EISDIR:
768
+ warning(u"%s is a directory - skipping over" % dst_file)
769
+ continue
770
+ raise e
771
+ except KeyboardInterrupt:
772
+ try: dst_stream.close()
773
+ except: pass
774
+ warning(u"Exiting after keyboard interrupt")
775
+ return
776
+ except Exception, e:
777
+ try: dst_stream.close()
778
+ except: pass
779
+ error(u"%s: %s" % (file, e))
780
+ continue
781
+ # We have to keep repeating this call because
782
+ # Python 2.4 doesn't support try/except/finally
783
+ # construction :-(
784
+ try: dst_stream.close()
785
+ except: pass
786
+ except S3DownloadError, e:
787
+ error(u"%s: download failed too many times. Skipping that file." % file)
788
+ continue
789
+ speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
790
+ if not Config().progress_meter:
791
+ output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
792
+ (uri, unicodise(dst_file), response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
793
+ seq_label))
794
+ total_size += response["size"]
897
795
 
898
796
  total_elapsed = time.time() - timestamp_start
899
797
  speed_fmt = formatSize(total_size/total_elapsed, human_readable = True, floating_point = True)
@@ -906,282 +804,158 @@ def cmd_sync_remote2local(args):
906
804
  else:
907
805
  info(outstr)
908
806
 
909
- if cfg.delete_removed and cfg.delete_after:
910
- _do_deletes(local_list)
911
-
912
- def local_copy(copy_pairs, destination_base):
913
- # Do NOT hardlink local files by default, that'd be silly
914
- # For instance all empty files would become hardlinked together!
915
-
916
- failed_copy_list = FileDict()
917
- for (src_obj, dst1, relative_file) in copy_pairs:
918
- src_file = os.path.join(destination_base, dst1)
919
- dst_file = os.path.join(destination_base, relative_file)
920
- dst_dir = os.path.dirname(dst_file)
921
- try:
922
- if not os.path.isdir(dst_dir):
923
- debug("MKDIR %s" % dst_dir)
924
- os.makedirs(dst_dir)
925
- debug(u"Copying %s to %s" % (src_file, dst_file))
926
- shutil.copy2(src_file, dst_file)
927
- except (IOError, OSError), e:
928
- warning(u'Unable to hardlink or copy files %s -> %s: %s' % (src_file, dst_file, e))
929
- failed_copy_list[relative_file] = src_obj
930
- return failed_copy_list
931
-
932
- def remote_copy(s3, copy_pairs, destination_base):
933
- saved_bytes = 0
934
- for (src_obj, dst1, dst2) in copy_pairs:
935
- debug(u"Remote Copying from %s to %s" % (dst1, dst2))
936
- dst1_uri = S3Uri(destination_base + dst1)
937
- dst2_uri = S3Uri(destination_base + dst2)
938
- extra_headers = copy(cfg.extra_headers)
939
- try:
940
- s3.object_copy(dst1_uri, dst2_uri, extra_headers)
941
- info = s3.object_info(dst2_uri)
942
- saved_bytes = saved_bytes + int(info['headers']['content-length'])
943
- output(u"remote copy: %s -> %s" % (dst1, dst2))
944
- except:
945
- raise
946
- return (len(copy_pairs), saved_bytes)
947
-
948
-
949
807
  def cmd_sync_local2remote(args):
950
- def _build_attr_header(local_list, src):
808
+ def _build_attr_header(src):
951
809
  import pwd, grp
952
810
  attrs = {}
811
+ src = deunicodise(src)
812
+ try:
813
+ st = os.stat_result(os.stat(src))
814
+ except OSError, e:
815
+ raise InvalidFileError(u"%s: %s" % (unicodise(src), e.strerror))
953
816
  for attr in cfg.preserve_attrs_list:
954
817
  if attr == 'uname':
955
818
  try:
956
- val = pwd.getpwuid(local_list[src]['uid']).pw_name
819
+ val = pwd.getpwuid(st.st_uid).pw_name
957
820
  except KeyError:
958
821
  attr = "uid"
959
- val = local_list[src].get('uid')
960
- warning(u"%s: Owner username not known. Storing UID=%d instead." % (src, val))
822
+ val = st.st_uid
823
+ warning(u"%s: Owner username not known. Storing UID=%d instead." % (unicodise(src), val))
961
824
  elif attr == 'gname':
962
825
  try:
963
- val = grp.getgrgid(local_list[src].get('gid')).gr_name
826
+ val = grp.getgrgid(st.st_gid).gr_name
964
827
  except KeyError:
965
828
  attr = "gid"
966
- val = local_list[src].get('gid')
967
- warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (src, val))
968
- elif attr == 'md5':
969
- try:
970
- val = local_list.get_md5(src)
971
- except IOError:
972
- val = None
829
+ val = st.st_gid
830
+ warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (unicodise(src), val))
973
831
  else:
974
- val = getattr(local_list[src]['sr'], 'st_' + attr)
832
+ val = getattr(st, 'st_' + attr)
975
833
  attrs[attr] = val
976
-
977
- if 'md5' in attrs and attrs['md5'] is None:
978
- del attrs['md5']
979
-
980
834
  result = ""
981
835
  for k in attrs: result += "%s:%s/" % (k, attrs[k])
982
836
  return { 'x-amz-meta-s3cmd-attrs' : result[:-1] }
983
837
 
984
- def _do_deletes(s3, remote_list):
985
- for key in remote_list:
986
- uri = S3Uri(remote_list[key]['object_uri_str'])
987
- s3.object_delete(uri)
988
- output(u"deleted: '%s'" % uri)
989
-
990
- def _single_process(local_list):
991
- for dest in destinations:
992
- ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
993
- destination_base_uri = S3Uri(dest)
994
- if destination_base_uri.type != 's3':
995
- raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
996
- destination_base = str(destination_base_uri)
997
- _child(destination_base, local_list)
998
- return destination_base_uri
999
-
1000
- def _parent():
1001
- # Now that we've done all the disk I/O to look at the local file system and
1002
- # calculate the md5 for each file, fork for each destination to upload to them separately
1003
- # and in parallel
1004
- child_pids = []
1005
-
1006
- for dest in destinations:
1007
- ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
1008
- destination_base_uri = S3Uri(dest)
1009
- if destination_base_uri.type != 's3':
1010
- raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
1011
- destination_base = str(destination_base_uri)
1012
- child_pid = os.fork()
1013
- if child_pid == 0:
1014
- _child(destination_base, local_list)
1015
- os._exit(0)
1016
- else:
1017
- child_pids.append(child_pid)
1018
-
1019
- while len(child_pids):
1020
- (pid, status) = os.wait()
1021
- child_pids.remove(pid)
1022
-
1023
- return
838
+ s3 = S3(cfg)
1024
839
 
1025
- def _child(destination_base, local_list):
1026
- def _set_remote_uri(local_list, destination_base, single_file_local):
1027
- if len(local_list) > 0:
1028
- ## Populate 'remote_uri' only if we've got something to upload
1029
- if not destination_base.endswith("/"):
1030
- if not single_file_local:
1031
- raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
1032
- local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
1033
- else:
1034
- for key in local_list:
1035
- local_list[key]['remote_uri'] = unicodise(destination_base + key)
1036
-
1037
- def _upload(local_list, seq, total, total_size):
1038
- file_list = local_list.keys()
1039
- file_list.sort()
1040
- for file in file_list:
1041
- seq += 1
1042
- item = local_list[file]
1043
- src = item['full_name']
1044
- uri = S3Uri(item['remote_uri'])
1045
- seq_label = "[%d of %d]" % (seq, total)
1046
- extra_headers = copy(cfg.extra_headers)
1047
- try:
1048
- if cfg.preserve_attrs:
1049
- attr_header = _build_attr_header(local_list, file)
1050
- debug(u"attr_header: %s" % attr_header)
1051
- extra_headers.update(attr_header)
1052
- response = s3.object_put(src, uri, extra_headers, extra_label = seq_label)
1053
- except InvalidFileError, e:
1054
- warning(u"File can not be uploaded: %s" % e)
1055
- continue
1056
- except S3UploadError, e:
1057
- error(u"%s: upload failed too many times. Skipping that file." % item['full_name_unicode'])
1058
- continue
1059
- speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
1060
- if not cfg.progress_meter:
1061
- output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
1062
- (item['full_name_unicode'], uri, response["size"], response["elapsed"],
1063
- speed_fmt[0], speed_fmt[1], seq_label))
1064
- total_size += response["size"]
1065
- uploaded_objects_list.append(uri.object())
1066
- return seq, total_size
840
+ if cfg.encrypt:
841
+ error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.")
842
+ error(u"Either use unconditional 's3cmd put --recursive'")
843
+ error(u"or disable encryption with --no-encrypt parameter.")
844
+ sys.exit(1)
1067
845
 
1068
- remote_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
846
+ ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
847
+ destination_base_uri = S3Uri(args[-1])
848
+ if destination_base_uri.type != 's3':
849
+ raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
850
+ destination_base = str(destination_base_uri)
1069
851
 
1070
- local_count = len(local_list)
1071
- remote_count = len(remote_list)
852
+ local_list, single_file_local = fetch_local_list(args[:-1], recursive = True)
853
+ remote_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
1072
854
 
1073
- info(u"Found %d local files, %d remote files" % (local_count, remote_count))
855
+ local_count = len(local_list)
856
+ remote_count = len(remote_list)
1074
857
 
1075
- local_list, exclude_list = filter_exclude_include(local_list)
858
+ info(u"Found %d local files, %d remote files" % (local_count, remote_count))
1076
859
 
1077
- if single_file_local and len(local_list) == 1 and len(remote_list) == 1:
1078
- ## Make remote_key same as local_key for comparison if we're dealing with only one file
1079
- remote_list_entry = remote_list[remote_list.keys()[0]]
1080
- # Flush remote_list, by the way
1081
- remote_list = FileDict()
1082
- remote_list[local_list.keys()[0]] = remote_list_entry
860
+ local_list, exclude_list = filter_exclude_include(local_list)
1083
861
 
1084
- local_list, remote_list, update_list, copy_pairs = compare_filelists(local_list, remote_list, src_remote = False, dst_remote = True, delay_updates = cfg.delay_updates)
862
+ if single_file_local and len(local_list) == 1 and len(remote_list) == 1:
863
+ ## Make remote_key same as local_key for comparison if we're dealing with only one file
864
+ remote_list_entry = remote_list[remote_list.keys()[0]]
865
+ # Flush remote_list, by the way
866
+ remote_list = { local_list.keys()[0] : remote_list_entry }
1085
867
 
1086
- local_count = len(local_list)
1087
- update_count = len(update_list)
1088
- copy_count = len(copy_pairs)
1089
- remote_count = len(remote_list)
868
+ local_list, remote_list, existing_list = compare_filelists(local_list, remote_list, src_remote = False, dst_remote = True)
1090
869
 
1091
- info(u"Summary: %d local files to upload, %d files to remote copy, %d remote files to delete" % (local_count + update_count, copy_count, remote_count))
870
+ local_count = len(local_list)
871
+ remote_count = len(remote_list)
1092
872
 
1093
- _set_remote_uri(local_list, destination_base, single_file_local)
1094
- _set_remote_uri(update_list, destination_base, single_file_local)
873
+ info(u"Summary: %d local files to upload, %d remote files to delete" % (local_count, remote_count))
1095
874
 
1096
- if cfg.dry_run:
1097
- for key in exclude_list:
1098
- output(u"exclude: %s" % unicodise(key))
875
+ if local_count > 0:
876
+ ## Populate 'remote_uri' only if we've got something to upload
877
+ if not destination_base.endswith("/"):
878
+ if not single_file_local:
879
+ raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
880
+ local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
881
+ else:
1099
882
  for key in local_list:
1100
- output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
1101
- for key in update_list:
1102
- output(u"upload: %s -> %s" % (update_list[key]['full_name_unicode'], update_list[key]['remote_uri']))
1103
- for (src_obj, dst1, dst2) in copy_pairs:
1104
- output(u"remote copy: %s -> %s" % (dst1, dst2))
1105
- if cfg.delete_removed:
1106
- for key in remote_list:
1107
- output(u"delete: %s" % remote_list[key]['object_uri_str'])
1108
-
1109
- warning(u"Exiting now because of --dry-run")
1110
- return
883
+ local_list[key]['remote_uri'] = unicodise(destination_base + key)
1111
884
 
1112
- # if there are copy pairs, we can't do delete_before, on the chance
1113
- # we need one of the to-be-deleted files as a copy source.
1114
- if len(copy_pairs) > 0:
1115
- cfg.delete_after = True
1116
-
1117
- if cfg.delete_removed and not cfg.delete_after:
1118
- _do_deletes(s3, remote_list)
1119
-
1120
- total_size = 0
1121
- total_elapsed = 0.0
1122
- timestamp_start = time.time()
1123
- n, total_size = _upload(local_list, 0, local_count, total_size)
1124
- n, total_size = _upload(update_list, n, local_count, total_size)
1125
- n_copies, saved_bytes = remote_copy(s3, copy_pairs, destination_base)
1126
- if cfg.delete_removed and cfg.delete_after:
1127
- _do_deletes(s3, remote_list)
1128
- total_elapsed = time.time() - timestamp_start
1129
- total_speed = total_elapsed and total_size/total_elapsed or 0.0
1130
- speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True)
1131
-
1132
- # Only print out the result if any work has been done or
1133
- # if the user asked for verbose output
1134
- outstr = "Done. Uploaded %d bytes in %0.1f seconds, %0.2f %sB/s. Copied %d files saving %d bytes transfer." % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1], n_copies, saved_bytes)
1135
- if total_size + saved_bytes > 0:
1136
- output(outstr)
1137
- else:
1138
- info(outstr)
885
+ if cfg.dry_run:
886
+ for key in exclude_list:
887
+ output(u"exclude: %s" % unicodise(key))
888
+ if cfg.delete_removed:
889
+ for key in remote_list:
890
+ output(u"delete: %s" % remote_list[key]['object_uri_str'])
891
+ for key in local_list:
892
+ output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
1139
893
 
894
+ warning(u"Exitting now because of --dry-run")
1140
895
  return
1141
896
 
1142
- def _invalidate_on_cf(destination_base_uri):
1143
- cf = CloudFront(cfg)
1144
- default_index_file = None
1145
- if cfg.invalidate_default_index_on_cf or cfg.invalidate_default_index_root_on_cf:
1146
- info_response = s3.website_info(destination_base_uri, cfg.bucket_location)
1147
- if info_response:
1148
- default_index_file = info_response['index_document']
1149
- if len(default_index_file) < 1:
1150
- default_index_file = None
1151
-
1152
- result = cf.InvalidateObjects(destination_base_uri, uploaded_objects_list, default_index_file, cfg.invalidate_default_index_on_cf, cfg.invalidate_default_index_root_on_cf)
1153
- if result['status'] == 201:
1154
- output("Created invalidation request for %d paths" % len(uploaded_objects_list))
1155
- output("Check progress with: s3cmd cfinvalinfo cf://%s/%s" % (result['dist_id'], result['request_id']))
1156
-
897
+ if cfg.delete_removed:
898
+ for key in remote_list:
899
+ uri = S3Uri(remote_list[key]['object_uri_str'])
900
+ s3.object_delete(uri)
901
+ output(u"deleted: '%s'" % uri)
1157
902
 
1158
- # main execution
1159
- s3 = S3(cfg)
1160
903
  uploaded_objects_list = []
904
+ total_size = 0
905
+ total_elapsed = 0.0
906
+ timestamp_start = time.time()
907
+ seq = 0
908
+ file_list = local_list.keys()
909
+ file_list.sort()
910
+ for file in file_list:
911
+ seq += 1
912
+ item = local_list[file]
913
+ src = item['full_name']
914
+ uri = S3Uri(item['remote_uri'])
915
+ seq_label = "[%d of %d]" % (seq, local_count)
916
+ extra_headers = copy(cfg.extra_headers)
917
+ try:
918
+ if cfg.preserve_attrs:
919
+ attr_header = _build_attr_header(src)
920
+ debug(u"attr_header: %s" % attr_header)
921
+ extra_headers.update(attr_header)
922
+ response = s3.object_put(src, uri, extra_headers, extra_label = seq_label)
923
+ except InvalidFileError, e:
924
+ warning(u"File can not be uploaded: %s" % e)
925
+ continue
926
+ except S3UploadError, e:
927
+ error(u"%s: upload failed too many times. Skipping that file." % item['full_name_unicode'])
928
+ continue
929
+ speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
930
+ if not cfg.progress_meter:
931
+ output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
932
+ (item['full_name_unicode'], uri, response["size"], response["elapsed"],
933
+ speed_fmt[0], speed_fmt[1], seq_label))
934
+ total_size += response["size"]
935
+ uploaded_objects_list.append(uri.object())
1161
936
 
1162
- if cfg.encrypt:
1163
- error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.")
1164
- error(u"Either use unconditional 's3cmd put --recursive'")
1165
- error(u"or disable encryption with --no-encrypt parameter.")
1166
- sys.exit(1)
1167
-
1168
- local_list, single_file_local = fetch_local_list(args[:-1], recursive = True)
1169
-
1170
- destinations = [args[-1]]
1171
- if cfg.additional_destinations:
1172
- destinations = destinations + cfg.additional_destinations
937
+ total_elapsed = time.time() - timestamp_start
938
+ total_speed = total_elapsed and total_size/total_elapsed or 0.0
939
+ speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True)
1173
940
 
1174
- if 'fork' not in os.__all__ or len(destinations) < 2:
1175
- destination_base_uri = _single_process(local_list)
1176
- if cfg.invalidate_on_cf:
1177
- if len(uploaded_objects_list) == 0:
1178
- info("Nothing to invalidate in CloudFront")
1179
- else:
1180
- _invalidate_on_cf(destination_base_uri)
941
+ # Only print out the result if any work has been done or
942
+ # if the user asked for verbose output
943
+ outstr = "Done. Uploaded %d bytes in %0.1f seconds, %0.2f %sB/s" % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1])
944
+ if total_size > 0:
945
+ output(outstr)
1181
946
  else:
1182
- _parent()
1183
- if cfg.invalidate_on_cf:
1184
- error(u"You cannot use both --cf-invalidate and --add-destination.")
947
+ info(outstr)
948
+
949
+ if cfg.invalidate_on_cf:
950
+ if len(uploaded_objects_list) == 0:
951
+ info("Nothing to invalidate in CloudFront")
952
+ else:
953
+ # 'uri' from the last iteration is still valid at this point
954
+ cf = CloudFront(cfg)
955
+ result = cf.InvalidateObjects(uri, uploaded_objects_list)
956
+ if result['status'] == 201:
957
+ output("Created invalidation request for %d paths" % len(uploaded_objects_list))
958
+ output("Check progress with: s3cmd cfinvalinfo cf://%s/%s" % (result['dist_id'], result['request_id']))
1185
959
 
1186
960
  def cmd_sync(args):
1187
961
  if (len(args) < 2):
@@ -1196,6 +970,45 @@ def cmd_sync(args):
1196
970
  raise ParameterError("Invalid source/destination: '%s'" % "' '".join(args))
1197
971
 
1198
972
  def cmd_setacl(args):
973
+ def _update_acl(uri, seq_label = ""):
974
+ something_changed = False
975
+ acl = s3.get_acl(uri)
976
+ debug(u"acl: %s - %r" % (uri, acl.grantees))
977
+ if cfg.acl_public == True:
978
+ if acl.isAnonRead():
979
+ info(u"%s: already Public, skipping %s" % (uri, seq_label))
980
+ else:
981
+ acl.grantAnonRead()
982
+ something_changed = True
983
+ elif cfg.acl_public == False: # we explicitely check for False, because it could be None
984
+ if not acl.isAnonRead():
985
+ info(u"%s: already Private, skipping %s" % (uri, seq_label))
986
+ else:
987
+ acl.revokeAnonRead()
988
+ something_changed = True
989
+
990
+ # update acl with arguments
991
+ # grant first and revoke later, because revoke has priority
992
+ if cfg.acl_grants:
993
+ something_changed = True
994
+ for grant in cfg.acl_grants:
995
+ acl.grant(**grant);
996
+
997
+ if cfg.acl_revokes:
998
+ something_changed = True
999
+ for revoke in cfg.acl_revokes:
1000
+ acl.revoke(**revoke);
1001
+
1002
+ if not something_changed:
1003
+ return
1004
+
1005
+ retsponse = s3.set_acl(uri, acl)
1006
+ if retsponse['status'] == 200:
1007
+ if cfg.acl_public in (True, False):
1008
+ output(u"%s: ACL set to %s %s" % (uri, set_to_acl, seq_label))
1009
+ else:
1010
+ output(u"%s: ACL updated" % uri)
1011
+
1199
1012
  s3 = S3(cfg)
1200
1013
 
1201
1014
  set_to_acl = cfg.acl_public and "Public" or "Private"
@@ -1211,7 +1024,7 @@ def cmd_setacl(args):
1211
1024
  else:
1212
1025
  info("Setting bucket-level ACL for %s" % (uri.uri()))
1213
1026
  if not cfg.dry_run:
1214
- update_acl(s3, uri)
1027
+ _update_acl(uri)
1215
1028
  else:
1216
1029
  args.append(arg)
1217
1030
 
@@ -1228,7 +1041,7 @@ def cmd_setacl(args):
1228
1041
  for key in remote_list:
1229
1042
  output(u"setacl: %s" % remote_list[key]['object_uri_str'])
1230
1043
 
1231
- warning(u"Exiting now because of --dry-run")
1044
+ warning(u"Exitting now because of --dry-run")
1232
1045
  return
1233
1046
 
1234
1047
  seq = 0
@@ -1236,34 +1049,7 @@ def cmd_setacl(args):
1236
1049
  seq += 1
1237
1050
  seq_label = "[%d of %d]" % (seq, remote_count)
1238
1051
  uri = S3Uri(remote_list[key]['object_uri_str'])
1239
- update_acl(s3, uri, seq_label)
1240
-
1241
- def cmd_setpolicy(args):
1242
- s3 = S3(cfg)
1243
- uri = S3Uri(args[1])
1244
- policy_file = args[0]
1245
- policy = open(policy_file, 'r').read()
1246
-
1247
- if cfg.dry_run: return
1248
-
1249
- response = s3.set_policy(uri, policy)
1250
-
1251
- #if retsponse['status'] == 200:
1252
- debug(u"response - %s" % response['status'])
1253
- if response['status'] == 204:
1254
- output(u"%s: Policy updated" % uri)
1255
-
1256
- def cmd_delpolicy(args):
1257
- s3 = S3(cfg)
1258
- uri = S3Uri(args[0])
1259
- if cfg.dry_run: return
1260
-
1261
- response = s3.delete_policy(uri)
1262
-
1263
- #if retsponse['status'] == 200:
1264
- debug(u"response - %s" % response['status'])
1265
- output(u"%s: Policy deleted" % uri)
1266
-
1052
+ _update_acl(uri, seq_label)
1267
1053
 
1268
1054
  def cmd_accesslog(args):
1269
1055
  s3 = S3(cfg)
@@ -1292,15 +1078,6 @@ def cmd_sign(args):
1292
1078
  signature = Utils.sign_string(string_to_sign)
1293
1079
  output("Signature: %s" % signature)
1294
1080
 
1295
- def cmd_signurl(args):
1296
- expiry = args.pop()
1297
- url_to_sign = S3Uri(args.pop())
1298
- if url_to_sign.type != 's3':
1299
- raise ParameterError("Must be S3Uri. Got: %s" % url_to_sign)
1300
- debug("url to sign: %r" % url_to_sign)
1301
- signed_url = Utils.sign_url(url_to_sign, expiry)
1302
- output(signed_url)
1303
-
1304
1081
  def cmd_fixbucket(args):
1305
1082
  def _unescape(text):
1306
1083
  ##
@@ -1615,13 +1392,8 @@ def get_commands_list():
1615
1392
  {"cmd":"cp", "label":"Copy object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_cp, "argc":2},
1616
1393
  {"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2},
1617
1394
  {"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1},
1618
-
1619
- {"cmd":"setpolicy", "label":"Modify Bucket Policy", "param":"FILE s3://BUCKET", "func":cmd_setpolicy, "argc":2},
1620
- {"cmd":"delpolicy", "label":"Delete Bucket Policy", "param":"s3://BUCKET", "func":cmd_delpolicy, "argc":1},
1621
-
1622
1395
  {"cmd":"accesslog", "label":"Enable/disable bucket access logging", "param":"s3://BUCKET", "func":cmd_accesslog, "argc":1},
1623
1396
  {"cmd":"sign", "label":"Sign arbitrary string using the secret key", "param":"STRING-TO-SIGN", "func":cmd_sign, "argc":1},
1624
- {"cmd":"signurl", "label":"Sign an S3 URL to provide limited public access with expiry", "param":"s3://BUCKET/OBJECT expiry_epoch", "func":cmd_signurl, "argc":2},
1625
1397
  {"cmd":"fixbucket", "label":"Fix invalid file names in a bucket", "param":"s3://BUCKET[/PREFIX]", "func":cmd_fixbucket, "argc":1},
1626
1398
 
1627
1399
  ## Website commands
@@ -1645,47 +1417,6 @@ def format_commands(progname, commands_list):
  help += " %s\n %s %s %s\n" % (cmd["label"], progname, cmd["cmd"], cmd["param"])
  return help

-
- def update_acl(s3, uri, seq_label=""):
- something_changed = False
- acl = s3.get_acl(uri)
- debug(u"acl: %s - %r" % (uri, acl.grantees))
- if cfg.acl_public == True:
- if acl.isAnonRead():
- info(u"%s: already Public, skipping %s" % (uri, seq_label))
- else:
- acl.grantAnonRead()
- something_changed = True
- elif cfg.acl_public == False: # we explicitely check for False, because it could be None
- if not acl.isAnonRead():
- info(u"%s: already Private, skipping %s" % (uri, seq_label))
- else:
- acl.revokeAnonRead()
- something_changed = True
-
- # update acl with arguments
- # grant first and revoke later, because revoke has priority
- if cfg.acl_grants:
- something_changed = True
- for grant in cfg.acl_grants:
- acl.grant(**grant)
-
- if cfg.acl_revokes:
- something_changed = True
- for revoke in cfg.acl_revokes:
- acl.revoke(**revoke)
-
- if not something_changed:
- return
-
- retsponse = s3.set_acl(uri, acl)
- if retsponse['status'] == 200:
- if cfg.acl_public in (True, False):
- set_to_acl = cfg.acl_public and "Public" or "Private"
- output(u"%s: ACL set to %s %s" % (uri, set_to_acl, seq_label))
- else:
- output(u"%s: ACL updated" % uri)
-
  class OptionMimeType(Option):
  def check_mimetype(option, opt, value):
  if re.compile("^[a-z0-9]+/[a-z0-9+\.-]+(;.*)?$", re.IGNORECASE).match(value):
@@ -1750,8 +1481,6 @@ def main():
  optparser.add_option( "--configure", dest="run_configure", action="store_true", help="Invoke interactive (re)configuration tool. Optionally use as '--configure s3://come-bucket' to test access to a specific bucket instead of attempting to list them all.")
  optparser.add_option("-c", "--config", dest="config", metavar="FILE", help="Config file name. Defaults to %default")
  optparser.add_option( "--dump-config", dest="dump_config", action="store_true", help="Dump current configuration after parsing config files and command line options and exit.")
- optparser.add_option( "--access_key", dest="access_key", help="AWS Access Key")
- optparser.add_option( "--secret_key", dest="secret_key", help="AWS Secret Key")

  optparser.add_option("-n", "--dry-run", dest="dry_run", action="store_true", help="Only show what should be uploaded or downloaded but don't actually do it. May still perform S3 requests to get bucket listings and other information though (only for file transfer commands)")
 
@@ -1770,10 +1499,6 @@ def main():

  optparser.add_option( "--delete-removed", dest="delete_removed", action="store_true", help="Delete remote objects with no corresponding local file [sync]")
  optparser.add_option( "--no-delete-removed", dest="delete_removed", action="store_false", help="Don't delete remote objects.")
- optparser.add_option( "--delete-after", dest="delete_after", action="store_true", help="Perform deletes after new uploads [sync]")
- optparser.add_option( "--delay-updates", dest="delay_updates", action="store_true", help="Put all updated files into place at end [sync]")
- optparser.add_option( "--add-destination", dest="additional_destinations", action="append", help="Additional destination for parallel uploads, in addition to last arg. May be repeated.")
- optparser.add_option( "--delete-after-fetch", dest="delete_after_fetch", action="store_true", help="Delete remote objects after fetching to local file (only for [get] and [sync] commands).")
  optparser.add_option("-p", "--preserve", dest="preserve_attrs", action="store_true", help="Preserve filesystem attributes (mode, ownership, timestamps). Default for [sync] command.")
  optparser.add_option( "--no-preserve", dest="preserve_attrs", action="store_false", help="Don't store FS attributes")
  optparser.add_option( "--exclude", dest="exclude", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be excluded from sync")
@@ -1785,21 +1510,20 @@ def main():
  optparser.add_option( "--rinclude", dest="rinclude", action="append", metavar="REGEXP", help="Same as --include but uses REGEXP (regular expression) instead of GLOB")
  optparser.add_option( "--rinclude-from", dest="rinclude_from", action="append", metavar="FILE", help="Read --rinclude REGEXPs from FILE")

- optparser.add_option( "--bucket-location", dest="bucket_location", help="Datacentre to create bucket in. As of now the datacenters are: US (default), EU, ap-northeast-1, ap-southeast-1, sa-east-1, us-west-1 and us-west-2")
+ optparser.add_option( "--bucket-location", dest="bucket_location", help="Datacentre to create bucket in. As of now the datacenters are: US (default), EU, us-west-1, and ap-southeast-1")
  optparser.add_option( "--reduced-redundancy", "--rr", dest="reduced_redundancy", action="store_true", help="Store object with 'Reduced redundancy'. Lower per-GB price. [put, cp, mv]")

  optparser.add_option( "--access-logging-target-prefix", dest="log_target_prefix", help="Target prefix for access logs (S3 URI) (for [cfmodify] and [accesslog] commands)")
  optparser.add_option( "--no-access-logging", dest="log_target_prefix", action="store_false", help="Disable access logging (for [cfmodify] and [accesslog] commands)")

  optparser.add_option( "--default-mime-type", dest="default_mime_type", action="store_true", help="Default MIME-type for stored objects. Application default is binary/octet-stream.")
- optparser.add_option("-M", "--guess-mime-type", dest="guess_mime_type", action="store_true", help="Guess MIME-type of files by their extension or mime magic. Fall back to default MIME-Type as specified by --default-mime-type option")
+ optparser.add_option( "--guess-mime-type", dest="guess_mime_type", action="store_true", help="Guess MIME-type of files by their extension or mime magic. Fall back to default MIME-Type as specified by --default-mime-type option")
  optparser.add_option( "--no-guess-mime-type", dest="guess_mime_type", action="store_false", help="Don't guess MIME-type and use the default type instead.")
  optparser.add_option("-m", "--mime-type", dest="mime_type", type="mimetype", metavar="MIME/TYPE", help="Force MIME-type. Override both --default-mime-type and --guess-mime-type.")

  optparser.add_option( "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this options if you like.")

  optparser.add_option( "--encoding", dest="encoding", metavar="ENCODING", help="Override autodetected terminal and filesystem encoding (character set). Autodetected: %s" % preferred_encoding)
- optparser.add_option( "--add-encoding-exts", dest="add_encoding_exts", metavar="EXTENSIONs", help="Add encoding to these comma delimited extensions i.e. (css,js,html) when uploading to S3 )")
  optparser.add_option( "--verbatim", dest="urlencoding_mode", action="store_const", const="verbatim", help="Use the S3 name as given on the command line. No pre-processing, encoding, etc. Use with caution!")

  optparser.add_option( "--disable-multipart", dest="enable_multipart", action="store_false", help="Disable multipart upload on files bigger than --multipart-chunk-size-mb")
@@ -1816,10 +1540,6 @@ def main():
  optparser.add_option( "--enable", dest="enable", action="store_true", help="Enable given CloudFront distribution (only for [cfmodify] command)")
  optparser.add_option( "--disable", dest="enable", action="store_false", help="Enable given CloudFront distribution (only for [cfmodify] command)")
  optparser.add_option( "--cf-invalidate", dest="invalidate_on_cf", action="store_true", help="Invalidate the uploaded filed in CloudFront. Also see [cfinval] command.")
- # joseprio: adding options to invalidate the default index and the default
- # index root
- optparser.add_option( "--cf-invalidate-default-index", dest="invalidate_default_index_on_cf", action="store_true", help="When using Custom Origin and S3 static website, invalidate the default index file.")
- optparser.add_option( "--cf-no-invalidate-default-index-root", dest="invalidate_default_index_root_on_cf", action="store_false", help="When using Custom Origin and S3 static website, don't invalidate the path to the default index file.")
  optparser.add_option( "--cf-add-cname", dest="cf_cnames_add", action="append", metavar="CNAME", help="Add given CNAME to a CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
  optparser.add_option( "--cf-remove-cname", dest="cf_cnames_remove", action="append", metavar="CNAME", help="Remove given CNAME from a CloudFront distribution (only for [cfmodify] command)")
  optparser.add_option( "--cf-comment", dest="cf_comment", action="store", metavar="COMMENT", help="Set COMMENT for a given CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
@@ -1828,8 +1548,6 @@ def main():
  optparser.add_option("-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="Enable debug output.")
  optparser.add_option( "--version", dest="show_version", action="store_true", help="Show s3cmd version (%s) and exit." % (PkgInfo.version))
  optparser.add_option("-F", "--follow-symlinks", dest="follow_symlinks", action="store_true", default=False, help="Follow symbolic links as if they are regular files")
- optparser.add_option( "--cache-file", dest="cache_file", action="store", default="", metavar="FILE", help="Cache FILE containing local source MD5 values")
- optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False, help="Silence output on stdout")

  optparser.set_usage(optparser.usage + " COMMAND [parameters]")
  optparser.set_description('S3cmd is a tool for managing objects in '+
@@ -1852,14 +1570,6 @@ def main():
  output(u"s3cmd version %s" % PkgInfo.version)
  sys.exit(0)

- if options.quiet:
- try:
- f = open("/dev/null", "w")
- sys.stdout.close()
- sys.stdout = f
- except IOError:
- warning(u"Unable to open /dev/null: --quiet disabled.")
-
  ## Now finally parse the config file
  if not options.config:
  error(u"Can't find a config file. Please use --config option.")
@@ -1964,9 +1674,6 @@ def main():
  ## Some CloudFront.Cmd.Options() options are not settable from command line
  pass

- if options.additional_destinations:
- cfg.additional_destinations = options.additional_destinations
-
  ## Set output and filesystem encoding for printing out filenames.
  sys.stdout = codecs.getwriter(cfg.encoding)(sys.stdout, "replace")
  sys.stderr = codecs.getwriter(cfg.encoding)(sys.stderr, "replace")
@@ -2027,7 +1734,7 @@ def main():
  sys.exit(1)

  if len(args) < commands[command]["argc"]:
- error(u"Not enough parameters for command '%s'" % command)
+ error(u"Not enough paramters for command '%s'" % command)
  sys.exit(1)

  try:
@@ -2081,7 +1788,6 @@ if __name__ == '__main__':
  from S3.S3 import S3
  from S3.Config import Config
  from S3.SortedDict import SortedDict
- from S3.FileDict import FileDict
  from S3.S3Uri import S3Uri
  from S3 import Utils
  from S3.Utils import *