s3_cmd_bin 0.0.1 → 0.0.2

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages' content as it appears in their respective public registries.
Files changed (64)
  1. data/lib/s3_cmd_bin/version.rb +1 -1
  2. data/resources/ChangeLog +0 -0
  3. data/resources/INSTALL +0 -0
  4. data/resources/MANIFEST.in +1 -0
  5. data/resources/NEWS +1 -40
  6. data/resources/README +0 -0
  7. data/resources/S3/ACL.py +0 -0
  8. data/resources/S3/AccessLog.py +0 -0
  9. data/resources/S3/BidirMap.py +0 -0
  10. data/resources/S3/CloudFront.py +8 -37
  11. data/resources/S3/Config.py +1 -88
  12. data/resources/S3/Exceptions.py +1 -1
  13. data/resources/S3/FileLists.py +100 -272
  14. data/resources/S3/MultiPart.py +21 -45
  15. data/resources/S3/PkgInfo.py +1 -1
  16. data/resources/S3/Progress.py +0 -17
  17. data/resources/S3/S3.py +52 -148
  18. data/resources/S3/S3Uri.py +2 -3
  19. data/resources/S3/SimpleDB.py +0 -3
  20. data/resources/S3/SortedDict.py +0 -3
  21. data/resources/S3/Utils.py +3 -80
  22. data/resources/S3/__init__.py +0 -0
  23. data/resources/TODO +0 -0
  24. data/resources/artwork/AtomicClockRadio.ttf +0 -0
  25. data/resources/artwork/TypeRa.ttf +0 -0
  26. data/resources/artwork/site-top-full-size.xcf +0 -0
  27. data/resources/artwork/site-top-label-download.png +0 -0
  28. data/resources/artwork/site-top-label-s3cmd.png +0 -0
  29. data/resources/artwork/site-top-label-s3sync.png +0 -0
  30. data/resources/artwork/site-top-s3tools-logo.png +0 -0
  31. data/resources/artwork/site-top.jpg +0 -0
  32. data/resources/artwork/site-top.png +0 -0
  33. data/resources/artwork/site-top.xcf +0 -0
  34. data/resources/run-tests.py +2 -2
  35. data/resources/s3cmd +306 -600
  36. data/resources/s3cmd.1 +97 -84
  37. data/resources/setup.cfg +0 -0
  38. data/resources/setup.py +0 -0
  39. data/resources/testsuite.tar.gz +0 -0
  40. metadata +2 -26
  41. data/resources/LICENSE +0 -339
  42. data/resources/Makefile +0 -4
  43. data/resources/S3/ACL.pyc +0 -0
  44. data/resources/S3/AccessLog.pyc +0 -0
  45. data/resources/S3/BidirMap.pyc +0 -0
  46. data/resources/S3/CloudFront.pyc +0 -0
  47. data/resources/S3/Config.pyc +0 -0
  48. data/resources/S3/ConnMan.py +0 -71
  49. data/resources/S3/ConnMan.pyc +0 -0
  50. data/resources/S3/Exceptions.pyc +0 -0
  51. data/resources/S3/FileDict.py +0 -53
  52. data/resources/S3/FileDict.pyc +0 -0
  53. data/resources/S3/FileLists.pyc +0 -0
  54. data/resources/S3/HashCache.py +0 -53
  55. data/resources/S3/HashCache.pyc +0 -0
  56. data/resources/S3/MultiPart.pyc +0 -0
  57. data/resources/S3/PkgInfo.pyc +0 -0
  58. data/resources/S3/Progress.pyc +0 -0
  59. data/resources/S3/S3.pyc +0 -0
  60. data/resources/S3/S3Uri.pyc +0 -0
  61. data/resources/S3/SortedDict.pyc +0 -0
  62. data/resources/S3/Utils.pyc +0 -0
  63. data/resources/S3/__init__.pyc +0 -0
  64. data/resources/magic +0 -63

data/resources/S3/MultiPart.py
@@ -42,56 +42,32 @@ class MultiPartUpload(object):
         if not self.upload_id:
             raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")
 
+        size_left = file_size = os.stat(self.file.name)[ST_SIZE]
         self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
-
-        if self.file.name != "<stdin>":
-            size_left = file_size = os.stat(self.file.name)[ST_SIZE]
-            nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
-            debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
-        else:
-            debug("MultiPart: Uploading from %s" % (self.file.name))
+        nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
+        debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
 
         seq = 1
-        if self.file.name != "<stdin>":
-            while size_left > 0:
-                offset = self.chunk_size * (seq - 1)
-                current_chunk_size = min(file_size - offset, self.chunk_size)
-                size_left -= current_chunk_size
-                labels = {
-                    'source' : unicodise(self.file.name),
-                    'destination' : unicodise(self.uri.uri()),
-                    'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
-                }
-                try:
-                    self.upload_part(seq, offset, current_chunk_size, labels)
-                except:
-                    error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
-                    self.abort_upload()
-                    raise
-                seq += 1
-        else:
-            while True:
-                buffer = self.file.read(self.chunk_size)
-                offset = self.chunk_size * (seq - 1)
-                current_chunk_size = len(buffer)
-                labels = {
-                    'source' : unicodise(self.file.name),
-                    'destination' : unicodise(self.uri.uri()),
-                    'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
-                }
-                if len(buffer) == 0: # EOF
-                    break
-                try:
-                    self.upload_part(seq, offset, current_chunk_size, labels, buffer)
-                except:
-                    error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
-                    self.abort_upload()
-                    raise
-                seq += 1
+        while size_left > 0:
+            offset = self.chunk_size * (seq - 1)
+            current_chunk_size = min(file_size - offset, self.chunk_size)
+            size_left -= current_chunk_size
+            labels = {
+                'source' : unicodise(self.file.name),
+                'destination' : unicodise(self.uri.uri()),
+                'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
+            }
+            try:
+                self.upload_part(seq, offset, current_chunk_size, labels)
+            except:
+                error(u"Upload of '%s' part %d failed. Aborting multipart upload." % (self.file.name, seq))
+                self.abort_upload()
+                raise
+            seq += 1
 
         debug("MultiPart: Upload finished: %d parts", seq - 1)
 
-    def upload_part(self, seq, offset, chunk_size, labels, buffer = ''):
+    def upload_part(self, seq, offset, chunk_size, labels):
         """
         Upload a file chunk
         http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
@@ -101,7 +77,7 @@ class MultiPartUpload(object):
         headers = { "content-length": chunk_size }
         query_string = "?partNumber=%i&uploadId=%s" % (seq, self.upload_id)
         request = self.s3.create_request("OBJECT_PUT", uri = self.uri, headers = headers, extra = query_string)
-        response = self.s3.send_file(request, self.file, labels, buffer, offset = offset, chunk_size = chunk_size)
+        response = self.s3.send_file(request, self.file, labels, offset = offset, chunk_size = chunk_size)
         self.parts[seq] = response["headers"]["etag"]
         return response
 
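The part-count expression in the rewritten upload_all_parts is a Python 2 ceiling-division idiom: integer `/` floors, and `(file_size % self.chunk_size and 1)` adds 1 exactly when a partial trailing part remains. A minimal standalone sketch (the 15 MB chunk matches s3cmd's default multipart_chunk_size_mb; the 100 MB file size is made up for illustration):

    # Ceiling division via floor division plus a remainder flag (Python 2).
    chunk_size = 15 * 1024 * 1024         # 15 MB parts
    file_size = 100 * 1024 * 1024         # hypothetical 100 MB upload

    # (file_size % chunk_size and 1) evaluates to 1 when a remainder exists, else 0
    nr_parts = file_size // chunk_size + (file_size % chunk_size and 1)
    print nr_parts                        # 7: six full 15 MB parts + one 10 MB tail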

data/resources/S3/PkgInfo.py
@@ -1,5 +1,5 @@
 package = "s3cmd"
-version = "1.5.0-alpha3"
+version = "1.1.0-beta3"
 url = "http://s3tools.org"
 license = "GPL version 2"
 short_description = "Command line tool for managing Amazon S3 and CloudFront services"

data/resources/S3/Progress.py
@@ -5,12 +5,10 @@
 
 import sys
 import datetime
-import time
 import Utils
 
 class Progress(object):
     _stdout = sys.stdout
-    _last_display = 0
 
     def __init__(self, labels, total_size):
         self._stdout = sys.stdout
@@ -50,13 +48,6 @@ class Progress(object):
         self._stdout.write(u"%(source)s -> %(destination)s %(extra)s\n" % self.labels)
         self._stdout.flush()
 
-    def _display_needed(self):
-        # We only need to update the display every so often.
-        if time.time() - self._last_display > 1:
-            self._last_display = time.time()
-            return True
-        return False
-
     def display(self, new_file = False, done_message = None):
         """
         display(new_file = False[/True], done = False[/True])
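For reference, the deleted _display_needed was a simple time-based rate limiter: redraw at most once per second, otherwise skip. The same pattern as a standalone sketch (the class name and configurable interval are illustrative, not part of s3cmd):

    import time

    class RateLimiter(object):
        """Allow an action at most once every 'interval' seconds."""
        def __init__(self, interval = 1.0):
            self.interval = interval
            self._last = 0.0

        def ready(self):
            # Returns True (and resets the clock) only if the interval has passed.
            now = time.time()
            if now - self._last > self.interval:
                self._last = now
                return True
            return False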
@@ -107,10 +98,6 @@ class ProgressANSI(Progress):
             self._stdout.flush()
             return
 
-        # Only display progress every so often
-        if not (new_file or done_message) and not self._display_needed():
-            return
-
         timedelta = self.time_current - self.time_start
         sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
         if (sec_elapsed > 0):
@@ -145,10 +132,6 @@ class ProgressCR(Progress):
             self.output_labels()
             return
 
-        # Only display progress every so often
-        if not (new_file or done_message) and not self._display_needed():
-            return
-
         timedelta = self.time_current - self.time_start
         sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
         if (sec_elapsed > 0):
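The sec_elapsed expression retained in both hunks flattens a datetime.timedelta by hand; on Python 2.7+ it is equivalent to timedelta.total_seconds(). For example:

    import datetime

    td = datetime.timedelta(days = 1, seconds = 3, microseconds = 500000)
    print td.days * 86400 + td.seconds + float(td.microseconds) / 1000000.0  # 86403.5
    print td.total_seconds()                                                 # 86403.5 (Python 2.7+)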

data/resources/S3/S3.py
@@ -27,50 +27,20 @@ from Config import Config
 from Exceptions import *
 from MultiPart import MultiPartUpload
 from S3Uri import S3Uri
-from ConnMan import ConnMan
 
 try:
-    import magic, gzip
+    import magic
     try:
         ## https://github.com/ahupp/python-magic
         magic_ = magic.Magic(mime=True)
-        def mime_magic_file(file):
+        def mime_magic(file):
             return magic_.from_file(file)
-        def mime_magic_buffer(buffer):
-            return magic_.from_buffer(buffer)
-    except TypeError:
-        ## http://pypi.python.org/pypi/filemagic
-        try:
-            magic_ = magic.Magic(flags=magic.MAGIC_MIME)
-            def mime_magic_file(file):
-                return magic_.id_filename(file)
-            def mime_magic_buffer(buffer):
-                return magic_.id_buffer(buffer)
-        except TypeError:
-            ## file-5.11 built-in python bindings
-            magic_ = magic.open(magic.MAGIC_MIME)
-            magic_.load()
-            def mime_magic_file(file):
-                return magic_.file(file)
-            def mime_magic_buffer(buffer):
-                return magic_.buffer(buffer)
-
-    except AttributeError:
+    except (TypeError, AttributeError):
         ## Older python-magic versions
         magic_ = magic.open(magic.MAGIC_MIME)
         magic_.load()
-        def mime_magic_file(file):
+        def mime_magic(file):
             return magic_.file(file)
-        def mime_magic_buffer(buffer):
-            return magic_.buffer(buffer)
-
-    def mime_magic(file):
-        type = mime_magic_file(file)
-        if type != "application/x-gzip; charset=binary":
-            return (type, None)
-        else:
-            return (mime_magic_buffer(gzip.open(file).read(8192)), 'gzip')
-
 except ImportError, e:
     if str(e).find("magic") >= 0:
         magic_message = "Module python-magic is not available."
@@ -83,19 +53,13 @@ except ImportError, e:
         if (not magic_warned):
             warning(magic_message)
             magic_warned = True
-        return mimetypes.guess_type(file)
+        return mimetypes.guess_type(file)[0]
 
 __all__ = []
 class S3Request(object):
     def __init__(self, s3, method_string, resource, headers, params = {}):
         self.s3 = s3
         self.headers = SortedDict(headers or {}, ignore_case = True)
-        # Add in any extra headers from s3 config object
-        if self.s3.config.extra_headers:
-            self.headers.update(self.s3.config.extra_headers)
-        if len(self.s3.config.access_token)>0:
-            self.s3.config.role_refresh()
-            self.headers['x-amz-security-token']=self.s3.config.access_token
         self.resource = resource
         self.method_string = method_string
         self.params = params
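The [0] added to the fallback keeps both code paths returning the same shape: mimetypes.guess_type returns a (type, encoding) tuple, while the simplified mime_magic above returns a bare MIME string. For example:

    import mimetypes

    print mimetypes.guess_type("photo.jpg")        # ('image/jpeg', None)
    print mimetypes.guess_type("photo.jpg")[0]     # 'image/jpeg'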
@@ -191,6 +155,15 @@ class S3(object):
     def __init__(self, config):
         self.config = config
 
+    def get_connection(self, bucket):
+        if self.config.proxy_host != "":
+            return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
+        else:
+            if self.config.use_https:
+                return httplib.HTTPSConnection(self.get_hostname(bucket))
+            else:
+                return httplib.HTTPConnection(self.get_hostname(bucket))
+
     def get_hostname(self, bucket):
         if bucket and check_bucket_name_dns_conformity(bucket):
             if self.redir_map.has_key(bucket):
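The new get_connection returns a plain httplib connection (Python 2 standard library), replacing 1.5.0-alpha3's pooled ConnMan wrapper, so every caller below opens and tears down its own TCP connection. A rough sketch of that per-request lifecycle (hostname and path are placeholders):

    import httplib

    conn = httplib.HTTPConnection("bucket.s3.amazonaws.com")   # or HTTPSConnection
    conn.request("HEAD", "/object-key")
    response = conn.getresponse()
    print response.status, response.reason
    conn.close()    # no pooling: the socket is discarded after each request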
@@ -366,36 +339,17 @@ class S3(object):
 
         return response
 
-    def add_encoding(self, filename, content_type):
-        if content_type.find("charset=") != -1:
-            return False
-        exts = self.config.add_encoding_exts.split(',')
-        if exts[0]=='':
-            return False
-        parts = filename.rsplit('.',2)
-        if len(parts) < 2:
-            return False
-        ext = parts[1]
-        if ext in exts:
-            return True
-        else:
-            return False
-
     def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
         # TODO TODO
         # Make it consistent with stream-oriented object_get()
         if uri.type != "s3":
             raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
 
-        if filename != "-" and not os.path.isfile(filename):
+        if not os.path.isfile(filename):
             raise InvalidFileError(u"%s is not a regular file" % unicodise(filename))
         try:
-            if filename == "-":
-                file = sys.stdin
-                size = 0
-            else:
-                file = open(filename, "rb")
-                size = os.stat(filename)[ST_SIZE]
+            file = open(filename, "rb")
+            size = os.stat(filename)[ST_SIZE]
         except (IOError, OSError), e:
             raise InvalidFileError(u"%s: %s" % (unicodise(filename), e.strerror))
 
@@ -405,21 +359,12 @@ class S3(object):
 
         ## MIME-type handling
         content_type = self.config.mime_type
-        content_encoding = None
-        if filename != "-" and not content_type and self.config.guess_mime_type:
-            (content_type, content_encoding) = mime_magic(filename)
+        if not content_type and self.config.guess_mime_type:
+            content_type = mime_magic(filename)
         if not content_type:
             content_type = self.config.default_mime_type
-        if not content_encoding:
-            content_encoding = self.config.encoding.upper()
-
-        ## add charset to content type
-        if self.add_encoding(filename, content_type) and content_encoding is not None:
-            content_type = content_type + "; charset=" + content_encoding
-
+        debug("Content-Type set to '%s'" % content_type)
         headers["content-type"] = content_type
-        if content_encoding is not None:
-            headers["content-encoding"] = content_encoding
 
         ## Other Amazon S3 attributes
         if self.config.acl_public:
@@ -429,10 +374,8 @@ class S3(object):
 
         ## Multipart decision
         multipart = False
-        if not self.config.enable_multipart and filename == "-":
-            raise ParameterError("Multi-part upload is required to upload from stdin")
         if self.config.enable_multipart:
-            if size > self.config.multipart_chunk_size_mb * 1024 * 1024 or filename == "-":
+            if size > self.config.multipart_chunk_size_mb * 1024 * 1024:
                 multipart = True
         if multipart:
             # Multipart requests are quite different... drop here
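With stdin uploads removed, the multipart decision reduces to a single size check: use multipart only when the file is larger than one chunk. With hypothetical values:

    # Illustrative config values, not necessarily s3cmd defaults.
    enable_multipart = True
    multipart_chunk_size_mb = 15
    size = 20 * 1024 * 1024    # a 20 MB file

    multipart = enable_multipart and size > multipart_chunk_size_mb * 1024 * 1024
    print multipart            # True: 20 MB exceeds one 15 MB chunk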
@@ -513,29 +456,6 @@ class S3(object):
         response = self.send_request(request, body)
         return response
 
-    def get_policy(self, uri):
-        request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?policy")
-        response = self.send_request(request)
-        return response['data']
-
-    def set_policy(self, uri, policy):
-        headers = {}
-        # TODO check policy is proper json string
-        headers['content-type'] = 'application/json'
-        request = self.create_request("BUCKET_CREATE", uri = uri,
-                                      extra = "?policy", headers=headers)
-        body = policy
-        debug(u"set_policy(%s): policy-json: %s" % (uri, body))
-        request.sign()
-        response = self.send_request(request, body=body)
-        return response
-
-    def delete_policy(self, uri):
-        request = self.create_request("BUCKET_DELETE", uri = uri, extra = "?policy")
-        debug(u"delete_policy(%s)" % uri)
-        response = self.send_request(request)
-        return response
-
     def get_accesslog(self, uri):
         request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?logging")
         response = self.send_request(request)
@@ -660,20 +580,18 @@ class S3(object):
             # "Stringify" all headers
             for header in headers.keys():
                 headers[header] = str(headers[header])
-            conn = ConnMan.get(self.get_hostname(resource['bucket']))
+            conn = self.get_connection(resource['bucket'])
             uri = self.format_uri(resource)
             debug("Sending request method_string=%r, uri=%r, headers=%r, body=(%i bytes)" % (method_string, uri, headers, len(body or "")))
-            conn.c.request(method_string, uri, body, headers)
+            conn.request(method_string, uri, body, headers)
             response = {}
-            http_response = conn.c.getresponse()
+            http_response = conn.getresponse()
             response["status"] = http_response.status
             response["reason"] = http_response.reason
             response["headers"] = convertTupleListToDict(http_response.getheaders())
             response["data"] = http_response.read()
             debug("Response: " + str(response))
-            ConnMan.put(conn)
-        except ParameterError, e:
-            raise
+            conn.close()
         except Exception, e:
             if retries:
                 warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
@@ -707,7 +625,7 @@ class S3(object):
 
         return response
 
-    def send_file(self, request, file, labels, buffer = '', throttle = 0, retries = _max_retries, offset = 0, chunk_size = -1):
+    def send_file(self, request, file, labels, throttle = 0, retries = _max_retries, offset = 0, chunk_size = -1):
         method_string, resource, headers = request.get_triplet()
         size_left = size_total = headers.get("content-length")
         if self.config.progress_meter:
@@ -716,13 +634,12 @@ class S3(object):
             info("Sending file '%s', please wait..." % file.name)
         timestamp_start = time.time()
         try:
-            conn = ConnMan.get(self.get_hostname(resource['bucket']))
-            conn.c.putrequest(method_string, self.format_uri(resource))
+            conn = self.get_connection(resource['bucket'])
+            conn.connect()
+            conn.putrequest(method_string, self.format_uri(resource))
             for header in headers.keys():
-                conn.c.putheader(header, str(headers[header]))
-            conn.c.endheaders()
-        except ParameterError, e:
-            raise
+                conn.putheader(header, str(headers[header]))
+            conn.endheaders()
         except Exception, e:
             if self.config.progress_meter:
                 progress.done("failed")
@@ -731,21 +648,17 @@ class S3(object):
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
                 # Connection error -> same throttle value
-                return self.send_file(request, file, labels, buffer, throttle, retries - 1, offset, chunk_size)
+                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
             else:
                 raise S3UploadError("Upload failed for: %s" % resource['uri'])
-        if buffer == '':
-            file.seek(offset)
+        file.seek(offset)
         md5_hash = md5()
         try:
             while (size_left > 0):
-                #debug("SendFile: Reading up to %d bytes from '%s' - remaining bytes: %s" % (self.config.send_chunk, file.name, size_left))
-                if buffer == '':
-                    data = file.read(min(self.config.send_chunk, size_left))
-                else:
-                    data = buffer
+                #debug("SendFile: Reading up to %d bytes from '%s'" % (self.config.send_chunk, file.name))
+                data = file.read(min(self.config.send_chunk, size_left))
                 md5_hash.update(data)
-                conn.c.send(data)
+                conn.send(data)
                 if self.config.progress_meter:
                     progress.update(delta_position = len(data))
                 size_left -= len(data)
@@ -753,16 +666,14 @@ class S3(object):
                     time.sleep(throttle)
             md5_computed = md5_hash.hexdigest()
             response = {}
-            http_response = conn.c.getresponse()
+            http_response = conn.getresponse()
             response["status"] = http_response.status
             response["reason"] = http_response.reason
             response["headers"] = convertTupleListToDict(http_response.getheaders())
             response["data"] = http_response.read()
             response["size"] = size_total
-            ConnMan.put(conn)
+            conn.close()
             debug(u"Response: %s" % response)
-        except ParameterError, e:
-            raise
         except Exception, e:
             if self.config.progress_meter:
                 progress.done("failed")
@@ -774,7 +685,7 @@ class S3(object):
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
                 # Connection error -> same throttle value
-                return self.send_file(request, file, labels, buffer, throttle, retries - 1, offset, chunk_size)
+                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
             else:
                 debug("Giving up on '%s' %s" % (file.name, e))
                 raise S3UploadError("Upload failed for: %s" % resource['uri'])
@@ -784,7 +695,7 @@ class S3(object):
         response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
 
         if self.config.progress_meter:
-            ## Finalising the upload takes some time -> update() progress meter
+            ## The above conn.close() takes some time -> update() progress meter
             ## to correct the average speed. Otherwise people will complain that
             ## 'progress' and response["speed"] are inconsistent ;-)
             progress.update()
@@ -796,7 +707,7 @@ class S3(object):
             redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
             self.set_hostname(redir_bucket, redir_hostname)
             warning("Redirected to: %s" % (redir_hostname))
-            return self.send_file(request, file, labels, buffer, offset = offset, chunk_size = chunk_size)
+            return self.send_file(request, file, labels, offset = offset, chunk_size = chunk_size)
 
         # S3 from time to time doesn't send ETag back in a response :-(
         # Force re-upload here.
@@ -819,7 +730,7 @@ class S3(object):
                 warning("Upload failed: %s (%s)" % (resource['uri'], S3Error(response)))
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
-                return self.send_file(request, file, labels, buffer, throttle, retries - 1, offset, chunk_size)
+                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
             else:
                 warning("Too many failures. Giving up on '%s'" % (file.name))
                 raise S3UploadError
@@ -832,7 +743,7 @@ class S3(object):
             warning("MD5 Sums don't match!")
             if retries:
                 warning("Retrying upload of %s" % (file.name))
-                return self.send_file(request, file, labels, buffer, throttle, retries - 1, offset, chunk_size)
+                return self.send_file(request, file, labels, throttle, retries - 1, offset, chunk_size)
             else:
                 warning("Too many failures. Giving up on '%s'" % (file.name))
                 raise S3UploadError
@@ -841,14 +752,11 @@ class S3(object):
 
     def send_file_multipart(self, file, headers, uri, size):
         chunk_size = self.config.multipart_chunk_size_mb * 1024 * 1024
-        timestamp_start = time.time()
         upload = MultiPartUpload(self, file, uri, headers)
         upload.upload_all_parts()
         response = upload.complete_multipart_upload()
-        timestamp_end = time.time()
-        response["elapsed"] = timestamp_end - timestamp_start
+        response["speed"] = 0 # XXX
         response["size"] = size
-        response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
         return response
 
     def recv_file(self, request, stream, labels, start_position = 0, retries = _max_retries):
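Note that the 0.0.2 copy hard-codes response["speed"] = 0 rather than timing the multipart upload; the bookkeeping deleted here was the ordinary bytes-over-elapsed-seconds average, written with the pre-ternary `X and Y or Z` idiom. As a standalone sketch (sizes are made up):

    import time

    timestamp_start = time.time()
    # ... upload all parts here ...
    elapsed = time.time() - timestamp_start
    size = 100 * 1024 * 1024

    # float(-1) signals "unknown" when elapsed is zero
    speed = elapsed and float(size) / elapsed or float(-1)   # bytes per second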
@@ -859,22 +767,21 @@ class S3(object):
             info("Receiving file '%s', please wait..." % stream.name)
         timestamp_start = time.time()
         try:
-            conn = ConnMan.get(self.get_hostname(resource['bucket']))
-            conn.c.putrequest(method_string, self.format_uri(resource))
+            conn = self.get_connection(resource['bucket'])
+            conn.connect()
+            conn.putrequest(method_string, self.format_uri(resource))
             for header in headers.keys():
-                conn.c.putheader(header, str(headers[header]))
+                conn.putheader(header, str(headers[header]))
             if start_position > 0:
                 debug("Requesting Range: %d .. end" % start_position)
-                conn.c.putheader("Range", "bytes=%d-" % start_position)
-            conn.c.endheaders()
+                conn.putheader("Range", "bytes=%d-" % start_position)
+            conn.endheaders()
             response = {}
-            http_response = conn.c.getresponse()
+            http_response = conn.getresponse()
             response["status"] = http_response.status
             response["reason"] = http_response.reason
             response["headers"] = convertTupleListToDict(http_response.getheaders())
             debug("Response: %s" % response)
-        except ParameterError, e:
-            raise
         except Exception, e:
             if self.config.progress_meter:
                 progress.done("failed")
@@ -916,9 +823,6 @@ class S3(object):
             while (current_position < size_total):
                 this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left
                 data = http_response.read(this_chunk)
-                if len(data) == 0:
-                    raise S3Error("EOF from S3!")
-
                 stream.write(data)
                 if start_position == 0:
                     md5_hash.update(data)
@@ -926,7 +830,7 @@ class S3(object):
             ## Call progress meter from here...
             if self.config.progress_meter:
                 progress.update(delta_position = len(data))
-            ConnMan.put(conn)
+            conn.close()
         except Exception, e:
             if self.config.progress_meter:
                 progress.done("failed")