fakes3-ruby18 0.2.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +8 -0
- data/Gemfile +4 -0
- data/Gemfile.lock +49 -0
- data/MIT-LICENSE +20 -0
- data/README.md +60 -0
- data/Rakefile +18 -0
- data/bin/fakes3 +6 -0
- data/fakes3.gemspec +32 -0
- data/lib/fakes3.rb +3 -0
- data/lib/fakes3/bucket.rb +65 -0
- data/lib/fakes3/bucket_query.rb +11 -0
- data/lib/fakes3/cli.rb +70 -0
- data/lib/fakes3/errors.rb +46 -0
- data/lib/fakes3/file_store.rb +278 -0
- data/lib/fakes3/rate_limitable_file.rb +21 -0
- data/lib/fakes3/s3_object.rb +19 -0
- data/lib/fakes3/server.rb +541 -0
- data/lib/fakes3/sorted_object_list.rb +137 -0
- data/lib/fakes3/unsupported_operation.rb +4 -0
- data/lib/fakes3/version.rb +3 -0
- data/lib/fakes3/xml_adapter.rb +222 -0
- data/test/aws_sdk_commands_test.rb +31 -0
- data/test/boto_test.rb +25 -0
- data/test/botocmd.py +87 -0
- data/test/local_s3_cfg +34 -0
- data/test/post_test.rb +54 -0
- data/test/right_aws_commands_test.rb +192 -0
- data/test/s3_commands_test.rb +209 -0
- data/test/s3cmd_test.rb +52 -0
- data/test/test_helper.rb +4 -0
- metadata +198 -0
@@ -0,0 +1,137 @@
|
|
1
|
+
require 'set'

module FakeS3
  # Value object returned by SortedObjectList#list: the matching objects,
  # whether the listing was truncated by :max_keys, and any common
  # prefixes rolled up by the :delimiter option.
  class S3MatchSet
    attr_accessor :matches,:is_truncated,:common_prefixes
    def initialize
      @matches = []
      @is_truncated = false
      @common_prefixes = []
    end
  end

  # This class has some of the semantics necessary for how buckets can return
  # their items
  #
  # It is currently implemented naively as a sorted set + hash If you are going
  # to try to put massive lists inside buckets and ls them, you will be sorely
  # disappointed about this performance.
  class SortedObjectList

    def initialize
      @sorted_set = SortedSet.new
      @object_map = {}
      # NOTE(review): this mutex is created but never used by any method
      # here — callers appear responsible for their own locking; confirm.
      @mutex = Mutex.new
    end

    # Number of objects currently stored.
    def count
      @sorted_set.count
    end

    # Look up an object by exact name; returns nil when absent.
    def find(object_name)
      @object_map[object_name]
    end

    # Add an S3 object into the sorted list (nil is ignored).
    def add(s3_object)
      return if !s3_object

      @object_map[s3_object.name] = s3_object
      @sorted_set << s3_object
    end

    # Remove an S3 object from the list (nil is ignored).
    def remove(s3_object)
      return if !s3_object

      @object_map.delete(s3_object.name)
      @sorted_set.delete(s3_object)
    end

    # Return back a set of matches based on the passed in options
    #
    # options:
    #
    # :marker : a string to start the lexicographical search (it is not
    # included in the result)
    # :max_keys : a maximum number of results
    # :prefix : a string to filter the results by
    # :delimiter : keys sharing the same substring between the prefix and
    # the delimiter are rolled up into common_prefixes
    def list(options)
      marker = options[:marker]
      prefix = options[:prefix]
      max_keys = options[:max_keys] || 1000
      delimiter = options[:delimiter]

      ms = S3MatchSet.new

      marker_found = true
      pseudo = nil
      if marker
        marker_found = false
        if !@object_map[marker]
          # Insert a placeholder object so the iteration below can detect
          # where the marker would sort even when no object with that exact
          # name exists; it is removed again before returning.
          pseudo = S3Object.new
          pseudo.name = marker
          @sorted_set << pseudo
        end
      end

      if delimiter
        if prefix
          base_prefix = prefix
        else
          base_prefix = ""
        end
        prefix_offset = base_prefix.length
      end

      count = 0
      last_chunk = nil
      @sorted_set.each do |s3_object|
        if marker_found && (!prefix or s3_object.name.index(prefix) == 0)
          if delimiter
            name = s3_object.name
            remainder = name.slice(prefix_offset, name.length)
            chunks = remainder.split(delimiter, 2)
            if chunks.length > 1
              if (last_chunk != chunks[0])
                # "All of the keys rolled up in a common prefix count as
                # a single return when calculating the number of
                # returns. See MaxKeys."
                # (http://awsdocs.s3.amazonaws.com/S3/latest/s3-api.pdf)
                count += 1
                if count <= max_keys
                  ms.common_prefixes << base_prefix + chunks[0] + delimiter
                  last_chunk = chunks[0]
                else
                  # BUGFIX: the original assigned a throwaway local
                  # `is_truncated`; the flag must be set on the match set
                  # or callers never see the truncation.
                  ms.is_truncated = true
                  break
                end
              end

              # Continue to the next key, since this one has a
              # delimiter.
              next
            end
          end

          count += 1
          if count <= max_keys
            ms.matches << s3_object
          else
            # BUGFIX: set the truncation flag on the result, not a local.
            ms.is_truncated = true
            break
          end
        end

        if marker and marker == s3_object.name
          marker_found = true
        end
      end

      if pseudo
        @sorted_set.delete(pseudo)
      end

      return ms
    end
  end
end
|
@@ -0,0 +1,222 @@
|
|
1
|
+
require 'builder'
require 'time'

module FakeS3
  # Serializes FakeS3 domain objects (buckets, objects, errors) into the
  # XML response bodies expected by S3 clients.
  class XmlAdapter
    # <ListAllMyBucketsResult> for a GET on the service root.
    def self.buckets(bucket_objects)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.ListAllMyBucketsResult(:xmlns => "http://s3.amazonaws.com/doc/2006-03-01/") { |lam|
        lam.Owner { |owner|
          owner.ID("123")
          owner.DisplayName("FakeS3")
        }
        lam.Buckets { |buckets|
          bucket_objects.each do |bucket|
            buckets.Bucket do |b|
              b.Name(bucket.name)
              b.CreationDate(bucket.creation_date.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
            end
          end
        }
      }
      output
    end

    # Generic <Error> document built from an error object exposing
    # #code, #message and #resource.
    def self.error(error)
      error_with_resource(error.code, error.message, error.resource)
    end

    # <?xml version="1.0" encoding="UTF-8"?>
    #<Error>
    # <Code>NoSuchKey</Code>
    # <Message>The resource you requested does not exist</Message>
    # <Resource>/mybucket/myfoto.jpg</Resource>
    # <RequestId>4442587FB7D0A2F9</RequestId>
    #</Error>
    #
    # Shared builder for the standard <Error> shape above.
    def self.error_with_resource(code, message, resource)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.Error { |err|
        err.Code(code)
        err.Message(message)
        err.Resource(resource)
        err.RequestId(1)
      }
      output
    end

    def self.error_no_such_bucket(name)
      error_with_resource("NoSuchBucket",
                          "The resource you requested does not exist", name)
    end

    def self.error_bucket_not_empty(name)
      error_with_resource("BucketNotEmpty",
                          "The bucket you tried to delete is not empty.", name)
    end

    # NoSuchKey uses a different shape (<Key> plus <HostId> instead of
    # <Resource>), so it is not routed through error_with_resource.
    def self.error_no_such_key(name)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.Error { |err|
        err.Code("NoSuchKey")
        err.Message("The specified key does not exist")
        err.Key(name)
        err.RequestId(1)
        err.HostId(2)
      }
      output
    end

    # Empty <ListBucketResult> (bucket exists but no query was made).
    def self.bucket(bucket)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.ListBucketResult(:xmlns => "http://s3.amazonaws.com/doc/2006-03-01/") { |lbr|
        lbr.Name(bucket.name)
        lbr.Prefix
        lbr.Marker
        lbr.MaxKeys("1000")
        lbr.IsTruncated("false")
      }
      output
    end

    # A bucket query gives back the bucket along with contents
    # <Contents>
    #<Key>Nelson</Key>
    # <LastModified>2006-01-01T12:00:00.000Z</LastModified>
    # <ETag>"828ef3fdfa96f00ad9f27c383fc9ac7f"</ETag>
    # <Size>5</Size>
    # <StorageClass>STANDARD</StorageClass>
    # <Owner>
    # <ID>bcaf161ca5fb16fd081034f</ID>
    # <DisplayName>webfile</DisplayName>
    # </Owner>
    # </Contents>

    def self.append_objects_to_list_bucket_result(lbr,objects)
      return if objects.nil? or objects.size == 0

      # BUGFIX: the original dropped into an interactive ruby-debug
      # breakpoint when the list contained a nil entry, which crashes in
      # any environment without the debugger loaded. Skip nil entries
      # defensively instead.
      objects.each do |s3_object|
        next if s3_object.nil?
        lbr.Contents { |contents|
          contents.Key(s3_object.name)
          contents.LastModified(s3_object.modified_date)
          contents.ETag("\"#{s3_object.md5}\"")
          contents.Size(s3_object.size)
          contents.StorageClass("STANDARD")

          contents.Owner { |owner|
            owner.ID("abc")
            owner.DisplayName("You")
          }
        }
      end
    end

    def self.append_common_prefixes_to_list_bucket_result(lbr, prefixes)
      return if prefixes.nil? or prefixes.size == 0

      prefixes.each do |common_prefix|
        lbr.CommonPrefixes { |contents| contents.Prefix(common_prefix) }
      end
    end

    # Full <ListBucketResult> for a bucket GET with query parameters.
    def self.bucket_query(bucket_query)
      output = ""
      bucket = bucket_query.bucket
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.ListBucketResult(:xmlns => "http://s3.amazonaws.com/doc/2006-03-01/") { |lbr|
        lbr.Name(bucket.name)
        lbr.Prefix(bucket_query.prefix)
        lbr.Marker(bucket_query.marker)
        lbr.MaxKeys(bucket_query.max_keys)
        lbr.IsTruncated(bucket_query.is_truncated?)
        append_objects_to_list_bucket_result(lbr,bucket_query.matches)
        append_common_prefixes_to_list_bucket_result(lbr, bucket_query.common_prefixes)
      }
      output
    end

    # ACL xml — FakeS3 always reports a single FULL_CONTROL grant.
    def self.acl(object = nil)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.AccessControlPolicy(:xmlns => "http://s3.amazonaws.com/doc/2006-03-01/") { |acp|
        acp.Owner do |owner|
          owner.ID("abc")
          owner.DisplayName("You")
        end
        acp.AccessControlList do |acl|
          acl.Grant do |grant|
            grant.Grantee("xmlns:xsi" => "http://www.w3.org/2001/XMLSchema-instance", "xsi:type" => "CanonicalUser") do |grantee|
              grantee.ID("abc")
              grantee.DisplayName("You")
            end
            grant.Permission("FULL_CONTROL")
          end
        end
      }
      output
    end

    # <CopyObjectResult>
    # <LastModified>2009-10-28T22:32:00</LastModified>
    # <ETag>"9b2cf535f27731c974343645a3985328"</ETag>
    # </CopyObjectResult>
    def self.copy_object_result(object)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.CopyObjectResult { |result|
        result.LastModified(object.modified_date)
        result.ETag("\"#{object.md5}\"")
      }
      output
    end

    # <CompleteMultipartUploadResult>
    # <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
    # <Bucket>Example-Bucket</Bucket>
    # <Key>Example-Object</Key>
    # <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
    # </CompleteMultipartUploadResult>
    def self.complete_multipart_result(object)
      output = ""
      xml = Builder::XmlMarkup.new(:target => output)
      xml.instruct! :xml, :version=>"1.0", :encoding=>"UTF-8"
      xml.CompleteMultipartUploadResult { |result|
        result.Location("TODO: implement")
        result.Bucket("TODO: implement")
        result.Key(object.name)
        result.ETag("\"#{object.md5}\"")
      }
      output
    end
  end
end
|
@@ -0,0 +1,31 @@
|
|
1
|
+
require 'test/test_helper'
require 'aws-sdk-v1'

# Integration tests driving a locally running FakeS3 server (port 10453)
# through the official aws-sdk v1 client.
class AwsSdkCommandsTest < Test::Unit::TestCase
  def setup
    # Credentials are arbitrary; FakeS3 does not validate them.
    @s3 = AWS::S3.new(:access_key_id => '123',
                      :secret_access_key => 'abc',
                      :s3_endpoint => 'localhost',
                      :s3_port => 10453,
                      :use_ssl => false)
  end

  # Server-side copy should leave both the source and destination keys
  # present in the bucket.
  def test_copy_to
    target_bucket = @s3.buckets["test_copy_to"]
    source = target_bucket.objects["key1"]
    source.write("asdf")
    assert source.exists?

    source.copy_to("key2")
    assert_equal 2, target_bucket.objects.count
  end

  # A low multipart threshold forces the SDK down the multipart-upload
  # code path; the reassembled object must round-trip byte-for-byte.
  def test_multipart_upload
    upload_bucket = @s3.buckets["test_multipart_upload"]
    uploaded = upload_bucket.objects["key1"]
    uploaded.write("thisisaverybigfile", :multipart_threshold => 5)
    assert uploaded.exists?
    assert_equal "thisisaverybigfile", uploaded.read
  end
end
|
data/test/boto_test.rb
ADDED
@@ -0,0 +1,25 @@
|
|
1
|
+
require 'test/test_helper'
require 'fileutils'

# Interoperability test: drives a running FakeS3 server with the Python
# boto client via the botocmd.py helper script.
class BotoTest < Test::Unit::TestCase
  def setup
    script = File.expand_path(File.join(File.dirname(__FILE__),'botocmd.py'))
    @botocmd = "python #{script} -t localhost -p 10453"
  end

  def teardown
  end

  # Upload a copy of this test file and check the helper reports success.
  def test_store
    File.open(__FILE__,'rb') do |src|
      File.open("/tmp/fakes3_upload",'wb') do |dst|
        dst << src.read
      end
    end

    result = `#{@botocmd} put /tmp/fakes3_upload s3://s3cmd_bucket/upload`
    assert_match(/stored/,result)

    FileUtils.rm("/tmp/fakes3_upload")
  end

end
|
data/test/botocmd.py
ADDED
@@ -0,0 +1,87 @@
|
|
1
|
+
#!/usr/bin/python
# -*- coding: utf-8 -*-
# fakes3cmd.py -- an s3cmd-like script that accepts a custom host and portname
import re
import os
from optparse import OptionParser

try:
    from boto.s3.connection import S3Connection, OrdinaryCallingFormat
    from boto.s3.key import Key
except ImportError:
    raise Exception('You must install the boto package for python')


class FakeS3Cmd(object):
    """Minimal s3cmd-style driver used by the test suite to exercise a
    FakeS3 server through boto. Supported commands are listed in COMMANDS
    and dispatched by name from the __main__ block."""

    COMMANDS = ['mb', 'rb', 'put', ]

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.conn = None
        self._connect()

    def _connect(self):
        """Open an unauthenticated, non-SSL boto connection to host:port."""
        # print() with one parenthesized argument is valid on both
        # Python 2 and Python 3 (the original `print x` was 2-only).
        print('Connecting: %s:%s' % (self.host, self.port))
        self.conn = S3Connection(is_secure=False,
                                 calling_format=OrdinaryCallingFormat(),
                                 aws_access_key_id='',
                                 aws_secret_access_key='',
                                 port=self.port, host=self.host)

    @staticmethod
    def _parse_uri(path):
        """Split an s3://bucket[/key] URI into (bucket, key).

        The key group is optional, so the second element may be None.
        """
        match = re.match(r's3://([^/]+)(?:/(.*))?', path, re.I)
        ## (bucket, key)
        return match.groups()

    def mb(self, path, *args):
        """Make the bucket named in the s3:// URI."""
        if not self.conn:
            self._connect()

        bucket, _ = self._parse_uri(path)
        self.conn.create_bucket(bucket)
        print('made bucket: [%s]' % bucket)

    def rb(self, path, *args):
        """Remove the bucket named in the s3:// URI."""
        if not self.conn:
            self._connect()

        bucket, _ = self._parse_uri(path)
        self.conn.delete_bucket(bucket)
        print('removed bucket: [%s]' % bucket)

    def put(self, *args):
        """Upload one or more local files; the last argument is the
        destination s3:// URI (its key part is used as a prefix)."""
        if not self.conn:
            self._connect()

        args = list(args)
        path = args.pop()
        bucket_name, prefix = self._parse_uri(path)
        # BUGFIX: a URI with no key part (e.g. s3://bucket) yields
        # prefix=None, which crashed os.path.join below. Treat it as an
        # empty prefix instead.
        prefix = prefix or ''
        bucket = self.conn.create_bucket(bucket_name)
        for src_file in args:
            key = Key(bucket)
            key.key = os.path.join(prefix, os.path.basename(src_file))
            key.set_contents_from_filename(src_file)
            print('stored: [%s]' % key.key)


if __name__ == "__main__":
    # check for options. TODO: This requires a more verbose help message
    # to explain how the positional arguments work.
    parser = OptionParser()
    parser.add_option("-t", "--host", type="string", default='localhost')
    parser.add_option("-p", "--port", type='int', default=80)
    o, args = parser.parse_args()

    if len(args) < 2:
        raise ValueError('you must minimally supply a desired command and s3 uri')

    cmd = args.pop(0)

    if cmd not in FakeS3Cmd.COMMANDS:
        raise ValueError('%s is not a valid command' % cmd)

    fs3 = FakeS3Cmd(o.host, o.port)
    handler = getattr(fs3, cmd)
    handler(*args)