fakes3-ruby18 0.2.1

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: ed6b8ac05078cf35306777ef3c4a386203a2e6d7
4
+ data.tar.gz: 46d72b7bde80978d2044a56a2a66fb50738c44d4
5
+ SHA512:
6
+ metadata.gz: 6ee628e08b7bf881f67ab1c289cb9509461e05b39b9dc72e7314ec97a9f8e29ff863171698a8b1335ba0c059e7fe17cf87721870362c5bece6c96329226e703f
7
+ data.tar.gz: 18299477d6d67be52f62c84608f23fbf4fceeca45d725b896384693183fffd68994fa9016e8be2ce01664f76250a4c14e1be7caf8ff0864eec5813c9a0536953
@@ -0,0 +1,8 @@
1
+ pkg/*
2
+ *.gem
3
+ .bundle
4
+ tmp
5
+ test_root
6
+
7
+ # Don't check in RVM/rbenv files
8
+ .ruby-version
data/Gemfile ADDED
@@ -0,0 +1,4 @@
# Gemfile for FakeS3 development.
source 'https://rubygems.org'

# Use the local checkout of fakes3 for development and testing.
gem 'fakes3', :path => '.'

# All remaining dependencies are declared in fakes3.gemspec.
gemspec
@@ -0,0 +1,49 @@
1
+ PATH
2
+ remote: .
3
+ specs:
4
+ fakes3 (0.2.1)
5
+ builder
6
+ thor
7
+
8
+ GEM
9
+ remote: https://rubygems.org/
10
+ specs:
11
+ aws-s3 (0.6.3)
12
+ builder
13
+ mime-types
14
+ xml-simple
15
+ aws-sdk-v1 (1.59.0)
16
+ json (~> 1.4)
17
+ nokogiri (>= 1.4.4)
18
+ builder (3.2.2)
19
+ byebug (4.0.1)
20
+ columnize (= 0.9.0)
21
+ rb-readline (= 0.5.2)
22
+ columnize (0.9.0)
23
+ json (1.8.1)
24
+ mime-types (1.25)
25
+ mini_portile (0.6.1)
26
+ nokogiri (1.6.4.1)
27
+ mini_portile (~> 0.6.0)
28
+ rake (10.1.0)
29
+ rb-readline (0.5.2)
30
+ rest-client (1.6.7)
31
+ mime-types (>= 1.16)
32
+ right_aws (3.1.0)
33
+ right_http_connection (>= 1.2.5)
34
+ right_http_connection (1.4.0)
35
+ thor (0.18.1)
36
+ xml-simple (1.1.2)
37
+
38
+ PLATFORMS
39
+ ruby
40
+
41
+ DEPENDENCIES
42
+ aws-s3
43
+ aws-sdk-v1
44
+ bundler (>= 1.0.0)
45
+ byebug
46
+ fakes3!
47
+ rake
48
+ rest-client
49
+ right_aws
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2011,2012 Curtis W Spencer (@jubos) and Spool
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,60 @@
1
+ ## Introduction
2
+ FakeS3 is a lightweight server that responds to the same calls Amazon S3 responds to.
3
+ It is extremely useful for testing of S3 in a sandbox environment without actually
4
+ making calls to Amazon, which not only requires network connectivity, but also costs you precious dollars.
5
+
6
+ The goal of Fake S3 is to minimize runtime dependencies and be more of a
7
+ development tool to test S3 calls in your code rather than a production server
8
+ looking to duplicate S3 functionality. Trying RiakCS, ParkPlace/Boardwalk, or
9
+ Ceph might be a place to start if that is your goal.
10
+
11
+ FakeS3 doesn't support all of the S3 command set, but the basic ones like put, get,
12
+ list, copy, and make bucket are supported. More coming soon.
13
+
14
+ ## Installation
15
+
16
+ gem install fakes3
17
+
18
+ ## Running
19
+
20
+ To run a fakes3 server, you just specify a root and a port.
21
+
22
+ fakes3 -r /mnt/fakes3_root -p 4567
23
+
24
+ ## Connecting to FakeS3
25
+
26
+ Take a look at the test cases to see client example usage. For now, FakeS3 is
27
+ mainly tested with s3cmd, aws-s3 gem, and right_aws. There are plenty more
28
+ libraries out there, so please do mention whether other clients work or not.
29
+
30
+ Here is a running list of [supported clients](https://github.com/jubos/fake-s3/wiki/Supported-Clients "Supported Clients")
31
+
32
+ ## Running Tests
33
+
34
+ There are some prerequisites to actually being able to run the unit/integration tests
35
+
36
+ ### On OSX
37
+
38
+ Edit your /etc/hosts and add the following line:
39
+
40
+ 127.0.0.1 posttest.localhost
41
+
42
+ Then ensure that the following packages are installed (boto, s3cmd)
43
+
44
+ > pip install boto
45
+ > brew install s3cmd
46
+
47
+
48
+ Start the test server using
49
+
50
+ rake test_server
51
+
52
+ Then in another terminal window run
53
+
54
+ rake test
55
+
56
+ It is still a TODO to get this to be just one command
57
+
58
+ ## More Information
59
+
60
+ Check out the [wiki](https://github.com/jubos/fake-s3/wiki)
@@ -0,0 +1,18 @@
# Rakefile: gem packaging tasks plus the test and test_server tasks.
require 'rubygems'
require 'bundler'
require 'rake/testtask'

# Pull the Rake DSL (task/desc/...) into the top-level scope.
include Rake::DSL

# Adds the gem build/install/release tasks.
Bundler::GemHelper.install_tasks

# `rake test` runs every test/*_test.rb except the s3_commands suite.
Rake::TestTask.new(:test) do |task|
  task.libs << "."
  task.test_files = FileList['test/*_test.rb'].exclude('test/s3_commands_test.rb')
end

desc "Run the test_server"
task :test_server do |t|
  # Boot a FakeS3 instance for the integration tests to talk to.
  system("bundle exec bin/fakes3 --port 10453 --root test_root")
end

task :default => :test
@@ -0,0 +1,6 @@
#!/usr/bin/env ruby
# Executable entry point for the fakes3 command-line tool.

# Make the local lib/ directory requirable when run from a checkout.
$LOAD_PATH << './lib'

require 'fakes3/cli'
FakeS3::CLI.start
@@ -0,0 +1,32 @@
# -*- encoding: utf-8 -*-
require File.join(File.dirname(__FILE__), 'lib', 'fakes3', 'version')

# Gem specification for the Ruby 1.8 compatible FakeS3 fork.
Gem::Specification.new do |spec|
  spec.name        = "fakes3-ruby18"
  spec.version     = FakeS3::VERSION
  spec.platform    = Gem::Platform::RUBY
  spec.authors     = ["Curtis Spencer"]
  spec.email       = ["thorin@gmail.com"]
  spec.homepage    = "https://github.com/jubos/fake-s3"
  spec.summary     = %q{FakeS3 is a server that simulates S3 commands so you can test your S3 functionality in your projects}
  spec.description = %q{Use FakeS3 to test basic S3 functionality without actually connecting to S3}
  spec.license     = "MIT"

  spec.rubyforge_project = "fakes3"

  # Development-only dependencies (S3 client gems used by the test suite,
  # plus tooling).
  spec.add_development_dependency "bundler", ">= 1.0.0"
  spec.add_development_dependency "aws-s3"
  spec.add_development_dependency "right_aws"
  spec.add_development_dependency "rest-client"
  spec.add_development_dependency "rake"
  spec.add_development_dependency "aws-sdk-v1"
  #spec.add_development_dependency "ruby-debug"
  #spec.add_development_dependency "debugger"

  # Runtime dependencies.
  spec.add_dependency "thor"
  spec.add_dependency "builder"

  # Package everything tracked by git; executables come from bin/.
  spec.files         = `git ls-files`.split("\n")
  spec.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
  spec.executables   = `git ls-files -- bin/*`.split("\n").map { |f| File.basename(f) }
  spec.require_paths = ["lib"]
end
@@ -0,0 +1,3 @@
# Top-level entry point: loading 'fakes3' pulls in the core pieces.
%w[version file_store server].each do |feature|
  require "fakes3/#{feature}"
end
@@ -0,0 +1,65 @@
require 'builder'
require 'thread'
require 'fakes3/s3_object'
require 'fakes3/sorted_object_list'

module FakeS3
  # An in-memory S3 bucket: a named, mutex-guarded collection of objects
  # kept in a SortedObjectList.
  class Bucket
    attr_accessor :name, :creation_date, :objects

    # name          -- bucket name
    # creation_date -- Time the bucket was created
    # objects       -- initial objects to seed the bucket with
    def initialize(name, creation_date, objects)
      @name = name
      @creation_date = creation_date
      @objects = SortedObjectList.new
      objects.each { |initial| @objects.add(initial) }
      @mutex = Mutex.new
    end

    # Look up a single object by name.
    def find(object_name)
      @mutex.synchronize { @objects.find(object_name) }
    end

    # Insert an object. The SortedObjectList is not thread safe, so all
    # access goes through the bucket mutex; finer-grained locking could
    # be used if performance ever matters here.
    def add(object)
      @mutex.synchronize { @objects.add(object) }
    end

    # Remove an object from the bucket.
    def remove(object)
      @mutex.synchronize { @objects.remove(object) }
    end

    # Run a list-objects style query and package the result in a
    # BucketQuery. Recognized options: :marker, :prefix, :max_keys
    # (defaults to 1000), :delimiter.
    def query_for_range(options)
      marker    = options[:marker]
      prefix    = options[:prefix]
      max_keys  = options[:max_keys] || 1000
      delimiter = options[:delimiter]

      match_set = @mutex.synchronize { @objects.list(options) }

      query = BucketQuery.new
      query.bucket          = self
      query.marker          = marker
      query.prefix          = prefix
      query.max_keys        = max_keys
      query.delimiter       = delimiter
      query.matches         = match_set.matches
      query.is_truncated    = match_set.is_truncated
      query.common_prefixes = match_set.common_prefixes
      return query
    end

  end
end
@@ -0,0 +1,11 @@
module FakeS3
  # Value object describing one list-objects request and its results.
  class BucketQuery
    attr_accessor :prefix, :matches, :marker, :max_keys,
                  :delimiter, :bucket, :is_truncated, :common_prefixes

    # Predicate-style reader for the truncation flag.
    def is_truncated?
      @is_truncated
    end
  end
end
@@ -0,0 +1,70 @@
require 'thor'
require 'fakes3/server'
require 'fakes3/version'

module FakeS3
  # Thor-powered command line interface. Running `fakes3` with no
  # subcommand invokes the server task.
  class CLI < Thor
    default_task("server")

    desc "server", "Run a server on a particular hostname"
    method_option :root, :type => :string, :aliases => '-r', :required => true
    method_option :port, :type => :numeric, :aliases => '-p', :required => true
    method_option :address, :type => :string, :aliases => '-a', :required => false, :desc => "Bind to this address. Defaults to 0.0.0.0"
    method_option :hostname, :type => :string, :aliases => '-H', :desc => "The root name of the host. Defaults to s3.amazonaws.com."
    method_option :limit, :aliases => '-l', :type => :string, :desc => 'Rate limit for serving (ie. 50K, 1.0M)'
    method_option :sslcert, :type => :string, :desc => 'Path to SSL certificate'
    method_option :sslkey, :type => :string, :desc => 'Path to SSL certificate key'

    # Boot a FakeS3 server backed by a FileStore rooted at --root.
    def server
      store = nil
      if options[:root]
        root = File.expand_path(options[:root])
        # TODO Do some sanity checking here
        store = FileStore.new(root)
      end

      abort "You must specify a root to use a file store (the current default)" if store.nil?

      hostname = 's3.amazonaws.com'
      if options[:hostname]
        hostname = options[:hostname]
        # Strip any port the user tacked onto the hostname.
        hostname = hostname.split(":").first if hostname =~ /:(\d+)/
      end

      if options[:limit]
        begin
          store.rate_limit = options[:limit]
        rescue
          # FileStore raises on malformed limit strings; surface its message.
          abort $!.message
        end
      end

      address       = options[:address] || '0.0.0.0'
      ssl_cert_path = options[:sslcert]
      ssl_key_path  = options[:sslkey]

      # SSL needs both halves of the pair; refuse a partial configuration.
      if (ssl_cert_path.nil? && !ssl_key_path.nil?) || (!ssl_cert_path.nil? && ssl_key_path.nil?)
        abort "If you specify an SSL certificate you must also specify an SSL certificate key"
      end

      puts "Loading FakeS3 with #{root} on port #{options[:port]} with hostname #{hostname}"
      server = FakeS3::Server.new(address, options[:port], store, hostname, ssl_cert_path, ssl_key_path)
      server.serve
    end

    desc "version", "Report the current fakes3 version"
    # Print a small banner with the running FakeS3 version.
    def version
      puts <<"EOF"
======================
FakeS3 #{FakeS3::VERSION}

Copyright 2012, Curtis Spencer (@jubos)
EOF
    end
  end
end
@@ -0,0 +1,46 @@
module FakeS3
  # Base class for S3-style errors. Subclasses declare their canned
  # message and HTTP status via the `traits` class-level DSL below.
  class FakeS3Exception < RuntimeError
    attr_accessor :resource,:request_id

    # Shortcut to the singleton class, where the trait setter methods live.
    def self.metaclass; class << self; self; end; end

    # Class-level DSL: `traits :message, :http_status` creates
    #   * instance accessors for each trait,
    #   * a class method per trait that records a default value, and
    #   * an #initialize that copies the recorded defaults into ivars.
    # Called with no arguments it returns the recorded trait values.
    def self.traits(*arr)
      return @traits if arr.empty?
      attr_accessor(*arr)

      arr.each do |a|
        metaclass.instance_eval do
          define_method( a ) do |val|
            @traits ||= {}
            @traits[a] = val
          end
        end
      end

      class_eval do
        define_method( :initialize ) do
          self.class.traits.each do |k,v|
            instance_variable_set("@#{k}", v)
          end
        end
      end
    end

    traits :message,:http_status

    # The S3 error code, e.g. "FakeS3::NoSuchBucket".
    def code
      self.class.to_s
    end
  end

  # Raised when an operation references a bucket that does not exist.
  # BUGFIX: the message used to be a copy-paste of BucketNotEmpty's
  # ("The bucket you tried to delete is not empty.").
  class NoSuchBucket < FakeS3Exception
    message "The specified bucket does not exist."
    http_status "404"
  end

  # Raised when deleting a bucket that still contains objects.
  class BucketNotEmpty < FakeS3Exception
    message "The bucket you tried to delete is not empty."
    http_status "409"
  end

end
@@ -0,0 +1,278 @@
require 'fileutils'
require 'time'
require 'webrick' # BUGFIX: store_object uses WEBrick::HTTPUtils but this file never required it
require 'fakes3/s3_object'
require 'fakes3/bucket'
require 'fakes3/rate_limitable_file'
require 'digest/md5'
require 'yaml'

module FakeS3
  # Disk-backed object store. Each bucket is a directory under @root; each
  # object is a directory containing a SHUCK_METADATA_DIR with two files:
  # "content" (the raw bytes) and "metadata" (a YAML-serialized hash).
  class FileStore
    SHUCK_METADATA_DIR = ".fakes3_metadataFFF"
    # S3 clients with overly strict date parsing fails to parse ISO 8601 dates
    # without any sub second precision (e.g. jets3t v0.7.2), and the examples
    # given in the official AWS S3 documentation specify three (3) decimals for
    # sub second precision.
    SUBSECOND_PRECISION = 3

    # Scan the root directory and register every existing bucket folder.
    def initialize(root)
      @root = root
      @buckets = []
      @bucket_hash = {}
      Dir[File.join(root,"*")].each do |bucket|
        bucket_name = File.basename(bucket)
        bucket_obj = Bucket.new(bucket_name,Time.now,[])
        @buckets << bucket_obj
        @bucket_hash[bucket_name] = bucket_obj
      end
    end

    # Pass a rate limit in bytes per second (Integer-like string), or a
    # suffixed string such as "50K", "1.1M", "2G". A non-String clears the
    # limit. Raises on an unparseable string.
    def rate_limit=(rate_limit)
      if rate_limit.is_a?(String)
        if rate_limit =~ /^(\d+)$/
          RateLimitableFile.rate_limit = rate_limit.to_i
        elsif rate_limit =~ /^(.*)K$/
          RateLimitableFile.rate_limit = $1.to_f * 1000
        elsif rate_limit =~ /^(.*)M$/
          RateLimitableFile.rate_limit = $1.to_f * 1000000
        elsif rate_limit =~ /^(.*)G$/
          RateLimitableFile.rate_limit = $1.to_f * 1000000000
        else
          raise "Invalid Rate Limit Format: Valid values include (1000,10K,1.1M)"
        end
      else
        RateLimitableFile.rate_limit = nil
      end
    end

    # All known buckets.
    def buckets
      @buckets
    end

    # Absolute path of a bucket's backing directory.
    def get_bucket_folder(bucket)
      File.join(@root,bucket.name)
    end

    # Look up a bucket by name; nil if unknown.
    def get_bucket(bucket)
      @bucket_hash[bucket]
    end

    # Create (idempotently) the bucket directory and register it.
    def create_bucket(bucket)
      FileUtils.mkdir_p(File.join(@root,bucket))
      bucket_obj = Bucket.new(bucket,Time.now,[])
      if !@bucket_hash[bucket]
        @buckets << bucket_obj
        @bucket_hash[bucket] = bucket_obj
      end
      bucket_obj
    end

    # Delete a bucket. Raises NoSuchBucket / BucketNotEmpty like S3 does.
    def delete_bucket(bucket_name)
      bucket = get_bucket(bucket_name)
      raise NoSuchBucket if !bucket
      raise BucketNotEmpty if bucket.objects.count > 0
      FileUtils.rm_r(get_bucket_folder(bucket))
      @bucket_hash.delete(bucket_name)
    end

    # Read an object (metadata + rate-limited content handle) out of a
    # bucket. Returns an S3Object, or nil when the object is missing or
    # its metadata is unreadable.
    def get_object(bucket,object_name, request)
      begin
        real_obj = S3Object.new
        obj_root = File.join(@root,bucket,object_name,SHUCK_METADATA_DIR)
        # BUGFIX: File.read instead of a File.open handle that was never
        # closed (leaked one fd per GET).
        metadata = YAML.load(File.read(File.join(obj_root,"metadata")))
        real_obj.name = object_name
        real_obj.md5 = metadata[:md5]
        real_obj.content_type = metadata.fetch(:content_type) { "application/octet-stream" }
        # Serve content through RateLimitableFile so the configured rate
        # limit (if any) applies.
        real_obj.io = RateLimitableFile.open(File.join(obj_root,"content"),'rb')
        real_obj.size = metadata.fetch(:size) { 0 }
        real_obj.creation_date = File.ctime(obj_root).utc.iso8601(SUBSECOND_PRECISION)
        real_obj.modified_date = metadata.fetch(:modified_date) do
          File.mtime(File.join(obj_root,"content")).utc.iso8601(SUBSECOND_PRECISION)
        end
        real_obj.custom_metadata = metadata.fetch(:custom_metadata) { {} }
        return real_obj
      rescue
        puts $!
        $!.backtrace.each { |line| puts line }
        return nil
      end
    end

    def object_metadata(bucket,object)
    end

    # Copy src_name in src_bucket to dst_name in dst_bucket. Honors the
    # x-amz-metadata-directive header: "REPLACE" regenerates metadata from
    # the request instead of copying the source's. Returns the new S3Object.
    def copy_object(src_bucket_name, src_name, dst_bucket_name, dst_name, request)
      src_root = File.join(@root,src_bucket_name,src_name,SHUCK_METADATA_DIR)
      src_metadata_filename = File.join(src_root,"metadata")
      # BUGFIX: File.read instead of an unclosed File.open(...).read.
      src_metadata = YAML.load(File.read(src_metadata_filename))
      src_content_filename = File.join(src_root,"content")

      dst_filename= File.join(@root,dst_bucket_name,dst_name)
      FileUtils.mkdir_p(dst_filename)

      metadata_dir = File.join(dst_filename,SHUCK_METADATA_DIR)
      FileUtils.mkdir_p(metadata_dir)

      content = File.join(metadata_dir,"content")
      metadata = File.join(metadata_dir,"metadata")

      # Skip the file copy when source and destination are the same object
      # (a self-copy is how clients rewrite metadata in place).
      if src_bucket_name != dst_bucket_name || src_name != dst_name
        File.open(content,'wb') do |f|
          File.open(src_content_filename,'rb') do |input|
            f << input.read
          end
        end

        File.open(metadata,'w') do |f|
          File.open(src_metadata_filename,'r') do |input|
            f << input.read
          end
        end
      end

      metadata_directive = request.header["x-amz-metadata-directive"].first
      if metadata_directive == "REPLACE"
        metadata_struct = create_metadata(content,request)
        File.open(metadata,'w') do |f|
          f << YAML::dump(metadata_struct)
        end
      end

      src_bucket = get_bucket(src_bucket_name) || create_bucket(src_bucket_name)
      dst_bucket = get_bucket(dst_bucket_name) || create_bucket(dst_bucket_name)

      obj = S3Object.new
      obj.name = dst_name
      obj.md5 = src_metadata[:md5]
      obj.content_type = src_metadata[:content_type]
      obj.size = src_metadata[:size]
      obj.modified_date = src_metadata[:modified_date]

      src_obj = src_bucket.find(src_name)
      dst_bucket.add(obj)
      return obj
    end

    # Store an object from a PUT/POST request body. Handles both raw bodies
    # and multipart/form-data (browser POST uploads).
    def store_object(bucket, object_name, request)
      filedata = ""

      # TODO put a tmpfile here first and mv it over at the end
      content_type = request.content_type || ""

      match = content_type.match(/^multipart\/form-data; boundary=(.+)/)
      boundary = match[1] if match
      if boundary
        boundary = WEBrick::HTTPUtils::dequote(boundary)
        form_data = WEBrick::HTTPUtils::parse_form_data(request.body, boundary)

        if form_data['file'] == nil or form_data['file'] == ""
          raise WEBrick::HTTPStatus::BadRequest
        end

        filedata = form_data['file']
      else
        request.body { |chunk| filedata << chunk }
      end

      do_store_object(bucket, object_name, filedata, request)
    end

    # Write content + metadata to disk and register the object with the
    # bucket. Returns the S3Object, or nil on failure (error is logged).
    def do_store_object(bucket, object_name, filedata, request)
      begin
        filename = File.join(@root,bucket.name,object_name)
        FileUtils.mkdir_p(filename)

        metadata_dir = File.join(filename,SHUCK_METADATA_DIR)
        FileUtils.mkdir_p(metadata_dir)

        content = File.join(filename,SHUCK_METADATA_DIR,"content")
        metadata = File.join(filename,SHUCK_METADATA_DIR,"metadata")

        File.open(content,'wb') { |f| f << filedata }

        metadata_struct = create_metadata(content,request)
        File.open(metadata,'w') do |f|
          f << YAML::dump(metadata_struct)
        end

        obj = S3Object.new
        obj.name = object_name
        obj.md5 = metadata_struct[:md5]
        obj.content_type = metadata_struct[:content_type]
        obj.size = metadata_struct[:size]
        obj.modified_date = metadata_struct[:modified_date]

        bucket.add(obj)
        return obj
      rescue
        puts $!
        $!.backtrace.each { |line| puts line }
        return nil
      end
    end

    # Complete a multipart upload: verify each part's MD5 against the
    # client-supplied etag, concatenate the parts in part-number order,
    # store the combined object, then delete the part directories.
    def combine_object_parts(bucket, upload_id, object_name, parts, request)
      upload_path = File.join(@root, bucket.name)
      base_path = File.join(upload_path, "#{upload_id}_#{object_name}")

      complete_file = ""
      chunk = ""
      part_paths = []

      parts.sort_by { |part| part[:number] }.each do |part|
        part_path = "#{base_path}_part#{part[:number]}"
        content_path = File.join(part_path, SHUCK_METADATA_DIR, 'content')

        File.open(content_path, 'rb') { |f| chunk = f.read }
        etag = Digest::MD5.hexdigest(chunk)

        # BUGFIX: was `raise new Error "invalid file chunk"`, which fails
        # with NoMethodError (there is no `new` method here) instead of
        # raising the intended error.
        raise "invalid file chunk" unless part[:etag] == etag
        complete_file << chunk
        part_paths << part_path
      end

      object = do_store_object(bucket, object_name, complete_file, request)

      # clean up parts
      part_paths.each do |path|
        FileUtils.remove_dir(path)
      end

      object
    end

    # Remove an object's files and its bucket registration. Returns nil on
    # failure (error is logged).
    def delete_object(bucket,object_name,request)
      begin
        filename = File.join(@root,bucket.name,object_name)
        FileUtils.rm_rf(filename)
        object = bucket.find(object_name)
        bucket.remove(object)
      rescue
        puts $!
        $!.backtrace.each { |line| puts line }
        return nil
      end
    end

    # TODO: abstract getting meta data from request.
    # Build the metadata hash (md5, content type, size, mtime, custom
    # x-amz-meta-* headers) for a freshly written content file.
    def create_metadata(content,request)
      metadata = {}
      metadata[:md5] = Digest::MD5.file(content).hexdigest
      metadata[:content_type] = request.header["content-type"].first
      metadata[:size] = File.size(content)
      metadata[:modified_date] = File.mtime(content).utc.iso8601(SUBSECOND_PRECISION)
      metadata[:custom_metadata] = {}

      # Add custom metadata from the request header
      request.header.each do |key, value|
        match = /^x-amz-meta-(.*)$/.match(key)
        if match && (match_key = match[1])
          metadata[:custom_metadata][match_key] = value.join(', ')
        end
      end
      return metadata
    end
  end
end