fakes3-docker 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +14 -0
- data/Gemfile +5 -0
- data/MIT-LICENSE +20 -0
- data/README.md +62 -0
- data/Rakefile +18 -0
- data/bin/fakes3 +6 -0
- data/fakes3.gemspec +33 -0
- data/lib/fakes3.rb +3 -0
- data/lib/fakes3/bucket.rb +65 -0
- data/lib/fakes3/bucket_query.rb +11 -0
- data/lib/fakes3/cli.rb +70 -0
- data/lib/fakes3/errors.rb +46 -0
- data/lib/fakes3/file_store.rb +282 -0
- data/lib/fakes3/rate_limitable_file.rb +21 -0
- data/lib/fakes3/s3_object.rb +19 -0
- data/lib/fakes3/server.rb +546 -0
- data/lib/fakes3/sorted_object_list.rb +137 -0
- data/lib/fakes3/unsupported_operation.rb +4 -0
- data/lib/fakes3/version.rb +3 -0
- data/lib/fakes3/xml_adapter.rb +222 -0
- data/test/aws_sdk_commands_test.rb +59 -0
- data/test/boto_test.rb +25 -0
- data/test/botocmd.py +87 -0
- data/test/local_s3_cfg +34 -0
- data/test/post_test.rb +54 -0
- data/test/right_aws_commands_test.rb +192 -0
- data/test/s3_commands_test.rb +209 -0
- data/test/s3cmd_test.rb +52 -0
- data/test/test_helper.rb +4 -0
- metadata +211 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA1:
+  metadata.gz: c3de236d260469450faeff406eb2f80085732bc6
+  data.tar.gz: 8ca413e3c812847298b2c85cab432b8ae1fbaac3
+SHA512:
+  metadata.gz: fb245968df5e0f5970b08e87acc8aa7ef81b825144d4564bc6197bb0e807c9d019eff55202868344a759b484f70b2089dc02f0621e1fe044c274924deac88fc0
+  data.tar.gz: 20add51c0e285b595774aa88dd6b8ed5c410d23d26112dcef599e3ad49c48cfaa1889bde6690e91fb9aede6e252a1e158d43d9705496e74d8f43694df9e29e5a
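
Note: these digests cover the two archives packed inside the .gem file, which is itself a plain tar archive. A minimal verification sketch (the extraction step and file paths are assumptions, not part of the package):

    # After unpacking the gem, e.g.: tar -xf fakes3-docker-0.2.4.gem
    require 'digest'

    %w[metadata.gz data.tar.gz].each do |file|
      puts "#{file} SHA1:   #{Digest::SHA1.file(file).hexdigest}"
      puts "#{file} SHA512: #{Digest::SHA512.file(file).hexdigest}"
    end
    # Compare the printed values against the checksums.yaml entries above.
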
data/.gitignore
ADDED
data/Gemfile
ADDED
data/MIT-LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2011,2012 Curtis W Spencer (@jubos) and Spool
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,62 @@
+## Introduction
+FakeS3 is a lightweight server that responds to the same calls Amazon S3 responds to.
+It is extremely useful for testing S3 in a sandbox environment without actually
+making calls to Amazon, which not only require network access, but also cost you precious dollars.
+
+The goal of Fake S3 is to minimize runtime dependencies and be more of a
+development tool to test S3 calls in your code rather than a production server
+looking to duplicate S3 functionality. Trying RiakCS, ParkPlace/Boardwalk, or
+Ceph might be a place to start if that is your goal.
+
+FakeS3 doesn't support all of the S3 command set, but the basic ones like put, get,
+list, copy, and make bucket are supported. More coming soon.
+
+## Installation
+
+    gem install fakes3
+
+## Running
+
+To run a fakes3 server, you just specify a root and a port.
+
+    fakes3 -r /mnt/fakes3_root -p 4567
+
+## Connecting to FakeS3
+
+Take a look at the test cases to see client example usage. For now, FakeS3 is
+mainly tested with s3cmd, the aws-s3 gem, and right_aws. There are plenty more
+libraries out there, so please do mention whether other clients work or not.
+
+Here is a running list of [supported clients](https://github.com/jubos/fake-s3/wiki/Supported-Clients "Supported Clients")
+
+## Running Tests
+
+There are some prerequisites to actually being able to run the unit/integration tests
+
+### On OSX
+
+Edit your /etc/hosts and add the following line:
+
+    127.0.0.1 posttest.localhost
+
+Then ensure that the following packages are installed (boto, s3cmd):
+
+    > pip install boto
+    > brew install s3cmd
+
+
+Start the test server using
+
+    rake test_server
+
+Then in another terminal window run
+
+    rake test
+
+It is still a TODO to get this down to just one command
+
+## More Information
+
+Check out the [wiki](https://github.com/jubos/fake-s3/wiki)
+
+[](https://gitter.im/jubos/fake-s3?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
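
Note: the test files listed in this diff show concrete client setups. As a short sketch of the same idea with the aws-sdk-v1 gem (a development dependency of this package), using the port from the Running example above and dummy credentials, which FakeS3 does not validate:

    require 'aws-sdk-v1'

    s3 = AWS::S3.new(
      :access_key_id     => '123',        # any value works; FakeS3 ignores credentials
      :secret_access_key => 'abc',
      :s3_endpoint       => 'localhost',  # assumes: fakes3 -r /mnt/fakes3_root -p 4567
      :s3_port           => 4567,
      :use_ssl           => false
    )

    bucket = s3.buckets.create('test_bucket')
    bucket.objects['hello.txt'].write('Hello from FakeS3')
    puts bucket.objects['hello.txt'].read
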
data/Rakefile
ADDED
@@ -0,0 +1,18 @@
+require 'rubygems'
+require 'bundler'
+require 'rake/testtask'
+include Rake::DSL
+Bundler::GemHelper.install_tasks
+
+Rake::TestTask.new(:test) do |t|
+  t.libs << "."
+  t.test_files =
+    FileList['test/*_test.rb'].exclude('test/s3_commands_test.rb')
+end
+
+desc "Run the test_server"
+task :test_server do |t|
+  system("bundle exec bin/fakes3 --port 10453 --root test_root")
+end
+
+task :default => :test
data/bin/fakes3
ADDED
data/fakes3.gemspec
ADDED
@@ -0,0 +1,33 @@
+# -*- encoding: utf-8 -*-
+require File.join(File.dirname(__FILE__), 'lib', 'fakes3', 'version')
+
+Gem::Specification.new do |s|
+  s.name        = "fakes3-docker"
+  s.version     = FakeS3::VERSION
+  s.platform    = Gem::Platform::RUBY
+  s.authors     = ["Curtis Spencer"]
+  s.email       = ["thorin@gmail.com"]
+  s.homepage    = "https://github.com/jubos/fake-s3"
+  s.summary     = %q{FakeS3 is a server that simulates S3 commands so you can test your S3 functionality in your projects}
+  s.description = %q{Use FakeS3 to test basic S3 functionality without actually connecting to S3}
+  s.license     = "MIT"
+
+  s.rubyforge_project = "fakes3-docker"
+
+  s.add_development_dependency "bundler", ">= 1.0.0"
+  s.add_development_dependency "aws-s3"
+  s.add_development_dependency "right_aws"
+  s.add_development_dependency "rest-client"
+  s.add_development_dependency "rake"
+  s.add_development_dependency "aws-sdk-v1"
+  s.add_development_dependency "test-unit"
+  #s.add_development_dependency "ruby-debug"
+  #s.add_development_dependency "debugger"
+  s.add_dependency "thor"
+  s.add_dependency "builder"
+
+  s.files         = `git ls-files`.split("\n")
+  s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
+  s.executables   = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
+  s.require_paths = ["lib"]
+end
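
Note: the gemspec shells out to `git ls-files` for its file lists, so the gem can only be built from a git checkout of the repository. A build sketch:

    gem build fakes3.gemspec            # produces fakes3-docker-0.2.4.gem
    gem install fakes3-docker-0.2.4.gem
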
data/lib/fakes3.rb
ADDED
data/lib/fakes3/bucket.rb
ADDED
@@ -0,0 +1,65 @@
+require 'builder'
+require 'thread'
+require 'fakes3/s3_object'
+require 'fakes3/sorted_object_list'
+
+module FakeS3
+  class Bucket
+    attr_accessor :name,:creation_date,:objects
+
+    def initialize(name,creation_date,objects)
+      @name = name
+      @creation_date = creation_date
+      @objects = SortedObjectList.new
+      objects.each do |obj|
+        @objects.add(obj)
+      end
+      @mutex = Mutex.new
+    end
+
+    def find(object_name)
+      @mutex.synchronize do
+        @objects.find(object_name)
+      end
+    end
+
+    def add(object)
+      # Unfortunately have to synchronize here since our SortedObjectList is
+      # not thread safe. Probably can get finer granularity if performance is
+      # important
+      @mutex.synchronize do
+        @objects.add(object)
+      end
+    end
+
+    def remove(object)
+      @mutex.synchronize do
+        @objects.remove(object)
+      end
+    end
+
+    def query_for_range(options)
+      marker = options[:marker]
+      prefix = options[:prefix]
+      max_keys = options[:max_keys] || 1000
+      delimiter = options[:delimiter]
+
+      match_set = nil
+      @mutex.synchronize do
+        match_set = @objects.list(options)
+      end
+
+      bq = BucketQuery.new
+      bq.bucket = self
+      bq.marker = marker
+      bq.prefix = prefix
+      bq.max_keys = max_keys
+      bq.delimiter = delimiter
+      bq.matches = match_set.matches
+      bq.is_truncated = match_set.is_truncated
+      bq.common_prefixes = match_set.common_prefixes
+      return bq
+    end
+
+  end
+end
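
Note: Bucket wraps the (not-thread-safe) SortedObjectList in a mutex and exposes S3-style listing through query_for_range. A minimal usage sketch, assuming lib/ is on the load path and that SortedObjectList#list and BucketQuery behave as bucket.rb uses them here:

    require 'fakes3/bucket'
    require 'fakes3/bucket_query'

    bucket = FakeS3::Bucket.new('logs', Time.now, [])

    obj = FakeS3::S3Object.new
    obj.name = '2012/01/app.log'
    bucket.add(obj)

    # Mirrors S3's GET Bucket (list objects) parameters
    query = bucket.query_for_range(:prefix => '2012/', :max_keys => 100)
    query.matches.each { |o| puts o.name }
    puts 'truncated' if query.is_truncated
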
data/lib/fakes3/cli.rb
ADDED
@@ -0,0 +1,70 @@
+require 'thor'
+require 'fakes3/server'
+require 'fakes3/version'
+
+module FakeS3
+  class CLI < Thor
+    default_task("server")
+
+    desc "server", "Run a server on a particular hostname"
+    method_option :root, :type => :string, :aliases => '-r', :required => true
+    method_option :port, :type => :numeric, :aliases => '-p', :required => true
+    method_option :address, :type => :string, :aliases => '-a', :required => false, :desc => "Bind to this address. Defaults to 0.0.0.0"
+    method_option :hostname, :type => :string, :aliases => '-H', :desc => "The root name of the host. Defaults to s3.amazonaws.com."
+    method_option :limit, :aliases => '-l', :type => :string, :desc => 'Rate limit for serving (ie. 50K, 1.0M)'
+    method_option :sslcert, :type => :string, :desc => 'Path to SSL certificate'
+    method_option :sslkey, :type => :string, :desc => 'Path to SSL certificate key'
+
+    def server
+      store = nil
+      if options[:root]
+        root = File.expand_path(options[:root])
+        # TODO Do some sanity checking here
+        store = FileStore.new(root)
+      end
+
+      if store.nil?
+        abort "You must specify a root to use a file store (the current default)"
+      end
+
+      hostname = 's3.amazonaws.com'
+      if options[:hostname]
+        hostname = options[:hostname]
+        # In case the user has put a port on the hostname
+        if hostname =~ /:(\d+)/
+          hostname = hostname.split(":")[0]
+        end
+      end
+
+      if options[:limit]
+        begin
+          store.rate_limit = options[:limit]
+        rescue
+          abort $!.message
+        end
+      end
+
+      address = options[:address] || '0.0.0.0'
+      ssl_cert_path = options[:sslcert]
+      ssl_key_path = options[:sslkey]
+
+      if (ssl_cert_path.nil? && !ssl_key_path.nil?) || (!ssl_cert_path.nil? && ssl_key_path.nil?)
+        abort "If you specify an SSL certificate you must also specify an SSL certificate key"
+      end
+
+      puts "Loading FakeS3 with #{root} on port #{options[:port]} with hostname #{hostname}"
+      server = FakeS3::Server.new(address,options[:port],store,hostname,ssl_cert_path,ssl_key_path)
+      server.serve
+    end
+
+    desc "version", "Report the current fakes3 version"
+    def version
+      puts <<"EOF"
+======================
+FakeS3 #{FakeS3::VERSION}
+
+Copyright 2012, Curtis Spencer (@jubos)
+EOF
+    end
+  end
+end
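
Note: the method_option declarations above map one-to-one onto command-line flags, and because "server" is the default task its name can be omitted. A sketch exercising every option (paths and values are placeholders):

    fakes3 server -r /mnt/fakes3_root -p 4567 -a 127.0.0.1 -H fakes3.local -l 1.0M --sslcert /path/cert.pem --sslkey /path/key.pem
    fakes3 version
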
data/lib/fakes3/errors.rb
ADDED
@@ -0,0 +1,46 @@
+module FakeS3
+  class FakeS3Exception < RuntimeError
+    attr_accessor :resource,:request_id
+
+    def self.metaclass; class << self; self; end; end
+
+    def self.traits(*arr)
+      return @traits if arr.empty?
+      attr_accessor *arr
+
+      arr.each do |a|
+        metaclass.instance_eval do
+          define_method( a ) do |val|
+            @traits ||= {}
+            @traits[a] = val
+          end
+        end
+      end
+
+      class_eval do
+        define_method( :initialize ) do
+          self.class.traits.each do |k,v|
+            instance_variable_set("@#{k}", v)
+          end
+        end
+      end
+    end
+
+    traits :message,:http_status
+
+    def code
+      self.class.to_s
+    end
+  end
+
+  class NoSuchBucket < FakeS3Exception
+    message "The specified bucket does not exist."
+    http_status "404"
+  end
+
+  class BucketNotEmpty < FakeS3Exception
+    message "The bucket you tried to delete is not empty."
+    http_status "409"
+  end
+
+end
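
Note: traits is a compact class-level DSL: for each trait name it defines a class method that stashes a value into @traits, plus a generated initialize that copies every stashed trait into an instance variable exposed through attr_accessor. Defining a new error then reads declaratively; a sketch with a hypothetical subclass (NoSuchKey is not part of this gem):

    module FakeS3
      class NoSuchKey < FakeS3Exception
        message "The specified key does not exist."
        http_status "404"
      end
    end

    err = FakeS3::NoSuchKey.new
    err.message      # => "The specified key does not exist."
    err.http_status  # => "404"
    err.code         # => "FakeS3::NoSuchKey"
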
data/lib/fakes3/file_store.rb
ADDED
@@ -0,0 +1,282 @@
+require 'fileutils'
+require 'time'
+require 'fakes3/s3_object'
+require 'fakes3/bucket'
+require 'fakes3/rate_limitable_file'
+require 'digest/md5'
+require 'yaml'
+
+module FakeS3
+  class FileStore
+    SHUCK_METADATA_DIR = ".fakes3_metadataFFF"
+    # S3 clients with overly strict date parsing fail to parse ISO 8601 dates
+    # without any sub second precision (e.g. jets3t v0.7.2), and the examples
+    # given in the official AWS S3 documentation specify three (3) decimals for
+    # sub second precision.
+    SUBSECOND_PRECISION = 3
+
+    def initialize(root)
+      @root = root
+      @buckets = []
+      @bucket_hash = {}
+      Dir[File.join(root,"*")].each do |bucket|
+        bucket_name = File.basename(bucket)
+        bucket_obj = Bucket.new(bucket_name,Time.now,[])
+        @buckets << bucket_obj
+        @bucket_hash[bucket_name] = bucket_obj
+      end
+    end
+
+    # Pass a rate limit in bytes per second
+    def rate_limit=(rate_limit)
+      if rate_limit.is_a?(String)
+        if rate_limit =~ /^(\d+)$/
+          RateLimitableFile.rate_limit = rate_limit.to_i
+        elsif rate_limit =~ /^(.*)K$/
+          RateLimitableFile.rate_limit = $1.to_f * 1000
+        elsif rate_limit =~ /^(.*)M$/
+          RateLimitableFile.rate_limit = $1.to_f * 1000000
+        elsif rate_limit =~ /^(.*)G$/
+          RateLimitableFile.rate_limit = $1.to_f * 1000000000
+        else
+          raise "Invalid Rate Limit Format: Valid values include (1000,10K,1.1M)"
+        end
+      else
+        RateLimitableFile.rate_limit = nil
+      end
+    end
+
+    def buckets
+      @buckets
+    end
+
+    def get_bucket_folder(bucket)
+      File.join(@root,bucket.name)
+    end
+
+    def get_bucket(bucket)
+      @bucket_hash[bucket]
+    end
+
+    def create_bucket(bucket)
+      FileUtils.mkdir_p(File.join(@root,bucket))
+      bucket_obj = Bucket.new(bucket,Time.now,[])
+      if !@bucket_hash[bucket]
+        @buckets << bucket_obj
+        @bucket_hash[bucket] = bucket_obj
+      end
+      bucket_obj
+    end
+
+    def delete_bucket(bucket_name)
+      bucket = get_bucket(bucket_name)
+      raise NoSuchBucket if !bucket
+      raise BucketNotEmpty if bucket.objects.count > 0
+      FileUtils.rm_r(get_bucket_folder(bucket))
+      @bucket_hash.delete(bucket_name)
+    end
+
+    def get_object(bucket,object_name, request)
+      begin
+        real_obj = S3Object.new
+        obj_root = File.join(@root,bucket,object_name,SHUCK_METADATA_DIR)
+        metadata = YAML.load(File.open(File.join(obj_root,"metadata"),'rb'))
+        real_obj.name = object_name
+        real_obj.md5 = metadata[:md5]
+        real_obj.content_type = metadata.fetch(:content_type) { "application/octet-stream" }
+        #real_obj.io = File.open(File.join(obj_root,"content"),'rb')
+        real_obj.io = RateLimitableFile.open(File.join(obj_root,"content"),'rb')
+        real_obj.size = metadata.fetch(:size) { 0 }
+        real_obj.creation_date = File.ctime(obj_root).utc.iso8601(SUBSECOND_PRECISION)
+        real_obj.modified_date = metadata.fetch(:modified_date) do
+          File.mtime(File.join(obj_root,"content")).utc.iso8601(SUBSECOND_PRECISION)
+        end
+        real_obj.custom_metadata = metadata.fetch(:custom_metadata) { {} }
+        return real_obj
+      rescue
+        puts $!
+        $!.backtrace.each { |line| puts line }
+        return nil
+      end
+    end
+
+    def object_metadata(bucket,object)
+    end
+
+    def copy_object(src_bucket_name, src_name, dst_bucket_name, dst_name, request)
+      src_root = File.join(@root,src_bucket_name,src_name,SHUCK_METADATA_DIR)
+      src_metadata_filename = File.join(src_root,"metadata")
+      src_metadata = YAML.load(File.open(src_metadata_filename,'rb').read)
+      src_content_filename = File.join(src_root,"content")
+
+      dst_filename = File.join(@root,dst_bucket_name,dst_name)
+      FileUtils.mkdir_p(dst_filename)
+
+      metadata_dir = File.join(dst_filename,SHUCK_METADATA_DIR)
+      FileUtils.mkdir_p(metadata_dir)
+
+      content = File.join(metadata_dir,"content")
+      metadata = File.join(metadata_dir,"metadata")
+
+      if src_bucket_name != dst_bucket_name || src_name != dst_name
+        File.open(content,'wb') do |f|
+          File.open(src_content_filename,'rb') do |input|
+            f << input.read
+          end
+        end
+
+        File.open(metadata,'w') do |f|
+          File.open(src_metadata_filename,'r') do |input|
+            f << input.read
+          end
+        end
+      end
+
+      metadata_directive = request.header["x-amz-metadata-directive"].first
+      if metadata_directive == "REPLACE"
+        metadata_struct = create_metadata(content,request)
+        File.open(metadata,'w') do |f|
+          f << YAML::dump(metadata_struct)
+        end
+      end
+
+      src_bucket = get_bucket(src_bucket_name) || create_bucket(src_bucket_name)
+      dst_bucket = get_bucket(dst_bucket_name) || create_bucket(dst_bucket_name)
+
+      obj = S3Object.new
+      obj.name = dst_name
+      obj.md5 = src_metadata[:md5]
+      obj.content_type = src_metadata[:content_type]
+      obj.size = src_metadata[:size]
+      obj.modified_date = src_metadata[:modified_date]
+
+      src_obj = src_bucket.find(src_name)
+      dst_bucket.add(obj)
+      return obj
+    end
+
+    def store_object(bucket, object_name, request)
+      filedata = ""
+
+      # TODO put a tmpfile here first and mv it over at the end
+      content_type = request.content_type || ""
+
+      match = content_type.match(/^multipart\/form-data; boundary=(.+)/)
+      boundary = match[1] if match
+      if boundary
+        boundary = WEBrick::HTTPUtils::dequote(boundary)
+        form_data = WEBrick::HTTPUtils::parse_form_data(request.body, boundary)
+
+        if form_data['file'] == nil or form_data['file'] == ""
+          raise WEBrick::HTTPStatus::BadRequest
+        end
+
+        filedata = form_data['file']
+      else
+        request.body { |chunk| filedata << chunk }
+      end
+
+      do_store_object(bucket, object_name, filedata, request)
+    end
+
+    def do_store_object(bucket, object_name, filedata, request)
+      begin
+        filename = File.join(@root,bucket.name,object_name)
+        FileUtils.mkdir_p(filename)
+
+        metadata_dir = File.join(filename,SHUCK_METADATA_DIR)
+        FileUtils.mkdir_p(metadata_dir)
+
+        content = File.join(filename,SHUCK_METADATA_DIR,"content")
+        metadata = File.join(filename,SHUCK_METADATA_DIR,"metadata")
+
+        File.open(content,'wb') { |f| f << filedata }
+
+        metadata_struct = create_metadata(content,request)
+        File.open(metadata,'w') do |f|
+          f << YAML::dump(metadata_struct)
+        end
+
+        obj = S3Object.new
+        obj.name = object_name
+        obj.md5 = metadata_struct[:md5]
+        obj.content_type = metadata_struct[:content_type]
+        obj.size = metadata_struct[:size]
+        obj.modified_date = metadata_struct[:modified_date]
+
+        bucket.add(obj)
+        return obj
+      rescue
+        puts $!
+        $!.backtrace.each { |line| puts line }
+        return nil
+      end
+    end
+
+    def combine_object_parts(bucket, upload_id, object_name, parts, request)
+      upload_path = File.join(@root, bucket.name)
+      base_path = File.join(upload_path, "#{upload_id}_#{object_name}")
+
+      complete_file = ""
+      chunk = ""
+      part_paths = []
+
+      parts.sort_by { |part| part[:number] }.each do |part|
+        part_path = "#{base_path}_part#{part[:number]}"
+        content_path = File.join(part_path, SHUCK_METADATA_DIR, 'content')
+
+        File.open(content_path, 'rb') { |f| chunk = f.read }
+        etag = Digest::MD5.hexdigest(chunk)
+
+        raise "invalid file chunk" unless part[:etag] == etag
+        complete_file << chunk
+        part_paths << part_path
+      end
+
+      object = do_store_object(bucket, object_name, complete_file, request)
+
+      # clean up parts
+      part_paths.each do |path|
+        FileUtils.remove_dir(path)
+      end
+
+      object
+    end
+
+    def delete_object(bucket,object_name,request)
+      begin
+        filename = File.join(@root,bucket.name,object_name)
+        FileUtils.rm_rf(filename)
+        object = bucket.find(object_name)
+        bucket.remove(object)
+      rescue
+        puts $!
+        $!.backtrace.each { |line| puts line }
+        return nil
+      end
+    end
+
+    # TODO: abstract getting meta data from request.
+    def create_metadata(content,request)
+      metadata = {}
+      metadata[:md5] = Digest::MD5.file(content).hexdigest
+      metadata[:content_type] = request.header["content-type"].first
+      metadata[:size] = File.size(content)
+      metadata[:modified_date] = File.mtime(content).utc.iso8601(SUBSECOND_PRECISION)
+      metadata[:amazon_metadata] = {}
+      metadata[:custom_metadata] = {}
+
+      # Add custom metadata from the request header
+      request.header.each do |key, value|
+        match = /^x-amz-([^-]+)-(.*)$/.match(key)
+        next unless match
+        if match[1].eql?('meta') && (match_key = match[2])
+          metadata[:custom_metadata][match_key] = value.join(', ')
+          next
+        end
+        metadata[:amazon_metadata][key.gsub(/^x-amz-/, '')] = value.join(', ')
+      end
+      return metadata
+    end
+  end
+end
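
Note: reading do_store_object and get_object together, every stored object is a directory under the store root, with the payload and a YAML metadata hash kept in the SHUCK_METADATA_DIR subdirectory. Using the README's example root, a PUT of mybucket/some/key lands roughly as:

    /mnt/fakes3_root/
      mybucket/
        some/key/
          .fakes3_metadataFFF/
            content     # raw object bytes
            metadata    # YAML hash: :md5, :content_type, :size,
                        #   :modified_date, :amazon_metadata, :custom_metadata
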