rack_fake_s3 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/.gitignore ADDED
@@ -0,0 +1,5 @@
+ pkg/*
+ *.gem
+ .bundle
+ tmp
+ test_root
data/Gemfile ADDED
@@ -0,0 +1,2 @@
+ source :rubygems
+ gemspec
data/Gemfile.lock ADDED
@@ -0,0 +1,37 @@
+ PATH
+   remote: .
+   specs:
+     rack_fake_s3 (0.2.0)
+       builder
+       rack
+       thor
+
+ GEM
+   remote: http://rubygems.org/
+   specs:
+     aws-s3 (0.6.2)
+       builder
+       mime-types
+       xml-simple
+     builder (3.0.0)
+     mime-types (1.18)
+     rack (1.4.1)
+     rake (0.9.2.2)
+     rest-client (1.6.7)
+       mime-types (>= 1.16)
+     right_aws (3.0.4)
+       right_http_connection (>= 1.2.5)
+     right_http_connection (1.3.0)
+     thor (0.16.0)
+     xml-simple (1.1.1)
+
+ PLATFORMS
+   ruby
+
+ DEPENDENCIES
+   aws-s3
+   bundler (>= 1.0.0)
+   rack_fake_s3!
+   rake
+   rest-client
+   right_aws
data/MIT-LICENSE ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2011,2012 Curtis W Spencer (@jubos) and Spool
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,39 @@
+ ## Introduction
+
+ This library is a modified version of the FakeS3 gem.
+ You can find the original implementation here: [https://github.com/jubos/fake-s3](https://github.com/jubos/fake-s3)
+
+ This version is modified to run on Rack.
+
+ ## What it does
+
+ Rack Fake S3 is a lightweight server that responds to the same calls Amazon S3 responds to.
+ It is extremely useful for testing S3 integrations in a sandbox environment without actually
+ making calls to Amazon, which not only requires network access but also costs you precious dollars.
+
+ The goal of Rack Fake S3 is to minimize runtime dependencies and to be a
+ development tool for testing the S3 calls in your code rather than a production
+ server that duplicates S3 functionality. If the latter is your goal, RiakCS,
+ ParkPlace/Boardwalk, or Ceph might be a better place to start.
+
+ Rack Fake S3 doesn't support the full S3 command set, but the basic calls (put, get,
+ list, copy, and make bucket) are supported. More are coming soon.
+
+ ## Installation
+
+ ### Rails (as a mounted Rack app)
+
+ Add rack_fake_s3 to your Gemfile and run `bundle`:
+
+     # Gemfile
+     gem 'rack_fake_s3'
+
+ Mount the app in your routes:
+
+     # config/routes.rb
+
+     constraints :host => domain do
+       mount RackFakeS3::App.new(root_path, domain)
+     end
+
+ Set up your app to point at the specified domain instead of the real AWS S3.
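The gem's own Gemfile.lock pulls in aws-s3 for testing, so a natural smoke test is to point that client at the mounted app. A minimal sketch, assuming the app is mounted at a placeholder host `fakes3.test` on port 3000; the credentials are arbitrary placeholders, since nothing in the code shown here validates them:

    require 'aws/s3'

    # Point aws-s3 at the Rack Fake S3 endpoint instead of s3.amazonaws.com.
    # Host, port, and keys are illustrative values, not defined by this gem.
    AWS::S3::Base.establish_connection!(
      :access_key_id     => 'fake',
      :secret_access_key => 'fake',
      :server            => 'fakes3.test',
      :port              => 3000
    )
    AWS::S3::Bucket.create('demo')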
data/Rakefile ADDED
@@ -0,0 +1,12 @@
+ require 'rubygems'
+ require 'bundler'
+ require 'rake/testtask'
+ include Rake::DSL
+ Bundler::GemHelper.install_tasks
+
+ Rake::TestTask.new(:test) do |t|
+   t.libs << "."
+   t.test_files = FileList['test/*_test.rb']
+ end
+
+ task :default => :test
data/config.ru ADDED
@@ -0,0 +1,4 @@
+ $:.unshift File.expand_path("../lib", __FILE__)
+ require 'rack_fake_s3'
+
+ run RackFakeS3::App.new(RackFakeS3::FileStore.new(ENV['FAKE_S3_ROOT']), ENV['FAKE_S3_HOSTNAME'])
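For standalone use, this rackup file implies an invocation along the lines of `FAKE_S3_ROOT=/tmp/fake_s3 FAKE_S3_HOSTNAME=localhost rackup config.ru` (root path and hostname here are placeholders); both environment variables must be set, since they are passed straight through to `FileStore.new` and `App.new`.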
data/lib/rack_fake_s3/bucket.rb ADDED
@@ -0,0 +1,64 @@
+ require 'builder'
+ require 'thread'
+ require 'rack_fake_s3/s3_object'
+ require 'rack_fake_s3/sorted_object_list'
+
+ module RackFakeS3
+   class Bucket
+     attr_accessor :name,:creation_date,:objects
+
+     def initialize(name,creation_date,objects)
+       @name = name
+       @creation_date = creation_date
+       @objects = SortedObjectList.new
+       objects.each do |obj|
+         @objects.add(obj)
+       end
+       @mutex = Mutex.new
+     end
+
+     def find(object_name)
+       @mutex.synchronize do
+         @objects.find(object_name)
+       end
+     end
+
+     def add(object)
+       # Unfortunately we have to synchronize here since SortedObjectList is
+       # not thread safe. We could probably get finer granularity if
+       # performance becomes important.
+       @mutex.synchronize do
+         @objects.add(object)
+       end
+     end
+
+     def remove(object)
+       @mutex.synchronize do
+         @objects.remove(object)
+       end
+     end
+
+     def query_for_range(options)
+       marker    = options[:marker]
+       prefix    = options[:prefix]
+       max_keys  = options[:max_keys] || 1000
+       delimiter = options[:delimiter]
+
+       match_set = nil
+       @mutex.synchronize do
+         match_set = @objects.list(options)
+       end
+
+       bq = BucketQuery.new
+       bq.bucket = self
+       bq.marker = marker
+       bq.prefix = prefix
+       bq.max_keys = max_keys
+       bq.delimiter = delimiter
+       bq.matches = match_set.matches
+       bq.is_truncated = match_set.is_truncated
+       return bq
+     end
+
+   end
+ end
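A minimal sketch of the Bucket API, assuming the gem's top-level `require 'rack_fake_s3'` loads `SortedObjectList` and `BucketQuery` as the requires above suggest; the values in comments are what the code implies rather than verified output:

    require 'rack_fake_s3'

    obj = RackFakeS3::S3Object.new
    obj.name = 'photos/cat.jpg'

    # Buckets are seeded with an object list and kept sorted internally.
    bucket = RackFakeS3::Bucket.new('demo', Time.now, [obj])
    bucket.find('photos/cat.jpg')    # the object added above
    result = bucket.query_for_range(:prefix => 'photos/', :max_keys => 10)
    result.matches                   # objects whose keys match the prefix
    result.is_truncated?             # false here, fewer than 10 keys match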
data/lib/rack_fake_s3/bucket_query.rb ADDED
@@ -0,0 +1,11 @@
+ module RackFakeS3
+   class BucketQuery
+     attr_accessor :prefix,:matches,:marker,:max_keys,
+                   :delimiter,:bucket,:is_truncated
+
+     # Syntactic sugar
+     def is_truncated?
+       @is_truncated
+     end
+   end
+ end
data/lib/rack_fake_s3/errors.rb ADDED
@@ -0,0 +1,46 @@
+ module RackFakeS3
+   class RackFakeS3Exception < RuntimeError
+     attr_accessor :resource,:request_id
+
+     def self.metaclass; class << self; self; end; end
+
+     def self.traits(*arr)
+       return @traits if arr.empty?
+       attr_accessor *arr
+
+       arr.each do |a|
+         metaclass.instance_eval do
+           define_method( a ) do |val|
+             @traits ||= {}
+             @traits[a] = val
+           end
+         end
+       end
+
+       class_eval do
+         define_method( :initialize ) do
+           self.class.traits.each do |k,v|
+             instance_variable_set("@#{k}", v)
+           end
+         end
+       end
+     end
+
+     traits :message,:http_status
+
+     def code
+       self.class.to_s
+     end
+   end
+
+   class NoSuchBucket < RackFakeS3Exception
+     message "The specified bucket does not exist."
+     http_status "404"
+   end
+
+   class BucketNotEmpty < RackFakeS3Exception
+     message "The bucket you tried to delete is not empty."
+     http_status "409"
+   end
+
+ end
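The `traits` class macro is terse, so a sketch may help: each trait name becomes a class-level setter that records a default, and the generated `initialize` copies those defaults into instance variables. The subclass below is hypothetical, not part of the gem:

    # A made-up error type, defined the same way as NoSuchBucket above.
    class NotImplementedByFake < RackFakeS3::RackFakeS3Exception
      message "This operation is not implemented by Rack Fake S3."
      http_status "501"
    end

    err = NotImplementedByFake.new
    err.message      # => "This operation is not implemented by Rack Fake S3."
    err.http_status  # => "501"
    err.code         # => "NotImplementedByFake"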
data/lib/rack_fake_s3/file_store.rb ADDED
@@ -0,0 +1,211 @@
+ require 'fileutils'
+ require 'time'
+ require 'rack_fake_s3/s3_object'
+ require 'rack_fake_s3/bucket'
+ require 'rack_fake_s3/rate_limitable_file'
+ require 'digest/md5'
+ require 'yaml'
+
+ module RackFakeS3
+   class FileStore
+     SHUCK_METADATA_DIR = ".rack_fake_s3_metadataFFF"
+
+     def initialize(root)
+       @root = root
+       @buckets = []
+       @bucket_hash = {}
+       Dir[File.join(root,"*")].each do |bucket|
+         bucket_name = File.basename(bucket)
+         bucket_obj = Bucket.new(bucket_name,Time.now,[])
+         @buckets << bucket_obj
+         @bucket_hash[bucket_name] = bucket_obj
+
+         # FIXME: this is not the best place to do this
+         # Dir[File.join(bucket, "*")].each do |s3object|
+         #   obj = get_object(bucket_name, File.basename(s3object), nil)
+         #   bucket_obj.add obj
+         # end
+       end
+     end
+
+     # Pass a rate limit in bytes per second
+     def rate_limit=(rate_limit)
+       if rate_limit.is_a?(String)
+         if rate_limit =~ /^(\d+)$/
+           RateLimitableFile.rate_limit = rate_limit.to_i
+         elsif rate_limit =~ /^(.*)K$/
+           RateLimitableFile.rate_limit = $1.to_f * 1000
+         elsif rate_limit =~ /^(.*)M$/
+           RateLimitableFile.rate_limit = $1.to_f * 1000000
+         elsif rate_limit =~ /^(.*)G$/
+           RateLimitableFile.rate_limit = $1.to_f * 1000000000
+         else
+           raise "Invalid Rate Limit Format: Valid values include (1000,10K,1.1M)"
+         end
+       else
+         RateLimitableFile.rate_limit = nil
+       end
+     end
+
+     def buckets
+       @buckets
+     end
+
+     def get_bucket_folder(bucket)
+       File.join(@root,bucket.name)
+     end
+
+     def get_bucket(bucket)
+       @bucket_hash[bucket]
+     end
+
+     def create_bucket(bucket)
+       FileUtils.mkdir_p(File.join(@root,bucket))
+       bucket_obj = Bucket.new(bucket,Time.now,[])
+       if !@bucket_hash[bucket]
+         @buckets << bucket_obj
+         @bucket_hash[bucket] = bucket_obj
+       end
+       bucket_obj
+     end
+
+     def delete_bucket(bucket_name)
+       bucket = get_bucket(bucket_name)
+       raise NoSuchBucket if !bucket
+       raise BucketNotEmpty if bucket.objects.count > 0
+       FileUtils.rm_r(get_bucket_folder(bucket))
+       @bucket_hash.delete(bucket_name)
+     end
+
+     def get_object(bucket,object_name, request)
+       begin
+         real_obj = S3Object.new
+         obj_root = File.join(@root,bucket,object_name,SHUCK_METADATA_DIR)
+         metadata = YAML.load(File.open(File.join(obj_root,"metadata"),'rb'))
+         real_obj.name = object_name
+         real_obj.md5 = metadata[:md5]
+         real_obj.content_type = metadata.fetch(:content_type) { "application/octet-stream" }
+         #real_obj.io = File.open(File.join(obj_root,"content"),'rb')
+         real_obj.io = RateLimitableFile.open(File.join(obj_root,"content"),'rb')
+         return real_obj
+       rescue
+         puts $!
+         $!.backtrace.each { |line| puts line }
+         return nil
+       end
+     end
+
+     def object_metadata(bucket,object)
+     end
+
+     def copy_object(src_bucket_name,src_name,dst_bucket_name,dst_name)
+       src_root = File.join(@root,src_bucket_name,src_name,SHUCK_METADATA_DIR)
+       src_metadata_filename = File.join(src_root,"metadata")
+       src_metadata = YAML.load(File.open(src_metadata_filename,'rb').read)
+       src_content_filename = File.join(src_root,"content")
+
+       dst_filename = File.join(@root,dst_bucket_name,dst_name)
+       FileUtils.mkdir_p(dst_filename)
+
+       metadata_dir = File.join(dst_filename,SHUCK_METADATA_DIR)
+       FileUtils.mkdir_p(metadata_dir)
+
+       content = File.join(metadata_dir,"content")
+       metadata = File.join(metadata_dir,"metadata")
+
+       File.open(content,'wb') do |f|
+         File.open(src_content_filename,'rb') do |input|
+           f << input.read
+         end
+       end
+
+       File.open(metadata,'w') do |f|
+         File.open(src_metadata_filename,'r') do |input|
+           f << input.read
+         end
+       end
+
+       src_bucket = self.get_bucket(src_bucket_name)
+       dst_bucket = self.get_bucket(dst_bucket_name)
+
+       obj = S3Object.new
+       obj.name = dst_name
+       obj.md5 = src_metadata[:md5]
+       obj.content_type = src_metadata[:content_type]
+
+       src_obj = src_bucket.find(src_name)
+       dst_bucket.add(obj)
+       src_bucket.remove(src_obj)
+       return obj
+     end
+
+     def store_object(bucket,object_name,request)
+       begin
+         filename = File.join(@root,bucket.name,object_name)
+         FileUtils.mkdir_p(filename)
+
+         metadata_dir = File.join(filename,SHUCK_METADATA_DIR)
+         FileUtils.mkdir_p(metadata_dir)
+
+         content = File.join(filename,SHUCK_METADATA_DIR,"content")
+         metadata = File.join(filename,SHUCK_METADATA_DIR,"metadata")
+
+         md5 = Digest::MD5.new
+         # TODO put a tmpfile here first and mv it over at the end
+
+         match = request.content_type.match(/^multipart\/form-data; boundary=(.+)/)
+         boundary = match[1] if match
+         if boundary
+           boundary = WEBrick::HTTPUtils::dequote(boundary)
+           filedata = WEBrick::HTTPUtils::parse_form_data(request.body, boundary)
+           raise WEBrick::HTTPStatus::BadRequest if filedata['file'].empty?
+           File.open(content, 'wb') do |f|
+             f << filedata['file']
+             md5 << filedata['file']
+           end
+         else
+           File.open(content,'wb') do |f|
+             request.body do |chunk|
+               f << chunk
+               md5 << chunk
+             end
+           end
+         end
+
+         metadata_struct = {}
+         metadata_struct[:md5] = md5.hexdigest
+         metadata_struct[:content_type] = request.media_type || ""
+
+         yaml = YAML::dump(metadata_struct)
+         File.open(metadata,'w') do |f|
+           f << yaml
+         end
+
+         obj = S3Object.new
+         obj.name = object_name
+         obj.md5 = metadata_struct[:md5]
+         obj.content_type = metadata_struct[:content_type]
+
+         bucket.add(obj)
+         return obj
+       rescue
+         puts $!
+         $!.backtrace.each { |line| puts line }
+         return nil
+       end
+     end
+
+     def delete_object(bucket,object_name,request)
+       begin
+         filename = File.join(@root,bucket.name,object_name)
+         FileUtils.rm_rf(filename)
+         object = bucket.find(object_name)
+         bucket.remove(object)
+       rescue
+         puts $!
+         $!.backtrace.each { |line| puts line }
+         return nil
+       end
+     end
+   end
+ end
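A minimal sketch of driving FileStore directly, with a placeholder root path; `rate_limit=` accepts the same string formats its error message lists:

    require 'rack_fake_s3'

    store = RackFakeS3::FileStore.new('/tmp/fake_s3_root')  # placeholder path
    store.rate_limit = '50K'      # reads throttled to roughly 50,000 bytes/sec
    store.create_bucket('demo')
    store.buckets.map(&:name)     # => ["demo"]
    store.delete_bucket('demo')   # raises BucketNotEmpty if objects remain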
data/lib/rack_fake_s3/rate_limitable_file.rb ADDED
@@ -0,0 +1,21 @@
+ module RackFakeS3
+   class RateLimitableFile < File
+     @@rate_limit = nil
+     # Specify a rate limit in bytes per second
+     def self.rate_limit
+       @@rate_limit
+     end
+
+     def self.rate_limit=(rate_limit)
+       @@rate_limit = rate_limit
+     end
+
+     def read(args)
+       if @@rate_limit
+         time_to_sleep = args / @@rate_limit
+         sleep(time_to_sleep)
+       end
+       return super(args)
+     end
+   end
+ end
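The throttle is plain arithmetic: each read first sleeps `requested_bytes / rate_limit` seconds, then delegates to `File#read`. Note that the divisor should be a Float, as the K/M/G branches of `FileStore#rate_limit=` produce; the bare-digits branch stores an Integer, which would truncate the division. A sketch with a placeholder file:

    RackFakeS3::RateLimitableFile.rate_limit = 1_000_000.0  # bytes per second
    RackFakeS3::RateLimitableFile.open('/tmp/example.bin', 'rb') do |f|
      f.read(250_000)  # sleeps 250_000 / 1_000_000.0 = 0.25 s, then reads
    end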
data/lib/rack_fake_s3/s3_object.rb ADDED
@@ -0,0 +1,19 @@
+ module RackFakeS3
+   class S3Object
+     include Comparable
+     attr_accessor :name,:size,:creation_date,:md5,:io,:content_type
+
+     def hash
+       @name.hash
+     end
+
+     def eql?(object)
+       @name == object.name
+     end
+
+     # Sort by the object's name
+     def <=>(object)
+       @name <=> object.name
+     end
+   end
+ end
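Since S3Object defines `hash`, `eql?`, and `<=>` purely in terms of `#name`, objects sort and deduplicate by key, which the SortedObjectList referenced from bucket.rb presumably relies on. A small illustration:

    a = RackFakeS3::S3Object.new; a.name = 'alpha'
    b = RackFakeS3::S3Object.new; b.name = 'beta'
    [b, a].sort.map(&:name)  # => ["alpha", "beta"]
    a.eql?(b)                # => false; identity is the key name alone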