aliyun-sdk 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.md +364 -0
- data/lib/aliyun/oss.rb +17 -0
- data/lib/aliyun/oss/bucket.rb +555 -0
- data/lib/aliyun/oss/client.rb +100 -0
- data/lib/aliyun/oss/config.rb +34 -0
- data/lib/aliyun/oss/download.rb +216 -0
- data/lib/aliyun/oss/exception.rb +116 -0
- data/lib/aliyun/oss/http.rb +282 -0
- data/lib/aliyun/oss/iterator.rb +74 -0
- data/lib/aliyun/oss/logging.rb +43 -0
- data/lib/aliyun/oss/multipart.rb +60 -0
- data/lib/aliyun/oss/object.rb +15 -0
- data/lib/aliyun/oss/protocol.rb +1432 -0
- data/lib/aliyun/oss/struct.rb +199 -0
- data/lib/aliyun/oss/upload.rb +195 -0
- data/lib/aliyun/oss/util.rb +88 -0
- data/lib/aliyun/oss/version.rb +9 -0
- data/spec/aliyun/oss/bucket_spec.rb +595 -0
- data/spec/aliyun/oss/client/bucket_spec.rb +338 -0
- data/spec/aliyun/oss/client/client_spec.rb +228 -0
- data/spec/aliyun/oss/client/resumable_download_spec.rb +217 -0
- data/spec/aliyun/oss/client/resumable_upload_spec.rb +318 -0
- data/spec/aliyun/oss/http_spec.rb +26 -0
- data/spec/aliyun/oss/multipart_spec.rb +675 -0
- data/spec/aliyun/oss/object_spec.rb +741 -0
- data/spec/aliyun/oss/service_spec.rb +142 -0
- data/spec/aliyun/oss/util_spec.rb +50 -0
- metadata +181 -0
@@ -0,0 +1,199 @@
|
|
1
|
+
# -*- encoding: utf-8 -*-
|
2
|
+
|
3
|
+
module Aliyun
|
4
|
+
module OSS
|
5
|
+
|
6
|
+
##
# Access Control List, it controls how the bucket/object can be
# accessed.
# * public-read-write: allow access(read&write) anonymously
# * public-read: allow read anonymously
# * private: access must be signatured
#
module ACL
  # Frozen so these shared constants cannot be mutated by callers
  # (mutable string constants are a common Ruby footgun).
  PUBLIC_READ_WRITE = "public-read-write".freeze
  PUBLIC_READ = "public-read".freeze
  PRIVATE = "private".freeze
end # ACL
|
18
|
+
|
19
|
+
##
# A OSS object may carry some metas(String key-value pairs) with
# it. MetaDirective specifies what to do with the metas in the
# copy process.
# * COPY: metas are copied from the source object to the dest
#   object
# * REPLACE: source object's metas are NOT copied, use user
#   provided metas for the dest object
#
module MetaDirective
  # Frozen so these shared constants cannot be mutated by callers.
  COPY = "COPY".freeze
  REPLACE = "REPLACE".freeze
end # MetaDirective
|
32
|
+
|
33
|
+
##
# The object key may contain unicode characters which cannot be
# encoded in the request/response body(XML). KeyEncoding specifies
# the encoding type for the object key.
# * url: the object key is url-encoded
# @note url-encoding is the only supported KeyEncoding type
#
module KeyEncoding
  URL = "url".freeze

  # All supported encodings. A frozen constant replaces the former
  # @@all class variable: class variables are shared across the
  # inheritance tree and the list should be immutable anyway.
  ALL = [URL].freeze

  # @param enc [String] an encoding name
  # @return [Boolean] whether +enc+ is a supported key encoding
  def self.include?(enc)
    all.include?(enc)
  end

  # @return [Array<String>] all supported key encodings
  def self.all
    ALL
  end
end # KeyEncoding
|
53
|
+
|
54
|
+
##
# Common structs used. It provides a 'attrs' helper method for
# subclasses to define their attributes. 'attrs' is based on
# attr_reader and provides additional functionalities for classes
# that inherit Struct::Base :
# * the constructor is provided to accept options and set the
#   corresponding attribute automatically
# * the #to_s method is rewritten to concatenate the defined
#   attributes keys and values
# @example
#   class X < Struct::Base
#     attrs :foo, :bar
#   end
#
#   x = X.new(:foo => 'hello', :bar => 'world')
#   x.foo # == "hello"
#   x.bar # == "world"
#   x.to_s # == "foo: hello, bar: world"
# @note this module is named Struct and therefore shadows Ruby's
#   built-in ::Struct inside the Aliyun::OSS namespace.
module Struct
  class Base
    # Mixed into Base's singleton class (via +extend+ below) so that
    # subclasses can declare their attributes with one macro call.
    module AttrHelper
      # Declares readable attributes for the subclass.
      # Defines an instance method #attrs that returns the declared
      # symbols (used by the constructor and #to_s), and creates a
      # reader for each of them.
      def attrs(*s)
        define_method(:attrs) {s}
        attr_reader(*s)
      end
    end

    extend AttrHelper

    # Accepts an options hash keyed by the declared attribute symbols
    # and assigns each value to the matching instance variable.
    # Undeclared attributes simply remain nil.
    # @param opts [Hash] attribute values keyed by attr symbol
    # @raise [ClientError] if opts contains keys not declared via .attrs
    def initialize(opts = {})
      extra_keys = opts.keys - attrs
      unless extra_keys.empty?
        fail ClientError, "Unexpected extra keys: #{extra_keys.join(', ')}"
      end

      attrs.each do |attr|
        instance_variable_set("@#{attr}", opts[attr])
      end
    end

    # Renders all declared attributes as "key: value" pairs joined
    # with ", "; attributes that were never set render with an empty
    # value (nil interpolates to "").
    def to_s
      attrs.map do |attr|
        v = instance_variable_get("@#{attr}")
        "#{attr.to_s}: #{v}"
      end.join(", ")
    end
  end # Base
end # Struct
|
102
|
+
|
103
|
+
##
# Bucket Logging setting. See: {http://help.aliyun.com/document_detail/oss/product-documentation/function/logging.html OSS Bucket logging}
# Attributes:
# * enable [Boolean] whether to enable bucket logging
# * target_bucket [String] the target bucket to store access logs
# * target_prefix [String] the target object prefix to store access logs
# @example Enable bucket logging
#  bucket.logging = BucketLogging.new(
#    :enable => true, :target_bucket => 'log_bucket', :target_prefix => 'my-log')
# @example Disable bucket logging
#  bucket.logging = BucketLogging.new(:enable => false)
class BucketLogging < Struct::Base
  attrs :enable, :target_bucket, :target_prefix

  # @return [Boolean] true only when :enable was set to exactly
  #   +true+ — truthy non-boolean values (e.g. the string "true")
  #   do NOT count as enabled
  def enabled?
    enable == true
  end
end
|
121
|
+
|
122
|
+
##
# Bucket website setting. See: {http://help.aliyun.com/document_detail/oss/product-documentation/function/host-static-website.html OSS Website hosting}
# Attributes:
# * enable [Boolean] whether to enable website hosting for the bucket
# * index [String] the index object as the index page for the website
# * error [String] the error object as the error page for the website
class BucketWebsite < Struct::Base
  attrs :enable, :index, :error

  # @return [Boolean] true only when :enable was set to exactly
  #   +true+ (strict comparison, not mere truthiness)
  def enabled?
    enable == true
  end
end
|
135
|
+
|
136
|
+
##
# Bucket referer setting. See: {http://help.aliyun.com/document_detail/oss/product-documentation/function/referer-white-list.html OSS Referer white-list}
# (link label corrected: it previously read "OSS Website hosting"
# although the URL is the referer white-list documentation)
# Attributes:
# * allow_empty [Boolean] whether to allow requests with empty "Referer"
# * whitelist [Array<String>] the allowed origins for requests
class BucketReferer < Struct::Base
  attrs :allow_empty, :whitelist

  # @return [Boolean] true only when :allow_empty was set to exactly
  #   +true+ (strict comparison, not mere truthiness)
  def allow_empty?
    allow_empty == true
  end
end
|
148
|
+
|
149
|
+
##
# LifeCycle rule for bucket. See: {http://help.aliyun.com/document_detail/oss/product-documentation/function/lifecycle.html OSS Bucket LifeCycle}
# Attributes:
# * id [String] the unique id of a rule
# * enable [Boolean] whether to enable this rule — note the attribute
#   is named +enable+ (see attrs below), not +enabled+
# * prefix [String] the prefix objects to apply this rule
# * expiry [Date] or [Integer] the expire time of objects
#   * if expiry is a Date, it specifies the absolute date to
#     expire objects
#   * if expiry is an Integer, it specifies the relative date to
#     expire objects: how many days after the object's last
#     modification time to expire the object
# @example Specify expiry as Date
#   LifeCycleRule.new(
#     :id => 'rule1',
#     :enable => true,
#     :prefix => 'foo/',
#     :expiry => Date.new(2016, 1, 1))
# @example Specify expiry as days
#   LifeCycleRule.new(
#     :id => 'rule1',
#     :enable => true,
#     :prefix => 'foo/',
#     :expiry => 15)
# @note the expiry date is treated as UTC time
# @note the examples originally passed :enabled => true, which
#   Struct::Base#initialize rejects with ClientError "Unexpected
#   extra keys" because the declared attribute is :enable — the
#   examples have been corrected to :enable.
class LifeCycleRule < Struct::Base

  attrs :id, :enable, :prefix, :expiry

  # @return [Boolean] true only when :enable was set to exactly
  #   +true+ (strict comparison, not mere truthiness)
  def enabled?
    enable == true
  end
end # LifeCycleRule
|
182
|
+
|
183
|
+
##
# CORS rule for bucket. See: {http://help.aliyun.com/document_detail/oss/product-documentation/function/referer-white-list.html OSS CORS}
# NOTE(review): the URL above points at the referer white-list page,
# not the CORS documentation — looks like a copy-paste; verify the
# correct CORS doc link before publishing.
# Attributes:
# * allowed_origins [Array<String>] the allowed origins
# * allowed_methods [Array<String>] the allowed methods
# * allowed_headers [Array<String>] the allowed headers
# * expose_headers [Array<String>] the expose headers
# * max_age_seconds [Integer] the max age seconds
class CORSRule < Struct::Base

  attrs :allowed_origins, :allowed_methods, :allowed_headers,
        :expose_headers, :max_age_seconds

end # CORSRule
|
197
|
+
|
198
|
+
end # OSS
|
199
|
+
end # Aliyun
|
@@ -0,0 +1,195 @@
|
|
1
|
+
# -*- encoding: utf-8 -*-

module Aliyun
  module OSS
    module Multipart
      ##
      # A multipart upload transaction
      #
      # Uploads a local file to an OSS object in parts. Progress is
      # persisted in a checkpoint file (unless options[:disable_cpt])
      # so that an interrupted upload can be resumed.
      class Upload < Transaction
        # Default size of one part (4MB); may be overridden via
        # options[:part_size].
        PART_SIZE = 4 * 1024 * 1024

        # Chunk size used when streaming a part from disk.
        READ_SIZE = 16 * 1024

        # @param protocol [Object] the protocol layer used to talk to OSS
        # @param opts [Hash] :progress, :file and :cpt_file are consumed
        #   here; all remaining options are forwarded to Transaction.
        def initialize(protocol, opts)
          args = opts.dup
          @protocol = protocol
          @progress = args.delete(:progress)
          @file = args.delete(:file)
          @checkpoint_file = args.delete(:cpt_file)
          @file_meta = {}
          @parts = []
          super(args)
        end

        # Run the upload transaction, which includes 3 stages:
        # * 1a. initiate(new upload) and divide parts
        # * 1b. rebuild states(resumed upload)
        # * 2. upload each unfinished part
        # * 3. commit the multipart upload transaction
        def run
          logger.info("Begin upload, file: #{@file}, checkpoint file: " \
                      "#{@checkpoint_file}")

          # Rebuild transaction states from checkpoint file
          # Or initiate new transaction states
          rebuild

          # Divide the file to upload into parts to upload separately
          divide_parts if @parts.empty?

          # Upload each part
          @parts.reject { |p| p[:done] }.each { |p| upload_part(p) }

          # Commit the multipart upload transaction
          commit

          logger.info("Done upload, file: #{@file}")
        end

        # Persist the current transaction states to the checkpoint file
        # and invoke the progress callback.
        # Checkpoint structures:
        # @example
        #   states = {
        #     :id => 'upload_id',
        #     :file => 'file',
        #     :file_meta => {
        #       :mtime => Time.now,
        #       :md5 => 1024
        #     },
        #     :parts => [
        #       {:number => 1, :range => [0, 100], :done => false},
        #       {:number => 2, :range => [100, 200], :done => true}
        #     ],
        #     :md5 => 'states_md5'
        #   }
        # NOTE(review): the top-level :md5 shown in the example is not
        # set here — presumably write_checkpoint adds it; verify
        # against the shared Transaction/checkpoint implementation.
        def checkpoint
          logger.debug("Begin make checkpoint, disable_cpt: #{options[:disable_cpt]}")

          ensure_file_not_changed

          states = {
            :id => id,
            :file => @file,
            :file_meta => @file_meta,
            :parts => @parts
          }

          # report progress
          if @progress
            done = @parts.count { |p| p[:done] }
            @progress.call(done.to_f / @parts.size) if done > 0
          end

          write_checkpoint(states, @checkpoint_file) unless options[:disable_cpt]

          logger.debug("Done make checkpoint, states: #{states}")
        end

        private
        # Commit the transaction when all parts are succefully uploaded
        # @todo handle undefined behaviors: commit succeeds in server
        #  but return error in client
        def commit
          logger.info("Begin commit transaction, id: #{id}")

          parts = @parts.map{ |p| Part.new(:number => p[:number], :etag => p[:etag])}
          @protocol.complete_multipart_upload(bucket, object, id, parts)

          # The checkpoint file is no longer needed once the server has
          # acknowledged the complete-multipart call.
          File.delete(@checkpoint_file) unless options[:disable_cpt]

          logger.info("Done commit transaction, id: #{id}")
        end

        # Rebuild the states of the transaction from checkpoint file
        def rebuild
          logger.info("Begin rebuild transaction, checkpoint: #{@checkpoint_file}")

          # File.exist? replaces File.exists?, which was deprecated and
          # removed in Ruby 3.2; `&& !` replaces `and not` per style guide.
          if File.exist?(@checkpoint_file) && !options[:disable_cpt]
            states = load_checkpoint(@checkpoint_file)

            # NOTE(review): @file_meta is still empty here (it is only
            # filled by #initiate), and #checkpoint stores the md5 under
            # states[:file_meta][:md5] rather than states[:file_md5] —
            # so this comparison is nil != nil and never fails. Verify
            # the intended keys before relying on this check.
            if states[:file_md5] != @file_meta[:md5]
              fail FileInconsistentError.new("The file to upload is changed.")
            end

            @id = states[:id]
            @file_meta = states[:file_meta]
            @parts = states[:parts]
          else
            initiate
          end

          # When the #initiate branch was taken, `states` is nil and
          # this log line prints an empty value.
          logger.info("Done rebuild transaction, states: #{states}")
        end

        # Start a brand-new multipart upload on the server, record the
        # file's mtime/md5 for later consistency checks, and take a
        # first checkpoint.
        def initiate
          logger.info("Begin initiate transaction")

          @id = @protocol.initiate_multipart_upload(bucket, object, options)
          @file_meta = {
            :mtime => File.mtime(@file),
            :md5 => get_file_md5(@file)
          }
          checkpoint

          logger.info("Done initiate transaction, id: #{id}")
        end

        # Upload a part
        # Streams the byte range p[:range] (begin inclusive, end
        # exclusive) of @file to the server in READ_SIZE chunks, then
        # marks the part done and checkpoints.
        def upload_part(p)
          logger.debug("Begin upload part: #{p}")

          result = nil
          File.open(@file) do |f|
            range = p[:range]
            pos = range.first
            f.seek(pos)

            result = @protocol.upload_part(bucket, object, id, p[:number]) do |sw|
              while pos < range.at(1)
                bytes = [READ_SIZE, range.at(1) - pos].min
                sw << f.read(bytes)
                pos += bytes
              end
            end
          end
          p[:done] = true
          p[:etag] = result.etag

          checkpoint

          logger.debug("Done upload part: #{p}")
        end

        # Devide the file into parts to upload
        # Part size is at least options[:part_size] (default PART_SIZE),
        # grown as needed so that at most 10000 parts are produced
        # (the OSS per-upload part limit).
        def divide_parts
          logger.info("Begin divide parts, file: #{@file}")

          max_parts = 10000
          file_size = File.size(@file)
          part_size = [@options[:part_size] || PART_SIZE, file_size / max_parts].max
          num_parts = (file_size - 1) / part_size + 1
          @parts = (1..num_parts).map do |i|
            {
              :number => i,
              :range => [(i-1) * part_size, [i * part_size, file_size].min],
              :done => false
            }
          end

          checkpoint

          logger.info("Done divide parts, parts: #{@parts}")
        end

        # Ensure file not changed during uploading
        # Cheap mtime comparison first; fall back to a full md5 scan
        # only when the mtime differs.
        def ensure_file_not_changed
          return if File.mtime(@file) == @file_meta[:mtime]

          if @file_meta[:md5] != get_file_md5(@file)
            fail FileInconsistentError, "The file to upload is changed."
          end
        end
      end # Upload

    end # Multipart
  end # OSS
end # Aliyun
|
@@ -0,0 +1,88 @@
|
|
1
|
+
# -*- encoding: utf-8 -*-

require 'time'
require 'base64'
require 'openssl'

module Aliyun
  module OSS
    ##
    # Util functions to help generate formatted Date, signatures,
    # etc.
    #
    module Util

      # Prefix for OSS specific HTTP headers
      HEADER_PREFIX = "x-oss-"

      class << self

        include Logging

        # Calculate request signatures
        # Builds the OSS canonical string-to-sign and signs it with
        # HMAC-SHA1. The byte-exact order matters: VERB, Content-MD5,
        # Content-Type and Date each followed by "\n", then the
        # canonicalized x-oss-* headers, then the canonicalized
        # resource.
        # @param key [String] the access key secret
        # @param verb [String] the HTTP verb, e.g. "PUT"
        # @param headers [Hash] request headers; only Content-MD5,
        #   Content-Type, Date and x-oss-* prefixed keys are used
        # @param resources [Hash] :path (defaults to "/") and optional
        #   :sub_res sub-resources hash
        # @return [String] the Base64-encoded signature
        def get_signature(key, verb, headers, resources)
          logger.debug("Sign, headers: #{headers}, resources: #{resources}")

          content_md5 = headers['Content-MD5'] || ""
          content_type = headers['Content-Type'] || ""
          date = headers['Date']

          # x-oss-* headers: lowercased, trimmed, sorted by name, one
          # "k:v\n" entry each.
          cano_headers = headers.select { |k, v| k.start_with?(HEADER_PREFIX) }
                         .map { |k, v| [k.downcase.strip, v.strip] }
                         .sort.map { |k, v| [k, v].join(":") + "\n" }.join

          # Sub-resources are sorted; valueless ones appear as bare
          # keys, valued ones as "k=v", joined with "&".
          cano_res = resources[:path] || "/"
          sub_res = (resources[:sub_res] || {})
                    .sort.map { |k, v| v ? [k, v].join("=") : k }.join("&")
          cano_res += "?#{sub_res}" unless sub_res.empty?

          string_to_sign =
            "#{verb}\n#{content_md5}\n#{content_type}\n#{date}\n" +
            "#{cano_headers}#{cano_res}"

          logger.debug("String to sign: #{string_to_sign}")

          Util.sign(key, string_to_sign)
        end

        # Sign a string using HMAC and BASE64
        # @param [String] key the secret key
        # @param [String] string_to_sign the string to sign
        # @return [String] the signature
        def sign(key, string_to_sign)
          Base64.strict_encode64(
            OpenSSL::HMAC.digest('sha1', key, string_to_sign))
        end

        # Calculate content md5
        # @param content [String] the content to digest
        # @return [String] Base64-encoded (not hex) MD5 digest, as
        #   required by the Content-MD5 header
        def get_content_md5(content)
          Base64.strict_encode64(OpenSSL::Digest::MD5.digest(content))
        end

      end # self
    end # Util
  end # OSS
end # Aliyun
|
66
|
+
|
67
|
+
# Monkey patch to support #to_bool
class String
  # Case-insensitive conversion of the string "true" to +true+;
  # everything else is +false+.
  #
  # Fixed: the original pattern /^true$/i used per-line anchors, so a
  # multi-line value such as "evil\ntrue" (or "true\n<anything>")
  # coerced to true. \A and \z anchor the whole string instead.
  # @return [Boolean]
  def to_bool
    return true if self =~ /\Atrue\z/i
    false
  end
end
|
74
|
+
|
75
|
+
# Monkey patch to support #symbolize_keys!
class Array
  # Recursively symbolize the keys of every Hash element, descending
  # into nested Arrays as well. Returns the receiver.
  def symbolize_keys!
    each do |element|
      element.symbolize_keys! if element.is_a?(Hash) || element.is_a?(Array)
    end
  end
end
|
81
|
+
|
82
|
+
# Monkey patch to support #symbolize_keys!
class Hash
  # Destructively convert every key to a Symbol, then recurse into
  # any Hash or Array values so nested structures are converted too.
  def symbolize_keys!
    keys.each { |key| self[key.to_sym] = delete(key) }
    values.each do |val|
      val.symbolize_keys! if val.is_a?(Hash) || val.is_a?(Array)
    end
  end
end
|