fog-aliyun 0.3.18 → 0.3.19
- checksums.yaml +4 -4
- data/CHANGELOG.md +7 -1
- data/fog-aliyun.gemspec +2 -2
- data/lib/fog/aliyun/models/storage/directories.rb +30 -53
- data/lib/fog/aliyun/models/storage/directory.rb +96 -17
- data/lib/fog/aliyun/models/storage/file.rb +127 -126
- data/lib/fog/aliyun/models/storage/files.rb +48 -127
- data/lib/fog/aliyun/requests/storage/abort_multipart_upload.rb +22 -0
- data/lib/fog/aliyun/requests/storage/complete_multipart_upload.rb +21 -0
- data/lib/fog/aliyun/requests/storage/copy_object.rb +14 -18
- data/lib/fog/aliyun/requests/storage/delete_bucket.rb +3 -9
- data/lib/fog/aliyun/requests/storage/delete_multiple_objects.rb +20 -0
- data/lib/fog/aliyun/requests/storage/delete_object.rb +10 -11
- data/lib/fog/aliyun/requests/storage/get_bucket.rb +22 -99
- data/lib/fog/aliyun/requests/storage/get_bucket_location.rb +33 -0
- data/lib/fog/aliyun/requests/storage/get_object.rb +20 -12
- data/lib/fog/aliyun/requests/storage/get_object_acl.rb +30 -0
- data/lib/fog/aliyun/requests/storage/get_object_http_url.rb +6 -9
- data/lib/fog/aliyun/requests/storage/get_object_https_url.rb +6 -9
- data/lib/fog/aliyun/requests/storage/get_service.rb +13 -0
- data/lib/fog/aliyun/requests/storage/head_object.rb +25 -13
- data/lib/fog/aliyun/requests/storage/initiate_multipart_upload.rb +19 -0
- data/lib/fog/aliyun/requests/storage/list_buckets.rb +5 -25
- data/lib/fog/aliyun/requests/storage/list_objects.rb +10 -62
- data/lib/fog/aliyun/requests/storage/put_bucket.rb +2 -8
- data/lib/fog/aliyun/requests/storage/put_object.rb +16 -122
- data/lib/fog/aliyun/requests/storage/upload_part.rb +24 -0
- data/lib/fog/aliyun/storage.rb +20 -4
- data/lib/fog/aliyun/version.rb +1 -1
- metadata +14 -10
- data/lib/fog/aliyun/requests/storage/delete_container.rb +0 -30
- data/lib/fog/aliyun/requests/storage/get_container.rb +0 -57
- data/lib/fog/aliyun/requests/storage/get_containers.rb +0 -61
- data/lib/fog/aliyun/requests/storage/put_container.rb +0 -29
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5f527fdb37f55f3f14e68b37fa7d038ba1b351e798d25bc761e18af8e2543c82
+  data.tar.gz: c7b4cdb85fe538f7934eafe57e4108d8fd411084fb3bb3cdd7c1cd0a28714923
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ef5fcc535228173a33fc33294c132ca1e6153bee3e9ac15c9626c06313eaba4e7e0e6a9f88457198e872042ee4a14697d040d772ec6e43203e891c89ee591fb4
+  data.tar.gz: 19054a55dd975915129f3e7cc0893e81aae38a844bc4d04363de0daf2d0cad484ea1cb62496f0813cb466d7689c65d1a3773a75c597ccbb7f5fcc30073e40d7b
data/CHANGELOG.md
CHANGED
data/fog-aliyun.gemspec
CHANGED
@@ -28,9 +28,9 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency 'rubocop'
   spec.add_development_dependency 'simplecov'
   spec.add_development_dependency 'memory_profiler'
-  spec.add_development_dependency 'aliyun-sdk', '~> 0.
+  spec.add_development_dependency 'aliyun-sdk', '~> 0.8.0'
 
-  spec.add_dependency 'aliyun-sdk', '~> 0.
+  spec.add_dependency 'aliyun-sdk', '~> 0.8.0'
   spec.add_dependency 'fog-core'
   spec.add_dependency 'fog-json'
   spec.add_dependency 'ipaddress', '~> 0.8'
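Both the development and runtime pins on aliyun-sdk move to '~> 0.8.0'. As a quick reminder of the pessimistic operator's semantics (standard RubyGems behavior, checked here with the stock API), this admits any 0.8.x release but not 0.9:

    # '~> 0.8.0' is equivalent to ">= 0.8.0" and "< 0.9.0"
    requirement = Gem::Requirement.new('~> 0.8.0')
    requirement.satisfied_by?(Gem::Version.new('0.8.5'))  # => true
    requirement.satisfied_by?(Gem::Version.new('0.9.0'))  # => false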
data/lib/fog/aliyun/models/storage/directories.rb
CHANGED
@@ -10,70 +10,47 @@ module Fog
         model Fog::Aliyun::Storage::Directory
 
         def all
-
-          return nil if
+          buckets = service.get_service[0]
+          return nil if buckets.size < 1
           data = []
           i = 0
-
-
-            key[-1] = ''
-            data[i] = { key: key }
+          buckets.each do |b|
+            data[i] = { key: b.name }
             i += 1
           end
-
           load(data)
         end
 
-
-        # If the directory is not exist, this method will create a new with 'key'
-        # In order to support multi-buckets scenario which making bucket as a solo directory, it have been expanded.
-        # If key is a directory(including /), return an existed or a new one;
-        # If key does not contain /, if bucket, return '', else return an existed or a new one directory;
+
         def get(key, options = {})
-
-
+          data = service.get_bucket(key, options)
+
+          directory = new(:key => key, :is_persisted => true)
+
+          options = data[1]
+          options[:max_keys] = options[:limit]
+          directory.files.merge_attributes(options)
+
+          objects = []
+          i = 0
+          data[0].each do |o|
+            objects[i] = {
+              'Key' => o.key,
+              'Type' => o.type,
+              'Size' => o.size,
+              'ETag' => o.etag,
+              'LastModified' => o.last_modified
+            }
+            i += 1
           end
-
-
-
-
-
-            new(key: key) if ret.data[:status] == 200
-          else
-            remap_attributes(options, {
-              :delimiter => 'delimiter',
-              :marker => 'marker',
-              :max_keys => 'max-keys',
-              :prefix => 'prefix'
-            })
-            data = service.get_bucket(key, options)
-            directory = new(:key => data['Name'], :is_persisted => true)
-            options = {}
-            for k, v in data
-              if ['CommonPrefixes', 'Delimiter', 'IsTruncated', 'Marker', 'MaxKeys', 'Prefix'].include?(k)
-                # Sometimes, the v will be a Array, like "Name"=>["blobstore-droplet1"], "Prefix"=>[{}], "Marker"=>[{}], "MaxKeys"=>["100"], "Delimiter"=>[{}], "IsTruncated"=>["false"]
-                # and there needs to parse them
-                if !v.nil? && (v.is_a? Array) && (v.size > 0)
-                  if v[0].is_a? Hash
-                    v = nil
-                  else
-                    v = v[0]
-                  end
-                end
-                options[k] = v
-              end
-            end
-            directory.files.merge_attributes(options)
-            if data.key?('Contents') && !data['Contents'].nil?
-              directory.files.load(data['Contents'])
-            end
-            directory
-          end
+          directory.files.load(objects)
+          directory
+        rescue AliyunOssSdk::ServerError => error
+          if error.error_code == "NoSuchBucket"
+            nil
           else
-
+            raise(error)
           end
-        rescue Fog::Aliyun::Storage::NotFound
-          nil
         end
       end
     end
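The rewritten collection delegates bucket listing to the SDK-backed get_service request and maps NoSuchBucket server errors to nil in #get, instead of parsing raw XML hashes. A minimal usage sketch; the connection option names follow the fog-aliyun README, and the credentials and bucket name are placeholders:

    require 'fog/aliyun'

    storage = Fog::Storage.new(
      provider: 'aliyun',
      aliyun_accesskey_id: 'ACCESS_KEY_ID',
      aliyun_accesskey_secret: 'ACCESS_KEY_SECRET',
      aliyun_region_id: 'cn-hangzhou'
    )

    # One directory per bucket; #all returns nil when the account has no buckets.
    (storage.directories.all || []).each { |dir| puts dir.key }

    # #get now returns nil for a missing bucket instead of raising.
    dir = storage.directories.get('my-bucket')
    puts dir.files.map(&:key) if dir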
data/lib/fog/aliyun/models/storage/directory.rb
CHANGED
@@ -7,24 +7,62 @@ module Fog
   module Aliyun
     class Storage
       class Directory < Fog::Model
+        VALID_ACLS = ['private', 'public-read', 'public-read-write']
+
+        attr_reader :acl
         identity :key, :aliases => ['Key', 'Name', 'name']
 
+        attribute :creation_date, :aliases => 'CreationDate', :type => 'time'
+
+        def acl=(new_acl)
+          unless VALID_ACLS.include?(new_acl)
+            raise ArgumentError.new("acl must be one of [#{VALID_ACLS.join(', ')}]")
+          else
+            @acl = new_acl
+          end
+        end
+
         def destroy
           requires :key
-
-
-
-          if
+          service.delete_bucket(key)
+          true
+        rescue AliyunOssSdk::ServerError => error
+          if error.error_code == "NoSuchBucket"
             false
-          elsif ret.size == 1
-            service.delete_container(key)
-            true
           else
-            raise
+            raise(error)
+          end
+        end
+
+        def destroy!(options = {})
+          requires :key
+          options = {
+            timeout: Fog.timeout,
+            interval: Fog.interval,
+          }.merge(options)
+
+          begin
+            clear!
+            Fog.wait_for(options[:timeout], options[:interval]) { objects_keys.size == 0 }
+            service.delete_bucket(key)
+            true
+          rescue AliyunOssSdk::ServerError
             false
           end
         end
 
+        def location
+          region = @aliyun_region_id
+          region ||= Storage::DEFAULT_REGION
+          @location = (bucket_location || 'oss-' + region)
+        end
+
+        # NOTE: you can't change the region once the bucket is created
+        def location=(new_location)
+          new_location = 'oss-' + new_location unless new_location.start_with?('oss-')
+          @location = new_location
+        end
+
         def files
           @files ||= begin
             Fog::Aliyun::Storage::Files.new(
@@ -34,6 +72,12 @@ module Fog
           end
         end
 
+        # TODO
+        def public=(new_public)
+          nil
+        end
+
+        # TODO
         def public_url
           nil
         end
@@ -41,18 +85,53 @@ module Fog
         def save
           requires :key
 
-
-
-
-
-
-
-
-
-          end
+          options = {}
+
+          options['x-oss-acl'] = acl if acl
+
+          # https://help.aliyun.com/document_detail/31959.html
+          # if !persisted?
+          #   # There is a sdk bug that location can not be set
+          #   options[:location] = location
+          # end
+
+          service.put_bucket(key, options)
+          attributes[:is_persisted] = true
 
           true
         end
+
+        def persisted?
+          # is_persisted is true in case of directories.get or after #save
+          # creation_date is set in case of directories.all
+          attributes[:is_persisted] || !!attributes[:creation_date]
+        end
+
+        private
+
+        def bucket_location
+          requires :key
+          return nil unless persisted?
+          service.get_bucket_location(key)
+        end
+
+        def objects_keys
+          requires :key
+          bucket_query = service.get_bucket(key)
+
+          object_keys = []
+          i = 0
+          bucket_query[0].each do |o|
+            object_keys[i] = o.key
+            i += 1
+          end
+          object_keys
+        end
+
+        def clear!
+          requires :key
+          service.delete_multiple_objects(key, objects_keys) if objects_keys.size > 0
+        end
       end
     end
   end
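Directories now validate ACLs up front, distinguish destroy from destroy!, and expose the bucket location. A sketch of the new surface, continuing the storage connection above (the bucket name and wait intervals are placeholders):

    dir = storage.directories.new(key: 'my-new-bucket')
    dir.acl = 'private'    # values outside VALID_ACLS raise ArgumentError
    dir.save               # put_bucket with the 'x-oss-acl' header
    dir.persisted?         # => true after #save, or when creation_date is set

    # destroy returns false only when the bucket is already gone (NoSuchBucket)
    # and re-raises other server errors; destroy! empties the bucket first,
    # waits for the listing to drain, then deletes it.
    dir.destroy!(timeout: 60, interval: 2)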
data/lib/fog/aliyun/models/storage/file.rb
CHANGED
@@ -7,77 +7,123 @@ module Fog
     class Storage
       class File < Fog::Model
         identity :key, aliases: ['Key', 'Name', 'name']
+
+        attr_writer :body
+        attribute :cache_control, aliases: 'Cache-Control'
+        attribute :content_encoding, aliases: 'Content-Encoding'
         attribute :date, aliases: 'Date'
-        attribute :content_length, aliases: 'Content-Length', type: :integer
+        attribute :content_length, aliases: ['Content-Length', 'Size'], type: :integer
+        attribute :content_md5, aliases: 'Content-MD5'
         attribute :content_type, aliases: 'Content-Type'
         attribute :connection, aliases: 'Connection'
         attribute :content_disposition, aliases: 'Content-Disposition'
-        attribute :etag, aliases: 'Etag'
+        attribute :etag, aliases: ['Etag', 'ETag']
+        attribute :expires, aliases: 'Expires'
+        attribute :metadata
+        attribute :owner, aliases: 'Owner'
         attribute :last_modified, aliases: 'Last-Modified', type: :time
         attribute :accept_ranges, aliases: 'Accept-Ranges'
         attribute :server, aliases: 'Server'
-        attribute :object_type, aliases: 'x-oss-object-type'
+        attribute :object_type, aliases: ['x-oss-object-type', 'x_oss_object_type']
+
+        # @note Chunk size to use for multipart uploads.
+        #   Use small chunk sizes to minimize memory. E.g. 5242880 = 5mb
+        attr_reader :multipart_chunk_size
+        def multipart_chunk_size=(mp_chunk_size)
+          raise ArgumentError.new("minimum multipart_chunk_size is 5242880") if mp_chunk_size < 5242880
+          @multipart_chunk_size = mp_chunk_size
+        end
+
+        def acl
+          requires :directory, :key
+          service.get_object_acl(directory.key, key)
+        end
+
+        def acl=(new_acl)
+          valid_acls = ['private', 'public-read', 'public-read-write', 'default']
+          unless valid_acls.include?(new_acl)
+            raise ArgumentError.new("acl must be one of [#{valid_acls.join(', ')}]")
+          end
+          @acl = new_acl
+        end
 
         def body
-          attributes[:body]
-
-
-
-
-
+          return attributes[:body] if attributes[:body]
+          return '' unless last_modified
+
+          file = collection.get(identity)
+          if file
+            attributes[:body] = file.body
+          else
+            attributes[:body] = ''
+          end
         end
 
         def body=(new_body)
           attributes[:body] = new_body
         end
 
-
+        def directory
+          @directory
+        end
 
+        # Copy object from one bucket to other bucket.
+        #
+        # required attributes: directory, key
+        #
+        # @param target_directory_key [String]
+        # @param target_file_key [String]
+        # @param options [Hash] options for copy_object method
+        # @return [String] Fog::Aliyun::Files#head status of directory contents
+        #
         def copy(target_directory_key, target_file_key, options = {})
           requires :directory, :key
-
-
-
-
-
-
-          target_bucket, target_directory_key = collection.check_directory_key(target_directory_key)
-          target_object = if target_directory_key == ''
-                            target_file_key
-                          else
-                            target_directory_key + '/' + target_file_key
-                          end
-          service.copy_object(source_bucket, source_object, target_bucket, target_object, options)
-          target_directory = service.directories.new(key: target_directory_key)
-          target_directory.files.get(target_file_key)
-        end
-
-        def destroy
+          service.copy_object(directory.key, key, target_directory_key, target_file_key, options)
+          target_directory = service.directories.new(:key => target_directory_key)
+          target_directory.files.head(target_file_key)
+        end
+
+        def destroy(options = {})
           requires :directory, :key
-
-
-
-          else
-            directory_key + '/' + key
-          end
-          service.delete_object(object, bucket: bucket_name)
+          # TODO support versionId
+          # attributes[:body] = nil if options['versionId'] == version
+          service.delete_object(directory.key, key, options)
           true
         end
 
+        remove_method :metadata
         def metadata
-          attributes
+          attributes.reject {|key, value| !(key.to_s =~ /^x-oss-/)}
+        end
+
+        remove_method :metadata=
+        def metadata=(new_metadata)
+          merge_attributes(new_metadata)
         end
 
+        remove_method :owner=
         def owner=(new_owner)
           if new_owner
             attributes[:owner] = {
-
-
+              :display_name => new_owner['DisplayName'] || new_owner[:display_name],
+              :id => new_owner['ID'] || new_owner[:id]
             }
           end
         end
 
+        # Set Access-Control-List permissions.
+        #
+        # valid new_publics: public_read, private
+        #
+        # @param [String] new_public
+        # @return [String] new_public
+        #
         def public=(new_public)
+          if new_public
+            @acl = 'public-read'
+          else
+            @acl = 'private'
+          end
           new_public
         end
 
@@ -86,50 +132,32 @@ module Fog
         # required attributes: directory, key
         #
         # @param expires [String] number of seconds (since 1970-01-01 00:00) before url expires
-        # @param options
+        # @param options[Hash] No need to use
        # @return [String] url
         #
         def url(expires, options = {})
-
-          expires = expires.nil? ? 0 : expires.to_i
-
-          requires :directory, :key
-          bucket_name, directory_key = collection.check_directory_key(directory.key)
-          object = if directory_key == ''
-                     key
-                   else
-                     directory_key + '/' + key
-                   end
-          service.get_object_http_url_public(object, expires, options.merge(bucket: bucket_name))
-        end
-
-        def public_url
           requires :key
-
+          service.get_object_http_url_public(directory.key, key, expires)
         end
 
         def save(options = {})
           requires :body, :directory, :key
-          options['
+          options['x-oss-object-acl'] ||= @acl if @acl
+          options['Cache-Control'] = cache_control if cache_control
           options['Content-Disposition'] = content_disposition if content_disposition
-          options
-
-
-
-
-
-
-          if body.
-
-            data = service.head_object(object, bucket: bucket_name)
-          elsif body.is_a?(String)
-            data = service.put_object_with_body(object, body, options.merge(bucket: bucket_name)).data
+          options['Content-Encoding'] = content_encoding if content_encoding
+          options['Content-MD5'] = content_md5 if content_md5
+          options['Content-Type'] = content_type if content_type
+          options['Expires'] = expires if expires
+          options.merge!(metadata)
+
+          self.multipart_chunk_size = 5242880 if !multipart_chunk_size && Fog::Storage.get_body_size(body) > 5368709120
+          if multipart_chunk_size && Fog::Storage.get_body_size(body) >= multipart_chunk_size && body.respond_to?(:read)
+            multipart_save(options)
           else
-
+            service.put_object(directory.key, key, body, options)
           end
-
-          refresh_metadata
-
+          self.etag = self.etag.gsub('"','') if self.etag
           self.content_length = Fog::Storage.get_body_size(body)
           self.content_type ||= Fog::Storage.get_content_type(body)
           true
@@ -137,70 +165,43 @@ module Fog
 
         private
 
-
-
-        def refresh_metadata
-          metadata.reject! { |_k, v| v.nil? }
-        end
-
-        def headers_to_metadata
-          key_map = key_mapping
-          Hash[metadata_attributes.map { |k, v| [key_map[k], v] }]
-        end
-
-        def key_mapping
-          key_map = metadata_attributes
-          key_map.each_pair { |k, _v| key_map[k] = header_to_key(k) }
+        def directory=(new_directory)
+          @directory = new_directory
         end
 
-        def
-
-
-
-        def metadata_to_headers
-          header_map = header_mapping
-          Hash[metadata.map { |k, v| [header_map[k], v] }]
-        end
-
-        def header_mapping
-          header_map = metadata.dup
-          header_map.each_pair { |k, _v| header_map[k] = key_to_header(k) }
-        end
-
-        def key_to_header(key)
-          metadata_prefix + key.to_s.split(/[-_]/).map(&:capitalize).join('-')
-        end
+        def multipart_save(options)
+          # Initiate the upload
+          upload_id = service.initiate_multipart_upload(directory.key, key, options)
 
-
-
-          bucket_name, directory_key = collection.check_directory_key(directory.key)
-          object = if directory_key == ''
-                     key
-                   else
-                     directory_key + '/' + key
-                   end
+          # Store ETags of upload parts
+          part_tags = []
 
-
-
-
-
-
-
-
+          # Upload each part
+          # TODO: optionally upload chunks in parallel using threads
+          # (may cause network performance problems with many small chunks)
+          # TODO: Support large chunk sizes without reading the chunk into memory
+          if body.respond_to?(:rewind)
+            body.rewind rescue nil
+          end
+          while (chunk = body.read(multipart_chunk_size)) do
+            part_upload = service.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk)
+            part_tags << part_upload
           end
-          end
 
-
-
-
+          if part_tags.empty? #it is an error to have a multipart upload with no parts
+            part_upload = service.upload_part(directory.key, key, upload_id, 1, '')
+            part_tags << part_upload
+          end
 
-
-
+        rescue
+          # Abort the upload & reraise
+          service.abort_multipart_upload(directory.key, key, upload_id) if upload_id
+          raise
+        else
+          # Complete the upload
+          service.complete_multipart_upload(directory.key, key, upload_id, part_tags)
         end
 
-        def update_attributes_from(data)
-          merge_attributes(data[:headers].reject { |key, _value| ['Content-Length', 'Content-Type'].include?(key) })
-        end
       end
     end
   end
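File#save now takes the multipart path whenever the body is an IO at least multipart_chunk_size bytes long (and forces a 5 MB chunk size for bodies over 5 GB), aborting the upload if any part fails. A usage sketch, again using the storage connection from the earlier sketch; names and paths are placeholders:

    dir = storage.directories.get('my-bucket')

    file = dir.files.new(key: 'backups/big.tar.gz')
    file.multipart_chunk_size = 10 * 1024 * 1024  # below 5242880 raises ArgumentError
    file.body = File.open('/tmp/big.tar.gz')      # IO body enables the chunked path
    file.save                                     # initiate_multipart_upload, one
                                                  # upload_part per chunk, then
                                                  # complete_multipart_upload

    file.public = true                   # applied as 'x-oss-object-acl' on the next save
    puts file.url(Time.now.to_i + 3600)  # public URL via get_object_http_url_public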