manage_s3_bucket 0.0.3 → 0.0.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: dde6a371a9ad8dfac1a067d160d4cd1cd424f996
-   data.tar.gz: cf5af5ae4c878c0a79da5c41cd174bbb3b23ebc1
+   metadata.gz: 7ba3626e48782dd839e5dff9b475afbe0f958bd4
+   data.tar.gz: 8fc596f3f870e4f8f560406ed2d10baafdc69122
  SHA512:
-   metadata.gz: 36febb344e664a9f8ffec61c8702032d200cd5a3cd4339a5095a06e24cd567518f40dfb9bf93a1ad31bfbe72877daa13b54938188056b8adb6bbfa9574386ca2
-   data.tar.gz: 6629c227e85346af7792e8633d9228504d43901cce0dbfb089cad2d230819886102045df7857252b6670b06e10eae60993d3c39aeef6b013cd77a3166d511e8a
+   metadata.gz: 51721bbb20836405f279ec1abe7cbae908862fb8e3fd8ebde415e9fbf97209d6aa180e6efcedc83a5b40f13e240206d0301bdc6c0fb536a48ca8e88a7c1e28ff
+   data.tar.gz: 3528b15e0c04f85d32a273784e495e61603ed8d63ca91e7945d49606618b9943670c4e0867234a393ed2b8ad6581eb85d37b45ab46296ed5922458f68f1fafbc
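These entries are the SHA1 and SHA512 digests of the metadata.gz and data.tar.gz members packed inside the published .gem archive. As a hedged sketch (not part of this gem) of how such digests can be checked, assuming the standard RubyGems package layout in which a .gem is a plain tar archive holding metadata.gz, data.tar.gz, and a checksums.yaml.gz whose content matches the file above; gem_checksums_ok? is a hypothetical helper name:

    require 'rubygems/package'
    require 'digest'
    require 'yaml'
    require 'zlib'

    # Hypothetical verifier (not part of manage_s3_bucket): recompute the
    # digests of the gem's tar members and compare them with the values
    # recorded in the bundled checksums.yaml.gz.
    def gem_checksums_ok?(gem_path)
      digests   = {}
      checksums = nil
      File.open(gem_path, 'rb') do |io|
        Gem::Package::TarReader.new(io) do |tar|
          tar.each do |entry|
            case entry.full_name
            when 'metadata.gz', 'data.tar.gz'
              body = entry.read
              digests[entry.full_name] = {
                'SHA1'   => Digest::SHA1.hexdigest(body),
                'SHA512' => Digest::SHA512.hexdigest(body)
              }
            when 'checksums.yaml.gz'
              checksums = YAML.safe_load(Zlib.gunzip(entry.read))
            end
          end
        end
      end
      # Every recorded digest must match the recomputed one.
      checksums.all? do |algorithm, files|
        files.all? { |name, hex| digests[name][algorithm] == hex }
      end
    end

    puts gem_checksums_ok?('manage_s3_bucket-0.0.4.gem')  # => true for an intact download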
lib/manage_s3_bucket.rb CHANGED
@@ -1,26 +1,118 @@
- require "manage_s3_bucket/version"
+ ###############################################################################################
+ # How to use
+ ## Get Bucket
+ ### s3 = Manage::S3::new(@aws_access_key_id, @aws_secret_key_id)
+ ## Remove Directory
+ ### s3.remove_path("bucket_name/folder/subfolder")
+ ## Clear all files and folders from bucket
+ ### s3.remove_path("bucket_name")
+ ## Copy path
+ ### s3.copy_path("backups-all-apps/upload_files/erp/2015.04.06.21.00.01", "backups-all-apps/folder/subfolder")
+ ### Not as fast as s3s3mirror (https://github.com/cobbzilla/s3s3mirror)
+
+ ###############################################################################################
+ require "manage_s3_bucket"
  require "right_aws_api"
+ require 'pp' # show thread errors
+ require 'work_queue'

  class Manage
    class S3
-     class Bucket
-       def initialize(bucket_name, aws_access_key_id, aws_secret_key_id)
-         @bucket_name = bucket_name
-         @aws_access_key_id = aws_access_key_id
-         @aws_secret_key_id = aws_secret_key_id
+     # nodoc
+     def initialize(aws_access_key_id, aws_secret_key_id)
+       @aws_access_key_id = aws_access_key_id
+       @aws_secret_key_id = aws_secret_key_id
+     end
+
+     def s3
+       @s3 ||= RightScale::CloudApi::AWS::S3::Manager::new(@aws_access_key_id, @aws_secret_key_id, 'https://s3.amazonaws.com')
+     end
+
+     def extract_path(path)
+       slice_path = path.split('/')
+       [slice_path[0], slice_path[1..-1].join('/')]
+     end
+
+     def list_directory(path, marker = nil)
+       bucket, directory = extract_path(path)
+       opt_list = {
+         'Bucket' => bucket,
+         'prefix' => directory,
+         "max-keys" => 1000
+       }
+       opt_list['marker'] = marker if marker
+       list = s3.ListObjects(opt_list)['ListBucketResult']
+       return list
+     end
+
+     def execute_in_path(path, marker = nil, &block)
+       marker ||= nil
+       bucket, directory = extract_path(path)
+       list = list_directory(path, marker)
+       objects = list['Contents']
+       if objects
+         keys = objects.map { |c| c['Key'] }
+         yield keys
+         if list["IsTruncated"] == "true"
+           marker = keys.last
+           p "Next Page: #{marker}"
+           execute_in_path path, marker do |keys|
+             yield keys
+           end
+         end
        end
-       def s3
-         @s3 ||= RightScale::CloudApi::AWS::S3::Manager::new(@aws_access_key_id, @aws_secret_key_id, 'https://s3.amazonaws.com')
+     end
+
+     def remove_path(path)
+       bucket, directory = extract_path(path)
+       p "Deleting #{path}"
+       execute_in_path path do |keys|
+         s3.DeleteMultipleObjects('Bucket' => bucket, 'Object' => keys)
+       end
+     end
+
+     def copy_key(_s3, source_path, dest_path, key)
+       copied = false
+       i = 1
+       source_bucket, source_directory = extract_path(source_path)
+       dest_bucket, dest_directory = extract_path(dest_path)
+       new_key = "#{dest_directory}#{key.gsub(source_directory, '')}"
+       while !copied
+         begin
+           _s3.CopyObject(
+             'SourceBucket' => source_bucket,
+             'SourceObject' => key,
+             'DestinationBucket' => dest_bucket,
+             'DestinationObject' => new_key
+           )
+           copied = true
+           pp "SUCCESS: Copied #{key} to #{new_key}"
+         rescue
+           i += 1
+           pp $!
+           sleep 10
+           pp '#'*200
+           pp "ERROR: Attempt #{i} to copy #{key} to #{new_key}"
+           _s3 = s3
+           copy_key(_s3, source_path, dest_path, key)
+         end
        end
-       def remove_directory(directory)
-         # deleting keys
-         objects = s3.ListObjects('Bucket' => @bucket_name, 'prefix' => directory)["ListBucketResult"]["Contents"]
-         if objects
-           keys = objects.map{|c| c["Key"]}
-           s3.DeleteMultipleObjects('Bucket' => @bucket_name, "Object" => keys)
-           remove_directory(directory) # S3 deletes at most 1000 objects per call, hence the recursion
+     end
+
+     def copy_path(source_path, dest_path)
+       # Not as fast as s3s3mirror (a Java tool)
+       max_threads = 500
+       wq = WorkQueue.new max_threads
+       execute_in_path source_path do |keys|
+         wq.enqueue_b do
+           _s3 = s3
+           keys.each do |key|
+             copy_key(_s3, source_path, dest_path, key)
+           end
         end
       end
     end
   end
 end
+
+
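The "How to use" block at the top of lib/manage_s3_bucket.rb documents the new public API. As a minimal usage sketch assembled from those comments (the credentials and bucket/folder names below are placeholders, not values from the gem):

    require 'manage_s3_bucket'

    # Placeholder credentials; supply your own AWS key pair.
    s3 = Manage::S3.new(aws_access_key_id, aws_secret_key_id)

    # Delete every key under a prefix; keys are listed and removed
    # 1000 at a time via the marker-based pagination in execute_in_path.
    s3.remove_path('bucket_name/folder/subfolder')

    # Passing just a bucket name clears the entire bucket.
    s3.remove_path('bucket_name')

    # Copy one prefix to another; each page of keys is handed to a
    # work_queue thread pool (up to 500 threads).
    s3.copy_path('source-bucket/folder', 'dest-bucket/folder')

Internally, extract_path splits the leading path segment off as the bucket name ("bucket_name/folder/sub" becomes ["bucket_name", "folder/sub"]), and execute_in_path recurses with the last key of each page as the next marker while ListObjects reports IsTruncated. Note that copy_path returns without joining the work queue, so a short-lived caller should keep the process alive until the enqueued copies finish.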
lib/manage_s3_bucket/version.rb CHANGED
@@ -1,3 +1,3 @@
  module ManageS3Bucket
-   VERSION = "0.0.3"
+   VERSION = "0.0.4"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: manage_s3_bucket
  version: !ruby/object:Gem::Version
-   version: 0.0.3
+   version: 0.0.4
  platform: ruby
  authors:
  - Rodrigo
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-04-07 00:00:00.000000000 Z
+ date: 2015-04-14 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: right_aws_api