s3_to_drive 0.1.1 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. checksums.yaml +4 -4
  2. data/lib/s3_to_drive/client/client.rb +30 -2
  3. data/lib/s3_to_drive/client/configuration.rb +18 -15
  4. data/lib/s3_to_drive/client/validation.rb +9 -0
  5. data/lib/s3_to_drive/concerns/drive_upload.rb +149 -0
  6. data/lib/s3_to_drive/concerns/drive_uploader.rb +198 -0
  7. data/lib/s3_to_drive/concerns/state_manager.rb +96 -0
  8. data/lib/s3_to_drive/exception/missing_config_error.rb +3 -33
  9. data/lib/s3_to_drive/version.rb +1 -1
  10. data/lib/s3_to_drive.rb +5 -2
  11. metadata +5 -17
  12. data/config/initializers/ s3_to_drive.rb +0 -6
  13. data/lib/concerns/transfer_s3_to_drive/.env +0 -22
  14. data/lib/concerns/transfer_s3_to_drive/Dockerfile +0 -14
  15. data/lib/concerns/transfer_s3_to_drive/app/__init__.py +0 -0
  16. data/lib/concerns/transfer_s3_to_drive/app/__pycache__/main.cpython-312.pyc +0 -0
  17. data/lib/concerns/transfer_s3_to_drive/app/config.py +0 -21
  18. data/lib/concerns/transfer_s3_to_drive/app/db.py +0 -18
  19. data/lib/concerns/transfer_s3_to_drive/app/drive_upload.py +0 -311
  20. data/lib/concerns/transfer_s3_to_drive/app/drive_uploader.py +0 -213
  21. data/lib/concerns/transfer_s3_to_drive/app/kidsly-admin.code-workspace +0 -8
  22. data/lib/concerns/transfer_s3_to_drive/app/main.py +0 -118
  23. data/lib/concerns/transfer_s3_to_drive/app/models.py +0 -38
  24. data/lib/concerns/transfer_s3_to_drive/app/state_manager.py +0 -160
  25. data/lib/concerns/transfer_s3_to_drive/docker-compose.yml +0 -20
  26. data/lib/concerns/transfer_s3_to_drive/requirements.txt +0 -20
  27. data/lib/concerns/transfer_s3_to_drive/service_account.json +0 -13
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: b0c426f8242c7448f1b2951ad60d5573b61b92ceb3e2443007c0183121b9befd
-  data.tar.gz: e3f37c4a2ec4931f95fdf63874ea2e171c981b80d2fe0ce196861239c3ccff2d
+  metadata.gz: b7f9841bcacc693525edfb210d91fc4ab55185fcd885efe232e41829146cdb05
+  data.tar.gz: 3ee8e6668a761b6ea1072fa3419cae834bff070662947aee7ecf2df19754d71b
 SHA512:
-  metadata.gz: c83626376f55d2ee3f9bd42595984636bbb500236303e793cef3c10ec7e44dcb1844d79a265939dba222d5f09e042396abc3edcb6500b29b0d3c66cb28d01c90
-  data.tar.gz: 6e88bbef05ce46a322c5993e92a0bc80ac8e2a2d1060a653e8d296c46f14599451b9a5f88b9685aa191d60bee483098c67b1a2d703803f2fca6fe6c894369725
+  metadata.gz: 51450cde1370cb2fa047718ae99b41a9f77a26500d3e9cacb0082a80460f01def891b7335d692d7890866f3b3184d37d54ed880669d2766178f6d38d5c625b4a
+  data.tar.gz: aa8b7a75bc988238f0337da7c86c771267bb01023225fb14b3e7f6d1a79bcdbeba97c786d30d4e76230353da58ac60a2c95b1308f37dc5a16e54d8dd0e8febfe
data/lib/s3_to_drive/client/client.rb CHANGED
@@ -1,3 +1,9 @@
+require_relative "configuration"
+require_relative "validation"
+require_relative "../concerns/drive_upload"
+require_relative "../concerns/state_manager"
+require_relative "../concerns/drive_uploader"
+
 module S3ToDrive
   module Client
     class Client
@@ -10,13 +16,35 @@ module S3ToDrive
        end
      end
 
-      def transfer
+      def transfer(s3_path, drive_id, folder_name)
+        S3ToDrive.validate!
        puts "Bucket name: #{bucket_name}"
        puts "AWS Region: #{aws_region}"
        puts "AWS Access Key ID: #{aws_access_key_id}"
        puts "AWS Secret Access Key: #{aws_secret_access_key}"
-      end
+        puts "S3 Path: #{s3_path}"
+        puts "Drive ID: #{drive_id}"
+        puts "Folder Name: #{folder_name}"
 
+        begin
+          stream_s3_to_drive(
+            s3_bucket: bucket_name,
+            s3_key: s3_path,
+            drive_folder_id: drive_id,
+            folder_name: folder_name
+          )
+        rescue Interrupt
+          puts "\n\n[INTERRUPTED] Upload interrupted by user"
+          puts "[INFO] State saved. Resume by running the same command again."
+          exit(1)
+        rescue StandardError => e
+          puts "\n[FATAL ERROR] #{e}"
+          puts e.backtrace
+          exit(1)
+        end
+
+        puts "🎉 DONE"
+      end
     end
   end
 end
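
For orientation, a minimal sketch of how the 1.0.0 entry point might be driven. The region, bucket, key path, and Drive folder ID below are placeholders; S3ToDrive.transfer reaches Client#transfer through the module-level method_missing delegation shown later in lib/s3_to_drive.rb.

    require "s3_to_drive"

    # Placeholder values -- substitute real credentials and IDs.
    S3ToDrive.configure do |config|
      config.aws_access_key_id     = ENV["AWS_ACCESS_KEY_ID"]
      config.aws_secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
      config.aws_region            = "ap-northeast-1"
      config.bucket_name           = "example-bucket"
      config.service_account_file  = "service_account.json"
    end

    # Equivalent to S3ToDrive.new.transfer(...) via method_missing delegation.
    S3ToDrive.transfer("videos/big_file.mp4", "DRIVE_FOLDER_ID", "backups")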
data/lib/s3_to_drive/client/configuration.rb CHANGED
@@ -1,33 +1,36 @@
 module S3ToDrive
   module Configuration
-
     VALID_OPTIONS_KEYS = %i[
       aws_access_key_id
       aws_secret_access_key
       aws_region
       bucket_name
+      service_account_file
     ].freeze
 
     attr_accessor(*VALID_OPTIONS_KEYS)
 
-    # def self.extended(base)
-    #   base.reset
-    # end
-
-    # def reset
-    #   self.aws_access_key_id = '123'
-    #   self.aws_secret_access_key = '123'
-    #   self.aws_region = '123'
-    #   self.bucket_name = '123'
-    #   self
-    # end
-
     def validate!
       missing_keys = VALID_OPTIONS_KEYS.select { |key| send(key).nil? }
-      raise MissingConfigurationError, "Missing configuration: #{missing_keys.join(", ")}" if missing_keys.any?
+      return unless missing_keys.any?
+
+      raise S3ToDrive::MissingConfigurationError,
+            "Missing configuration: #{missing_keys.join(", ")}"
+    end
+
+    def self.extended(base)
+      base.reset
+    end
+
+    def reset
+      self.aws_access_key_id = "AKIA3VOL6WH7UI2CD4N7"
+      self.aws_secret_access_key = "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"
+      self.aws_region = "ap-northeast-1"
+      self.bucket_name = "kidsly-dev-private-contents"
+      self.service_account_file = "service_account.json"
     end
 
-    # Support S3ToDrive::Configuration.configure { |c| ... }
+    # Support S3ToDrive.configure { |c| ... }
     def configure
       yield self
       self
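
As a quick check of the new validate! behavior, a hedged sketch: because reset now seeds every key at extend time, a key has to be nil'd out to see the error path.

    S3ToDrive.configure { |config| config.aws_region = nil }

    begin
      S3ToDrive.validate!
    rescue S3ToDrive::MissingConfigurationError => e
      warn e.message  # => "Missing configuration: aws_region"
    end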
data/lib/s3_to_drive/client/validation.rb ADDED
@@ -0,0 +1,9 @@
+module S3ToDrive
+  module Client
+    module Validation
+      def check_configuration
+        S3ToDrive.validate!
+      end
+    end
+  end
+end
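
Validation is a plain mixin around S3ToDrive.validate!; a hypothetical host class (TransferJob is invented for illustration) could use it like this:

    class TransferJob
      include S3ToDrive::Client::Validation
    end

    # Raises S3ToDrive::MissingConfigurationError if any config key is nil.
    TransferJob.new.check_configuration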
data/lib/s3_to_drive/concerns/drive_upload.rb ADDED
@@ -0,0 +1,149 @@
+require 'aws-sdk-s3'
+require 'json'
+require 'time'
+
+require_relative 'state_manager'
+require_relative 'drive_uploader' # defines DriveUpload (was a self-require of this file)
+
+def stream_s3_to_drive(s3_bucket:, s3_key:, drive_folder_id:, folder_name:)
+  # Create a state file to persist upload progress
+  state_mgr = StateManager.new('state.json')
+  # Create the uploader that pushes chunks to Google Drive
+  uploader = DriveUpload.new(service_account_file: service_account_file)
+  # Create an S3 client to read the source object from S3
+  s3_client = Aws::S3::Client.new(
+    region: aws_region,
+    access_key_id: aws_access_key_id,
+    secret_access_key: aws_secret_access_key
+  )
+
+  puts "S3 to Google Drive Transfer"
+  puts "S3 Bucket: #{s3_bucket}"
+  puts "S3 Key: #{s3_key}"
+  puts "Drive Folder: #{drive_folder_id}"
+  puts "Folder Name: #{folder_name}"
+
+  # ===== Get file info =====
+  begin
+    head = s3_client.head_object(bucket: s3_bucket, key: s3_key)
+    file_size = head.content_length
+    filename = File.basename(s3_key)
+
+    puts "File: #{filename}"
+    puts "Size: #{file_size} bytes (#{(file_size.to_f / 1024**3).round(2)} GB)"
+  rescue => e
+    puts "[ERROR] Failed to get S3 file info: #{e}"
+    exit(1)
+  end
+
+  # ===== Load state =====
+  state = state_mgr.load
+
+  if state
+    puts "[RESUME] Found existing upload state"
+    puts "[RESUME] Progress: #{state['uploaded_bytes']} / #{file_size}"
+    puts "[RESUME] #{(state['uploaded_bytes'] * 100.0 / file_size).round(1)}%\n"
+
+    if state['s3_key'] != s3_key || state['file_size'] != file_size
+      puts "[WARNING] State mismatch. Restarting..."
+      state_mgr.delete
+      state = nil
+    end
+  end
+
+  # ===== New upload =====
+  unless state
+    puts "[NEW] Starting new upload...\n"
+
+    begin
+      puts "[DRIVE] Checking folder: #{folder_name}"
+
+      folder_id = uploader.find_folder(
+        folder_name: folder_name,
+        parent_folder_id: drive_folder_id
+      )
+
+      unless folder_id
+        puts "[DRIVE] Creating folder..."
+        folder_id = uploader.create_folder(
+          folder_name: folder_name,
+          parent_folder_id: drive_folder_id
+        )
+      end
+
+      upload_url = uploader.create_resumable_upload(
+        filename: filename,
+        folder_id: folder_id,
+        file_size: file_size
+      )
+    rescue => e
+      puts "[ERROR] Failed to init upload: #{e}"
+      exit(1)
+    end
+
+    state = state_mgr.create_initial_state(
+      s3_bucket: s3_bucket,
+      s3_key: s3_key,
+      upload_url: upload_url,
+      file_size: file_size,
+      chunk_size: DriveUpload::CHUNK_SIZE
+    )
+  end
+
+  # ===== Upload loop =====
+  upload_url = state['upload_url']
+  chunk_size = state['chunk_size']
+  offset = state['uploaded_bytes']
+  total_chunks = state['total_chunks']
+
+  start_time = Time.now
+
+  while offset < file_size
+    end_byte = [offset + chunk_size, file_size].min
+    range = "bytes=#{offset}-#{end_byte - 1}"
+
+    puts "[CHUNK] #{offset} - #{end_byte}"
+
+    begin
+      # Read from S3
+      resp = s3_client.get_object(
+        bucket: s3_bucket,
+        key: s3_key,
+        range: range
+      )
+
+      chunk_data = resp.body.read
+
+      # Upload to Drive
+      success, new_offset = uploader.upload_chunk(
+        chunk_data: chunk_data,
+        offset: offset,
+        file_size: file_size,
+        upload_url: upload_url
+      )
+
+      raise "Upload failed" unless success
+
+      offset = new_offset
+      state_mgr.update_progress(offset)
+
+      # Progress
+      elapsed = Time.now - start_time
+      speed = offset / elapsed
+      percent = offset * 100.0 / file_size
+      eta = (file_size - offset) / speed
+
+      puts "✅ #{percent.round(1)}% | #{(speed / 1024**2).round(1)} MB/s | ETA #{(eta / 60).round} min"
+    rescue => e
+      puts "[ERROR] #{e}"
+      puts "[INFO] Resume supported"
+      exit(1)
+    end
+  end
+
+  puts "\n✅ UPLOAD COMPLETED!"
+
+  state_mgr.delete
+end
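
The loop above derives inclusive S3 Range headers from a running offset; a self-contained sketch of that arithmetic (the 700 MB file size is an arbitrary example):

    chunk_size = 256 * 1024 * 1024           # DriveUpload::CHUNK_SIZE
    file_size  = 700 * 1024 * 1024           # arbitrary example size
    offset     = 0

    while offset < file_size
      end_byte = [offset + chunk_size, file_size].min
      puts "bytes=#{offset}-#{end_byte - 1}"  # S3 byte ranges are inclusive
      offset = end_byte                       # next chunk starts here
    end
    # => bytes=0-268435455
    #    bytes=268435456-536870911
    #    bytes=536870912-734003199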
data/lib/s3_to_drive/concerns/drive_uploader.rb ADDED
@@ -0,0 +1,198 @@
+# lib/s3_to_drive/concerns/drive_uploader.rb
+
+require 'net/http'
+require 'json'
+require 'uri'
+require 'googleauth'
+require 'stringio'
+
+class DriveUpload
+  CHUNK_SIZE = 256 * 1024 * 1024 # 256MB
+  MAX_RETRIES = 5
+  SCOPES = ['https://www.googleapis.com/auth/drive.file']
+
+  def initialize(service_account_file:)
+    @authorizer = Google::Auth::ServiceAccountCredentials.make_creds(
+      json_key_io: File.open(service_account_file),
+      scope: SCOPES
+    )
+  end
+
+  # ===== AUTH =====
+  def access_token
+    @authorizer.fetch_access_token!['access_token']
+  end
+
+  # ===== FIND FOLDER =====
+  def find_folder(folder_name:, parent_folder_id:)
+    token = access_token
+
+    query = "name='#{folder_name}' and mimeType='application/vnd.google-apps.folder' and '#{parent_folder_id}' in parents and trashed=false"
+
+    uri = URI("https://www.googleapis.com/drive/v3/files")
+    uri.query = URI.encode_www_form({
+      q: query,
+      fields: 'files(id,name)',
+      supportsAllDrives: 'true',
+      includeItemsFromAllDrives: 'true'
+    })
+
+    req = Net::HTTP::Get.new(uri)
+    req['Authorization'] = "Bearer #{token}"
+
+    res = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(req) }
+
+    return nil unless res.code.to_i == 200
+
+    data = JSON.parse(res.body)
+    files = data['files'] || []
+
+    if files.any?
+      folder_id = files.first['id']
+      puts "[DRIVE] Found folder: #{folder_name} (#{folder_id})"
+      return folder_id
+    end
+
+    nil
+  end
+
+  # ===== CREATE FOLDER =====
+  def create_folder(folder_name:, parent_folder_id:)
+    token = access_token
+
+    uri = URI("https://www.googleapis.com/drive/v3/files?supportsAllDrives=true")
+
+    body = {
+      name: folder_name,
+      mimeType: 'application/vnd.google-apps.folder',
+      parents: [parent_folder_id]
+    }
+
+    req = Net::HTTP::Post.new(uri)
+    req['Authorization'] = "Bearer #{token}"
+    req['Content-Type'] = 'application/json'
+    req.body = body.to_json
+
+    res = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(req) }
+
+    unless [200, 201].include?(res.code.to_i)
+      raise "Create folder failed: #{res.code} #{res.body}"
+    end
+
+    data = JSON.parse(res.body)
+    folder_id = data['id']
+
+    puts "[DRIVE] Created folder: #{folder_name} (#{folder_id})"
+    folder_id
+  end
+
+  # ===== CREATE RESUMABLE UPLOAD =====
+  def create_resumable_upload(filename:, folder_id:, file_size:)
+    token = access_token
+
+    uri = URI("https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable&supportsAllDrives=true")
+
+    metadata = {
+      name: filename,
+      parents: [folder_id]
+    }
+
+    req = Net::HTTP::Post.new(uri)
+    req['Authorization'] = "Bearer #{token}"
+    req['Content-Type'] = 'application/json; charset=UTF-8'
+    req['X-Upload-Content-Type'] = 'application/octet-stream'
+    req['X-Upload-Content-Length'] = file_size.to_s
+    req.body = metadata.to_json
+
+    res = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(req) }
+
+    unless res.code.to_i == 200
+      raise "Create upload session failed: #{res.code} #{res.body}"
+    end
+
+    upload_url = res['Location']
+    raise "No upload URL" unless upload_url
+
+    puts "[DRIVE] Resumable session created"
+    puts "[DRIVE] Upload URL: #{upload_url[0..80]}..."
+
+    upload_url
+  end
+
+  # ===== GET UPLOAD STATUS =====
+  def get_upload_status(upload_url:, file_size:)
+    uri = URI(upload_url)
+
+    req = Net::HTTP::Put.new(uri)
+    req['Content-Range'] = "bytes */#{file_size}"
+    req['Content-Length'] = '0'
+
+    res = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(req) }
+
+    if res.code.to_i == 308
+      range = res['Range']
+      if range
+        uploaded = range.split('-').last.to_i + 1
+        puts "[DRIVE] Uploaded: #{uploaded}"
+        return uploaded
+      end
+    end
+
+    0
+  end
+
+  # ===== UPLOAD CHUNK =====
+  def upload_chunk(chunk_data:, offset:, file_size:, upload_url:)
+    chunk_size = chunk_data.bytesize
+    end_byte = offset + chunk_size - 1
+
+    uri = URI(upload_url)
+
+    MAX_RETRIES.times do |attempt|
+      begin
+        req = Net::HTTP::Put.new(uri)
+        req['Content-Range'] = "bytes #{offset}-#{end_byte}/#{file_size}"
+        req['Content-Length'] = chunk_size.to_s
+        req.body = chunk_data
+
+        res = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true, read_timeout: 300) do |http|
+          http.request(req)
+        end
+
+        case res.code.to_i
+        when 200, 201
+          puts "[DRIVE] ✅ Upload complete"
+          return [true, file_size]
+
+        when 308
+          puts "[DRIVE] ✅ Chunk uploaded #{offset} - #{end_byte}"
+          return [true, end_byte + 1]
+
+        when 429
+          wait = [2**attempt, 60].min
+          puts "[429] Retry #{attempt + 1}/#{MAX_RETRIES}, wait #{wait}s"
+          sleep(wait)
+          next
+
+        else
+          puts "[ERROR] HTTP #{res.code}"
+          puts res.body[0..200]
+          raise "Upload failed"
+        end
+
+      rescue => e
+        puts "[ERROR] #{e}"
+
+        if attempt < MAX_RETRIES - 1
+          wait = [2**attempt, 10].min
+          puts "[RETRY] #{attempt + 1}/#{MAX_RETRIES}, wait #{wait}s"
+          sleep(wait)
+        else
+          raise
+        end
+      end
+    end
+
+    raise "Max retries exceeded at offset #{offset}"
+  end
+end
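
DriveUpload can also be exercised on its own. A hedged sketch, assuming a valid service_account.json is present; "backups", "big.bin", and PARENT_ID are placeholders:

    uploader = DriveUpload.new(service_account_file: "service_account.json")

    folder_id = uploader.find_folder(folder_name: "backups", parent_folder_id: "PARENT_ID") ||
                uploader.create_folder(folder_name: "backups", parent_folder_id: "PARENT_ID")

    url = uploader.create_resumable_upload(filename: "big.bin", folder_id: folder_id,
                                           file_size: 1024)

    # upload_chunk returns [success, next_offset]; HTTP 308 means "keep sending".
    done, next_offset = uploader.upload_chunk(chunk_data: "\0" * 1024, offset: 0,
                                              file_size: 1024, upload_url: url)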
data/lib/s3_to_drive/concerns/state_manager.rb ADDED
@@ -0,0 +1,96 @@
+# lib/s3_to_drive/concerns/state_manager.rb
+
+require 'json'
+require 'time'
+require 'fileutils'
+
+class StateManager
+  attr_reader :state_file, :state
+
+  def initialize(state_file = 'state.json')
+    @state_file = state_file
+    @state = nil
+  end
+
+  # Load state from file
+  def load
+    return nil unless File.exist?(state_file)
+
+    begin
+      @state = JSON.parse(File.read(state_file))
+
+      required_fields = %w[upload_url file_size uploaded_bytes s3_key]
+      unless required_fields.all? { |f| @state.key?(f) }
+        puts "[WARNING] Invalid state file, missing fields. Starting fresh..."
+        return nil
+      end
+
+      puts "[STATE] Loaded: #{@state['uploaded_bytes']} / #{@state['file_size']} bytes"
+      @state
+
+    rescue JSON::ParserError, IOError => e
+      puts "[WARNING] Failed to load state: #{e}. Starting fresh..."
+      nil
+    end
+  end
+
+  # Save state (atomic write)
+  def save(state)
+    @state = state
+    @state['last_update'] = Time.now.utc.iso8601
+
+    temp_file = "#{state_file}.tmp"
+
+    begin
+      File.open(temp_file, 'w:utf-8') do |f|
+        f.write(JSON.pretty_generate(@state))
+      end
+
+      # atomic replace
+      FileUtils.rm_f(state_file)
+      FileUtils.mv(temp_file, state_file)
+
+    rescue IOError => e
+      puts "[ERROR] Failed to save state: #{e}"
+      raise
+    end
+  end
+
+  # Delete state file
+  def delete
+    if File.exist?(state_file)
+      File.delete(state_file)
+      puts "[STATE] Deleted #{state_file}"
+    end
+  end
+
+  # Create initial state
+  def create_initial_state(s3_bucket:, s3_key:, upload_url:, file_size:, chunk_size:)
+    puts "[DRIVE] Upload URL: #{upload_url}"
+
+    state = {
+      's3_bucket' => s3_bucket,
+      's3_key' => s3_key,
+      'upload_url' => upload_url,
+      'file_size' => file_size,
+      'uploaded_bytes' => 0,
+      'chunk_size' => chunk_size,
+      'total_chunks' => (file_size + chunk_size - 1) / chunk_size,
+      'last_chunk_uploaded' => -1,
+      'started_at' => Time.now.utc.iso8601,
+      'last_update' => Time.now.utc.iso8601
+    }
+
+    save(state)
+    state
+  end
+
+  # Update progress (chunk_index is optional: the upload loop in
+  # drive_upload.rb calls this with uploaded_bytes alone)
+  def update_progress(uploaded_bytes, chunk_index = nil)
+    raise "State not initialized" if @state.nil?
+
+    @state['uploaded_bytes'] = uploaded_bytes
+    @state['last_chunk_uploaded'] = chunk_index unless chunk_index.nil?
+    save(@state)
+  end
+end
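
The state file is what makes interrupted uploads resumable; a small sketch of the lifecycle, where the bucket, key, URL, and sizes are placeholders:

    mgr = StateManager.new                 # defaults to 'state.json'
    state = mgr.load || mgr.create_initial_state(
      s3_bucket: "example-bucket", s3_key: "videos/big.mp4",
      upload_url: "https://upload.example/session",
      file_size: 1000, chunk_size: 100
    )

    mgr.update_progress(300, 2)            # persists bytes + chunk index atomically
    mgr.delete                             # removed once the upload completes

    # total_chunks is ceil(file_size / chunk_size): (1000 + 100 - 1) / 100 == 10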
data/lib/s3_to_drive/exception/missing_config_error.rb CHANGED
@@ -1,33 +1,3 @@
-# module MissingConfigError
-#   class MissingConfigurationError < StandardError; end
-#   class << self
-#     attr_accessor :config
-
-#     def validate!
-#       missing = REQUIRED_KEYS.select do |key|
-#         config.send(key).nil?
-#       end
-
-#       return if missing.empty?
-
-#       raise MissingConfigurationError, <<~MSG
-#         Missing configuration for MyGem: #{missing.join(", ")}
-
-#         👉 Run:
-#           rails g my_gem:install
-
-#         Then edit:
-#           config/initializers/my_gem.rb
-#       MSG
-#     end
-
-#     def configure
-#       config ||= OpenStruct.new
-#       yield(config)
-#     end
-
-#     def config
-#       @config ||= OpenStruct.new
-#     end
-#   end
-# end
+module S3ToDrive
+  class MissingConfigurationError < StandardError; end
+end
data/lib/s3_to_drive/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module S3ToDrive
-  VERSION = "0.1.1"
+  VERSION = "1.0.0"
 end
data/lib/s3_to_drive.rb CHANGED
@@ -1,18 +1,19 @@
 # frozen_string_literal: true
 
 require_relative "s3_to_drive/version"
+require_relative "s3_to_drive/exception/missing_config_error"
 require_relative "s3_to_drive/client/configuration"
-require_relative "s3_to_drive/client/client"
 
 module S3ToDrive
   extend Configuration
-
+
   def self.new
     S3ToDrive::Client::Client.new
   end
 
   def self.method_missing(method_name, *args, &block)
     return super unless new.respond_to?(method_name)
+
     new.send(method_name, *args, &block)
   end
 
@@ -20,3 +21,5 @@ module S3ToDrive
     new.respond_to?(method_name, include_private) || super
   end
 end
+
+require_relative "s3_to_drive/client/client"
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: s3_to_drive
 version: !ruby/object:Gem::Version
-  version: 0.1.1
+  version: 1.0.0
 platform: ruby
 authors:
 - mirabo-hoang-lx
@@ -48,27 +48,15 @@ files:
 - LICENSE.txt
 - README.md
 - Rakefile
-- config/initializers/ s3_to_drive.rb
-- lib/concerns/transfer_s3_to_drive/.env
-- lib/concerns/transfer_s3_to_drive/Dockerfile
-- 'lib/concerns/transfer_s3_to_drive/app/__init__.py '
-- lib/concerns/transfer_s3_to_drive/app/__pycache__/main.cpython-312.pyc
-- lib/concerns/transfer_s3_to_drive/app/config.py
-- lib/concerns/transfer_s3_to_drive/app/db.py
-- lib/concerns/transfer_s3_to_drive/app/drive_upload.py
-- lib/concerns/transfer_s3_to_drive/app/drive_uploader.py
-- lib/concerns/transfer_s3_to_drive/app/kidsly-admin.code-workspace
-- lib/concerns/transfer_s3_to_drive/app/main.py
-- lib/concerns/transfer_s3_to_drive/app/models.py
-- lib/concerns/transfer_s3_to_drive/app/state_manager.py
-- lib/concerns/transfer_s3_to_drive/docker-compose.yml
-- lib/concerns/transfer_s3_to_drive/requirements.txt
-- lib/concerns/transfer_s3_to_drive/service_account.json
 - lib/generators/s3_to_drive/install_generator.rb
 - lib/generators/s3_to_drive/templates/s3_to_drive.rb
 - lib/s3_to_drive.rb
 - lib/s3_to_drive/client/client.rb
 - lib/s3_to_drive/client/configuration.rb
+- lib/s3_to_drive/client/validation.rb
+- lib/s3_to_drive/concerns/drive_upload.rb
+- lib/s3_to_drive/concerns/drive_uploader.rb
+- lib/s3_to_drive/concerns/state_manager.rb
 - lib/s3_to_drive/exception/missing_config_error.rb
 - lib/s3_to_drive/railtie.rb
 - lib/s3_to_drive/version.rb
data/config/initializers/ s3_to_drive.rb DELETED
@@ -1,6 +0,0 @@
-S3ToDrive.configure do |config|
-  config.aws_access_key_id = "AKIA26557Y2Q66666666"
-  config.aws_secret_access_key = "AKIA26557Y2Q66666666"
-  config.aws_region = "AKIA26557Y2Q66666666"
-  config.bucket_name = "AKIA26557Y2Q66666666"
-end