tus-server 1.2.1 → 2.0.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 37df5f3056cadbbdf3b73e78a24894232584f184
4
- data.tar.gz: 6b04e64b37d3f95e4e8f458866313338a97ce060
3
+ metadata.gz: f18da15eb73c98ae5dba91a27fcd0bb139281be0
4
+ data.tar.gz: 534ba0c6d6886e65f35c13e108ae40fb36a3a8de
5
5
  SHA512:
6
- metadata.gz: ac910fc9997db3a4f99b5d56529e16c60cd596e0a29ec7b18bd3c60936c9d31a3662988183382c0ffef5664766eb439de73fad2d02fdd499953580855b9b4b1d
7
- data.tar.gz: 20ae1bbce665211819fc2921a247bb89990f89bf985d25c245cfa7b26970a90eef2cc0ce1631c5567357252e91b45b5f3a2bf19f45f25f51b352c4c85fc1064f
6
+ metadata.gz: fce19c8935054e91852ff8b41449b6de7ddda6c780b9b7c4700311e62dcbc416032ad584c6de814d5cd80a2c726438a562d2d3417e5d586246c3ee55158fa50f
7
+ data.tar.gz: 1d7abdbaa7d83bdbed79bb4073491ba5e2f0ac4c0bf472b9a9022840474eb398bbf9ac9414f541861bd51c9d2fb2eb0e828a541f1ce792cb2497a7c8ba8b3a71
@@ -1,3 +1,21 @@
1
+ ## 2.0.0 (2017-11-13)
2
+
3
+ * Upgrade to Roda 3 (@janko-m)
4
+
5
+ * Remove deprecated support for aws-sdk 2.x in `Tus::Storage::S3` (@janko-m)
6
+
7
+ * Drop official support for MRI 2.1 (@janko-m)
8
+
9
+ * Add generic `Tus::Response` class that storages can use (@janko-m)
10
+
11
+ * Remove `Tus::Response#length` (@janko-m)
12
+
13
+ * Remove deprecated Goliath integration (@janko-m)
14
+
15
+ * Return `400 Bad Request` instead of `404 Not Found` when some partial uploads are missing in a concatenation request (@janko-m)
16
+
17
+ * Use Rack directly instead of Roda's `streaming` plugin for downloading (@janko-m)
18
+
1
19
  ## 1.2.1 (2017-11-05)
2
20
 
3
21
  * Improve communication when handling `aws-sdk 2.x` fallback in `Tus::Storage::S3` (@janko-m)
@@ -1,6 +1,15 @@
1
1
  # frozen-string-literal: true
2
2
 
3
3
  module Tus
4
+ # Generates various checksums for given IO objects. The following algorithms
5
+ # are supported:
6
+ #
7
+ # * SHA1
8
+ # * SHA256
9
+ # * SHA384
10
+ # * SHA512
11
+ # * MD5
12
+ # * CRC32
4
13
  class Checksum
5
14
  CHUNK_SIZE = 16*1024
6
15
 
@@ -1,8 +1,10 @@
1
1
  # frozen-string-literal: true
2
+
2
3
  require "base64"
3
4
  require "time"
4
5
 
5
6
  module Tus
7
+ # Holds request headers and other information about tus uploads.
6
8
  class Info
7
9
  HEADERS = %w[
8
10
  Upload-Length
@@ -1,8 +1,16 @@
1
1
  # frozen-string-literal: true
2
+
3
+ require "tus/input/unicorn"
2
4
  require "tus/errors"
3
5
 
4
6
  module Tus
7
+ # Wrapper around the Rack input, which adds the ability to limit the amount of
8
+ # bytes that will be read from the Rack input. If there are more bytes in the
9
+ # Rack input than the specified limit, a Tus::MaxSizeExceeded exception is
10
+ # raised.
5
11
  class Input
12
+ prepend Tus::Input::Unicorn
13
+
6
14
  def initialize(input, limit: nil)
7
15
  @input = input
8
16
  @limit = limit
@@ -10,16 +18,12 @@ module Tus
10
18
  end
11
19
 
12
20
  def read(length = nil, outbuf = nil)
13
- data = @input.read(length, outbuf)
21
+ data = @input.read(*length, *outbuf)
14
22
 
15
23
  @pos += data.bytesize if data
16
24
  raise MaxSizeExceeded if @limit && @pos > @limit
17
25
 
18
26
  data
19
- rescue => exception
20
- raise unless exception.class.name == "Unicorn::ClientShutdown"
21
- outbuf = outbuf.to_s.clear
22
- outbuf unless length
23
27
  end
24
28
 
25
29
  def pos
@@ -0,0 +1,16 @@
1
+ module Tus
2
+ class Input
3
+ # Extension for Unicorn to gracefully handle interrupted uploads.
4
+ module Unicorn
5
+ # Rescues Unicorn::ClientShutdown exception when reading, and instead of
6
+ # failing just returns blank data to signal end of input.
7
+ def read(length = nil, outbuf = nil)
8
+ super
9
+ rescue => exception
10
+ raise unless exception.class.name == "Unicorn::ClientShutdown"
11
+ outbuf = outbuf.to_s.clear
12
+ outbuf unless length
13
+ end
14
+ end
15
+ end
16
+ end
@@ -0,0 +1,18 @@
1
+ module Tus
2
+ # Object that responds to #each, #length, and #close, suitable for returning
3
+ # as a Rack response body.
4
+ class Response
5
+ def initialize(chunks:, close: ->{})
6
+ @chunks = chunks
7
+ @close = close
8
+ end
9
+
10
+ def each(&block)
11
+ @chunks.each(&block)
12
+ end
13
+
14
+ def close
15
+ @close.call
16
+ end
17
+ end
18
+ end
@@ -32,7 +32,6 @@ module Tus
32
32
  plugin :delete_empty_headers
33
33
  plugin :request_headers
34
34
  plugin :not_allowed
35
- plugin :streaming
36
35
 
37
36
  route do |r|
38
37
  if request.headers["X-HTTP-Method-Override"]
@@ -47,6 +46,7 @@ module Tus
47
46
  validate_tus_resumable! unless request.options? || request.get?
48
47
 
49
48
  r.is ['', true] do
49
+ # OPTIONS /
50
50
  r.options do
51
51
  response.headers.update(
52
52
  "Tus-Version" => SUPPORTED_VERSIONS.join(","),
@@ -58,6 +58,7 @@ module Tus
58
58
  no_content!
59
59
  end
60
60
 
61
+ # POST /
61
62
  r.post do
62
63
  validate_upload_length! unless request.headers["Upload-Concat"].to_s.start_with?("final") || request.headers["Upload-Defer-Length"] == "1"
63
64
  validate_upload_metadata! if request.headers["Upload-Metadata"]
@@ -92,7 +93,8 @@ module Tus
92
93
  end
93
94
  end
94
95
 
95
- r.is ":uid" do |uid|
96
+ r.is String do |uid|
97
+ # OPTIONS /{uid}
96
98
  r.options do
97
99
  response.headers.update(
98
100
  "Tus-Version" => SUPPORTED_VERSIONS.join(","),
@@ -117,6 +119,7 @@ module Tus
117
119
  no_content!
118
120
  end
119
121
 
122
+ # PATCH /{uid}
120
123
  r.patch do
121
124
  if info.defer_length? && request.headers["Upload-Length"]
122
125
  validate_upload_length!
@@ -151,6 +154,7 @@ module Tus
151
154
  no_content!
152
155
  end
153
156
 
157
+ # GET /{uid}
154
158
  r.get do
155
159
  validate_upload_finished!(info)
156
160
  range = handle_range_request!(info.length)
@@ -163,13 +167,12 @@ module Tus
163
167
  response.headers["Content-Type"] = metadata["content_type"] || "application/octet-stream"
164
168
  response.headers
165
169
 
166
- response = storage.get_file(uid, info.to_h, range: range)
170
+ body = storage.get_file(uid, info.to_h, range: range)
167
171
 
168
- stream(callback: ->{response.close}) do |out|
169
- response.each { |chunk| out << chunk }
170
- end
172
+ request.halt response.finish_with_body(body)
171
173
  end
172
174
 
175
+ # DELETE /{uid}
173
176
  r.delete do
174
177
  storage.delete_file(uid, info.to_h)
175
178
 
@@ -178,6 +181,8 @@ module Tus
178
181
  end
179
182
  end
180
183
 
184
+ # Wraps the Rack input (request body) into a Tus::Input object, applying a
185
+ # size limit if one exists.
181
186
  def get_input(info)
182
187
  offset = info.offset
183
188
  total = info.length || max_size
@@ -263,6 +268,7 @@ module Tus
263
268
  end
264
269
  end
265
270
 
271
+ # Validates that each partial upload exists and is marked as one.
266
272
  def validate_partial_uploads!(part_uids)
267
273
  queue = Queue.new
268
274
  part_uids.each { |part_uid| queue << part_uid }
@@ -284,7 +290,7 @@ module Tus
284
290
  error!(400, "One or more uploads were not partial")
285
291
  end
286
292
  rescue Tus::NotFound
287
- error!(404, "One or more partial uploads were not found")
293
+ error!(400, "One or more partial uploads were not found")
288
294
  end
289
295
 
290
296
  def validate_upload_checksum!(input)
@@ -297,7 +303,8 @@ module Tus
297
303
  error!(460, "Upload-Checksum value doesn't match generated checksum") if generated_checksum != checksum
298
304
  end
299
305
 
300
- # "Range" header handling logic copied from Rack::File
306
+ # Handles partial responses requested in the "Range" header. Implementation
307
+ # is mostly copied from Rack::File.
301
308
  def handle_range_request!(length)
302
309
  # we support ranged requests
303
310
  response.headers["Accept-Ranges"] = "bytes"
@@ -1,4 +1,6 @@
1
1
  # frozen-string-literal: true
2
+
3
+ require "tus/response"
2
4
  require "tus/errors"
3
5
 
4
6
  require "pathname"
@@ -10,6 +12,8 @@ module Tus
10
12
  class Filesystem
11
13
  attr_reader :directory
12
14
 
15
+ # Initializes the storage with a directory, in which it will save all
16
+ # files. Creates the directory if it doesn't exist.
13
17
  def initialize(directory, permissions: 0644, directory_permissions: 0755)
14
18
  @directory = Pathname(directory)
15
19
  @permissions = permissions
@@ -18,6 +22,7 @@ module Tus
18
22
  create_directory! unless @directory.exist?
19
23
  end
20
24
 
25
+ # Creates a file for storing uploaded data and a file for storing info.
21
26
  def create_file(uid, info = {})
22
27
  file_path(uid).binwrite("")
23
28
  file_path(uid).chmod(@permissions)
@@ -26,6 +31,11 @@ module Tus
26
31
  info_path(uid).chmod(@permissions)
27
32
  end
28
33
 
34
+ # Concatenates multiple partial uploads into a single upload, and returns
35
+ # the size of the resulting upload. The partial uploads are deleted after
36
+ # concatenation.
37
+ #
38
+ # Raises Tus::Error if any partial upload is missing.
29
39
  def concatenate(uid, part_uids, info = {})
30
40
  create_file(uid, info)
31
41
 
@@ -49,20 +59,28 @@ module Tus
49
59
  file_path(uid).size
50
60
  end
51
61
 
62
+ # Appends data to the specified upload in a streaming fashion, and
63
+ # returns the number of bytes it managed to save.
52
64
  def patch_file(uid, input, info = {})
53
65
  file_path(uid).open("ab") { |file| IO.copy_stream(input, file) }
54
66
  end
55
67
 
68
+ # Returns info of the specified upload. Raises Tus::NotFound if the upload
69
+ # wasn't found.
56
70
  def read_info(uid)
57
71
  raise Tus::NotFound if !file_path(uid).exist?
58
72
 
59
73
  JSON.parse(info_path(uid).binread)
60
74
  end
61
75
 
76
+ # Updates info of the specified upload.
62
77
  def update_info(uid, info)
63
78
  info_path(uid).binwrite(JSON.generate(info))
64
79
  end
65
80
 
81
+ # Returns a Tus::Response object through which data of the specified
82
+ # upload can be retrieved in a streaming fashion. Accepts an optional
83
+ # range parameter for selecting a subset of bytes to retrieve.
66
84
  def get_file(uid, info = {}, range: nil)
67
85
  file = file_path(uid).open("rb")
68
86
  length = range ? range.size : file.size
@@ -81,15 +99,15 @@ module Tus
81
99
  end
82
100
  end
83
101
 
84
- # We return a response object that responds to #each, #length and #close,
85
- # which the tus server can return directly as the Rack response.
86
- Response.new(chunks: chunks, length: length, close: -> { file.close })
102
+ Tus::Response.new(chunks: chunks, close: file.method(:close))
87
103
  end
88
104
 
105
+ # Deletes data and info files for the specified upload.
89
106
  def delete_file(uid, info = {})
90
107
  delete([uid])
91
108
  end
92
109
 
110
+ # Deletes data and info files of uploads older than the specified date.
93
111
  def expire_files(expiration_date)
94
112
  uids = directory.children
95
113
  .select { |pathname| pathname.mtime <= expiration_date }
@@ -118,26 +136,6 @@ module Tus
118
136
  directory.mkpath
119
137
  directory.chmod(@directory_permissions)
120
138
  end
121
-
122
- class Response
123
- def initialize(chunks:, close:, length:)
124
- @chunks = chunks
125
- @close = close
126
- @length = length
127
- end
128
-
129
- def length
130
- @length
131
- end
132
-
133
- def each(&block)
134
- @chunks.each(&block)
135
- end
136
-
137
- def close
138
- @close.call
139
- end
140
- end
141
139
  end
142
140
  end
143
141
  end
@@ -1,7 +1,9 @@
1
1
  # frozen-string-literal: true
2
+
2
3
  require "mongo"
3
4
 
4
5
  require "tus/info"
6
+ require "tus/response"
5
7
  require "tus/errors"
6
8
 
7
9
  require "digest"
@@ -13,6 +15,7 @@ module Tus
13
15
 
14
16
  attr_reader :client, :prefix, :bucket, :chunk_size
15
17
 
18
+ # Initializes the GridFS storage and creates necessary indexes.
16
19
  def initialize(client:, prefix: "fs", chunk_size: 256*1024)
17
20
  @client = client
18
21
  @prefix = prefix
@@ -22,6 +25,7 @@ module Tus
22
25
  @bucket.send(:ensure_indexes!)
23
26
  end
24
27
 
28
+ # Creates a file for the specified upload.
25
29
  def create_file(uid, info = {})
26
30
  content_type = Tus::Info.new(info).metadata["content_type"]
27
31
 
@@ -31,6 +35,15 @@ module Tus
31
35
  )
32
36
  end
33
37
 
38
+ # Concatenates multiple partial uploads into a single upload, and returns
39
+ # the size of the resulting upload. The partial uploads are deleted after
40
+ # concatenation.
41
+ #
42
+ # It concatenates by updating partial upload's GridFS chunks to point to
43
+ # the new upload.
44
+ #
45
+ # Raises Tus::Error if GridFS chunks of partial uploads don't exist or
46
+ # aren't completely filled.
34
47
  def concatenate(uid, part_uids, info = {})
35
48
  grid_infos = files_collection.find(filename: {"$in" => part_uids}).to_a
36
49
  grid_infos.sort_by! { |grid_info| part_uids.index(grid_info[:filename]) }
@@ -65,14 +78,25 @@ module Tus
65
78
  length
66
79
  end
67
80
 
81
+ # Appends data to the specified upload in a streaming fashion, and
82
+ # returns the number of bytes it managed to save.
83
+ #
84
+ # It does so by reading the input data in batches of chunks, creating a
85
+ # new GridFS chunk for each chunk of data and appending it to the
86
+ # existing list.
68
87
  def patch_file(uid, input, info = {})
69
88
  grid_info = files_collection.find(filename: uid).first
70
89
  current_length = grid_info[:length]
71
90
  chunk_size = grid_info[:chunkSize]
72
91
  bytes_saved = 0
73
92
 
93
+ # It's possible that the previous data append didn't fill in the last
94
+ # GridFS chunk completely, so we fill in that gap now before creating
95
+ # new GridFS chunks.
74
96
  bytes_saved += patch_last_chunk(input, grid_info) if current_length % chunk_size != 0
75
97
 
98
+ # Create an Enumerator which yields chunks of input data which have the
99
+ # size of the configured :chunkSize of the GridFS file.
76
100
  chunks_enumerator = Enumerator.new do |yielder|
77
101
  while (data = input.read(chunk_size))
78
102
  yielder << data
@@ -82,6 +106,9 @@ module Tus
82
106
  chunks_in_batch = (BATCH_SIZE.to_f / chunk_size).ceil
83
107
  chunks_offset = chunks_collection.count(files_id: grid_info[:_id]) - 1
84
108
 
109
+ # Iterate in batches of data chunks and bulk-insert new GridFS chunks.
110
+ # This way we try to have a balance between bulking inserts and keeping
111
+ # memory usage low.
85
112
  chunks_enumerator.each_slice(chunks_in_batch) do |chunks|
86
113
  grid_chunks = chunks.map do |data|
87
114
  Mongo::Grid::File::Chunk.new(
@@ -107,23 +134,24 @@ module Tus
107
134
  bytes_saved
108
135
  end
109
136
 
137
+ # Returns info of the specified upload. Raises Tus::NotFound if the upload
138
+ # wasn't found.
110
139
  def read_info(uid)
111
140
  grid_info = files_collection.find(filename: uid).first or raise Tus::NotFound
112
-
113
141
  grid_info[:metadata]
114
142
  end
115
143
 
144
+ # Updates info of the specified upload.
116
145
  def update_info(uid, info)
117
- grid_info = files_collection.find(filename: uid).first
118
-
119
146
  files_collection.update_one({filename: uid}, {"$set" => {metadata: info}})
120
147
  end
121
148
 
149
+ # Returns a Tus::Response object through which data of the specified
150
+ # upload can be retrieved in a streaming fashion. Accepts an optional
151
+ # range parameter for selecting a subset of bytes we want to retrieve.
122
152
  def get_file(uid, info = {}, range: nil)
123
153
  grid_info = files_collection.find(filename: uid).first
124
154
 
125
- length = range ? range.size : grid_info[:length]
126
-
127
155
  filter = { files_id: grid_info[:_id] }
128
156
 
129
157
  if range
@@ -166,16 +194,16 @@ module Tus
166
194
  end
167
195
  end
168
196
 
169
- # We return a response object that responds to #each, #length and #close,
170
- # which the tus server can return directly as the Rack response.
171
- Response.new(chunks: chunks, length: length, close: ->{chunks_view.close_query})
197
+ Tus::Response.new(chunks: chunks, close: chunks_view.method(:close_query))
172
198
  end
173
199
 
200
+ # Deletes the GridFS file and chunks for the specified upload.
174
201
  def delete_file(uid, info = {})
175
202
  grid_info = files_collection.find(filename: uid).first
176
203
  bucket.delete(grid_info[:_id]) if grid_info
177
204
  end
178
205
 
206
+ # Deletes GridFS file and chunks of uploads older than the specified date.
179
207
  def expire_files(expiration_date)
180
208
  grid_infos = files_collection.find(uploadDate: {"$lte" => expiration_date}).to_a
181
209
  grid_info_ids = grid_infos.map { |info| info[:_id] }
@@ -186,15 +214,18 @@ module Tus
186
214
 
187
215
  private
188
216
 
217
+ # Creates a GridFS file.
189
218
  def create_grid_file(**options)
190
- file_options = {metadata: {}, chunk_size: chunk_size}.merge(options)
191
- grid_file = Mongo::Grid::File.new("", file_options)
219
+ grid_file = Mongo::Grid::File.new("", metadata: {}, chunk_size: chunk_size, **options)
192
220
 
193
221
  bucket.insert_one(grid_file)
194
222
 
195
223
  grid_file
196
224
  end
197
225
 
226
+ # If the last GridFS chunk of the file is incomplete (meaning it's smaller
227
+ # than the configured :chunkSize of the GridFS file), fills the missing
228
+ # data by reading a chunk of the input data.
198
229
  def patch_last_chunk(input, grid_info)
199
230
  last_chunk = chunks_collection.find(files_id: grid_info[:_id]).sort(n: -1).limit(1).first
200
231
  data = last_chunk[:data].data
@@ -210,17 +241,21 @@ module Tus
210
241
  patch.bytesize
211
242
  end
212
243
 
244
+ # Validates that GridFS files of partial uploads are suitable for
245
+ # concatentation.
213
246
  def validate_parts!(grid_infos, part_uids)
214
247
  validate_parts_presence!(grid_infos, part_uids)
215
248
  validate_parts_full_chunks!(grid_infos)
216
249
  end
217
250
 
251
+ # Validates that each partial upload has a corresponding GridFS file.
218
252
  def validate_parts_presence!(grid_infos, part_uids)
219
253
  if grid_infos.count != part_uids.count
220
254
  raise Tus::Error, "some parts for concatenation are missing"
221
255
  end
222
256
  end
223
257
 
258
+ # Validates that GridFS chunks of each file are filled completely.
224
259
  def validate_parts_full_chunks!(grid_infos)
225
260
  grid_infos.each do |grid_info|
226
261
  if grid_info[:length] % grid_info[:chunkSize] != 0 && grid_info != grid_infos.last
@@ -236,26 +271,6 @@ module Tus
236
271
  def chunks_collection
237
272
  bucket.chunks_collection
238
273
  end
239
-
240
- class Response
241
- def initialize(chunks:, close:, length:)
242
- @chunks = chunks
243
- @close = close
244
- @length = length
245
- end
246
-
247
- def length
248
- @length
249
- end
250
-
251
- def each(&block)
252
- @chunks.each(&block)
253
- end
254
-
255
- def close
256
- @close.call
257
- end
258
- end
259
274
  end
260
275
  end
261
276
  end
@@ -1,21 +1,12 @@
1
1
  # frozen-string-literal: true
2
2
 
3
- begin
4
- require "aws-sdk-s3"
5
- if Gem::Version.new(Aws::S3::GEM_VERSION) < Gem::Version.new("1.2.0")
6
- raise "Tus::Storage::S3 requires aws-sdk-s3 version 1.2.0 or above"
7
- end
8
- rescue LoadError => exception
9
- begin
10
- require "aws-sdk"
11
- warn "Using aws-sdk 2.x is deprecated and support for it will be removed in tus-server 2.0, use the new aws-sdk-s3 gem instead."
12
- Aws.eager_autoload!(services: ["S3"])
13
- rescue LoadError
14
- raise exception
15
- end
3
+ require "aws-sdk-s3"
4
+ if Gem::Version.new(Aws::S3::GEM_VERSION) < Gem::Version.new("1.2.0")
5
+ raise "Tus::Storage::S3 requires aws-sdk-s3 version 1.2.0 or above"
16
6
  end
17
7
 
18
8
  require "tus/info"
9
+ require "tus/response"
19
10
  require "tus/errors"
20
11
 
21
12
  require "json"
@@ -30,6 +21,7 @@ module Tus
30
21
 
31
22
  attr_reader :client, :bucket, :prefix, :upload_options
32
23
 
24
+ # Initializes an aws-sdk-s3 client with the given credentials.
33
25
  def initialize(bucket:, prefix: nil, upload_options: {}, thread_count: 10, **client_options)
34
26
  resource = Aws::S3::Resource.new(**client_options)
35
27
 
@@ -40,6 +32,8 @@ module Tus
40
32
  @thread_count = thread_count
41
33
  end
42
34
 
35
+ # Initiates multipart upload for the given upload, and stores its
36
+ # information inside the info hash.
43
37
  def create_file(uid, info = {})
44
38
  tus_info = Tus::Info.new(info)
45
39
 
@@ -63,6 +57,15 @@ module Tus
63
57
  multipart_upload
64
58
  end
65
59
 
60
+ # Concatenates multiple partial uploads into a single upload, and returns
61
+ # the size of the resulting upload. The partial uploads are deleted after
62
+ # concatenation.
63
+ #
64
+ # Internally it creates a new multipart upload, copies objects of the
65
+ # given partial uploads into multipart parts, and finalizes the multipart
66
+ # upload.
67
+ #
68
+ # The multipart upload is automatically aborted in case of an exception.
66
69
  def concatenate(uid, part_uids, info = {})
67
70
  multipart_upload = create_file(uid, info)
68
71
 
@@ -83,6 +86,18 @@ module Tus
83
86
  raise error
84
87
  end
85
88
 
89
+ # Appends data to the specified upload in a streaming fashion, and returns
90
+ # the number of bytes it managed to save.
91
+ #
92
+ # The data read from the input is first buffered in memory, and once 5MB
93
+ # (AWS S3's minimum allowed size for a multipart part) or more data has
94
+ # been retrieved, it starts being uploaded in a background thread as the
95
+ # next multipart part. This allows us to start reading the next chunk of
96
+ # input data as soon as possible, achieving streaming.
97
+ #
98
+ # If any network error is raised during the upload to S3, the upload of
99
+ # further input data stops and the number of bytes that managed to get
100
+ # uploaded is returned.
86
101
  def patch_file(uid, input, info = {})
87
102
  tus_info = Tus::Info.new(info)
88
103
 
@@ -129,6 +144,8 @@ module Tus
129
144
  bytes_uploaded
130
145
  end
131
146
 
147
+ # Completes the multipart upload using the part information saved in the
148
+ # info hash.
132
149
  def finalize_file(uid, info = {})
133
150
  upload_id = info["multipart_id"]
134
151
  parts = info["multipart_parts"].map do |part|
@@ -142,6 +159,8 @@ module Tus
142
159
  info.delete("multipart_parts")
143
160
  end
144
161
 
162
+ # Returns info of the specified upload. Raises Tus::NotFound if the upload
163
+ # wasn't found.
145
164
  def read_info(uid)
146
165
  response = object("#{uid}.info").get
147
166
  JSON.parse(response.body.string)
@@ -149,22 +168,26 @@ module Tus
149
168
  raise Tus::NotFound
150
169
  end
151
170
 
171
+ # Updates info of the specified upload.
152
172
  def update_info(uid, info)
153
173
  object("#{uid}.info").put(body: info.to_json)
154
174
  end
155
175
 
176
+ # Returns a Tus::Response object through which data of the specified
177
+ # upload can be retrieved in a streaming fashion. Accepts an optional
178
+ # range parameter for selecting a subset of bytes to retrieve.
156
179
  def get_file(uid, info = {}, range: nil)
157
180
  tus_info = Tus::Info.new(info)
158
181
 
159
- length = range ? range.size : tus_info.length
160
182
  range = "bytes=#{range.begin}-#{range.end}" if range
161
183
  chunks = object(uid).enum_for(:get, range: range)
162
184
 
163
- # We return a response object that responds to #each, #length and #close,
164
- # which the tus server can return directly as the Rack response.
165
- Response.new(chunks: chunks, length: length)
185
+ Tus::Response.new(chunks: chunks)
166
186
  end
167
187
 
188
+ # Deletes resources for the specified upload. If multipart upload is
189
+ # still in progress, aborts the multipart upload, otherwise deletes the
190
+ # object.
168
191
  def delete_file(uid, info = {})
169
192
  if info["multipart_id"]
170
193
  multipart_upload = object(uid).multipart_upload(info["multipart_id"])
@@ -176,6 +199,9 @@ module Tus
176
199
  end
177
200
  end
178
201
 
202
+ # Deletes resources of uploads older than the specified date. For
203
+ # multipart uploads still in progress, it checks the upload date of the
204
+ # last multipart part.
179
205
  def expire_files(expiration_date)
180
206
  old_objects = bucket.objects.select do |object|
181
207
  object.last_modified <= expiration_date
@@ -196,10 +222,15 @@ module Tus
196
222
 
197
223
  private
198
224
 
225
+ # Spawns a thread which uploads given body as a new multipart part with
226
+ # the specified part number to the specified multipart upload.
199
227
  def upload_part_thread(body, key, upload_id, part_number)
200
228
  Thread.new { upload_part(body, key, upload_id, part_number) }
201
229
  end
202
230
 
231
+ # Uploads given body as a new multipart part with the specified part
232
+ # number to the specified multipart upload. Returns part number and ETag
233
+ # that will be required later for completing the multipart upload.
203
234
  def upload_part(body, key, upload_id, part_number)
204
235
  multipart_upload = object(key).multipart_upload(upload_id)
205
236
  multipart_part = multipart_upload.part(part_number)
@@ -229,6 +260,9 @@ module Tus
229
260
  # multipart upload was successfully aborted or doesn't exist
230
261
  end
231
262
 
263
+ # Creates multipart parts for the specified multipart upload by copying
264
+ # given objects into them. It uses a queue and a fixed-size thread pool
265
+ # which consumes that queue.
232
266
  def copy_parts(objects, multipart_upload)
233
267
  parts = compute_parts(objects, multipart_upload)
234
268
  queue = parts.inject(Queue.new) { |queue, part| queue << part }
@@ -238,6 +272,7 @@ module Tus
238
272
  threads.flat_map(&:value).sort_by { |part| part["part_number"] }
239
273
  end
240
274
 
275
+ # Computes data required for copying objects into new multipart parts.
241
276
  def compute_parts(objects, multipart_upload)
242
277
  objects.map.with_index do |object, idx|
243
278
  {
@@ -250,6 +285,8 @@ module Tus
250
285
  end
251
286
  end
252
287
 
288
+ # Consumes the queue for new multipart part information and issues the
289
+ # copy requests.
253
290
  def copy_part_thread(queue)
254
291
  Thread.new do
255
292
  begin
@@ -266,50 +303,19 @@ module Tus
266
303
  end
267
304
  end
268
305
 
306
+ # Creates a new multipart part by copying the object specified in the
307
+ # given data. Returns part number and ETag that will be required later
308
+ # for completing the multipart upload.
269
309
  def copy_part(part)
270
310
  response = client.upload_part_copy(part)
271
311
 
272
312
  { "part_number" => part[:part_number], "etag" => response.copy_part_result.etag }
273
313
  end
274
314
 
315
+ # Returns an Aws::S3::Object with the prefix applied.
275
316
  def object(key)
276
317
  bucket.object([*prefix, key].join("/"))
277
318
  end
278
-
279
- class Response
280
- def initialize(chunks:, length:)
281
- @chunks = chunks
282
- @length = length
283
- end
284
-
285
- def length
286
- @length
287
- end
288
-
289
- def each
290
- return enum_for(__method__) unless block_given?
291
-
292
- while (chunk = chunks_fiber.resume)
293
- yield chunk
294
- end
295
- end
296
-
297
- def close
298
- chunks_fiber.resume(:close) if chunks_fiber.alive?
299
- end
300
-
301
- private
302
-
303
- def chunks_fiber
304
- @chunks_fiber ||= Fiber.new do
305
- @chunks.each do |chunk|
306
- action = Fiber.yield chunk
307
- break if action == :close
308
- end
309
- nil
310
- end
311
- end
312
- end
313
319
  end
314
320
  end
315
321
  end
@@ -1,8 +1,8 @@
1
1
  Gem::Specification.new do |gem|
2
2
  gem.name = "tus-server"
3
- gem.version = "1.2.1"
3
+ gem.version = "2.0.0"
4
4
 
5
- gem.required_ruby_version = ">= 2.1"
5
+ gem.required_ruby_version = ">= 2.2"
6
6
 
7
7
  gem.summary = "Ruby server implementation of tus.io, the open protocol for resumable file uploads."
8
8
 
@@ -14,7 +14,7 @@ Gem::Specification.new do |gem|
14
14
  gem.files = Dir["README.md", "LICENSE.txt", "CHANGELOG.md", "lib/**/*.rb", "*.gemspec"]
15
15
  gem.require_path = "lib"
16
16
 
17
- gem.add_dependency "roda", "~> 2.19"
17
+ gem.add_dependency "roda", "~> 3.0"
18
18
 
19
19
  gem.add_development_dependency "rake", "~> 11.1"
20
20
  gem.add_development_dependency "minitest", "~> 5.8"
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: tus-server
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.1
4
+ version: 2.0.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Janko Marohnić
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2017-11-05 00:00:00.000000000 Z
11
+ date: 2017-11-13 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: roda
@@ -16,14 +16,14 @@ dependencies:
16
16
  requirements:
17
17
  - - "~>"
18
18
  - !ruby/object:Gem::Version
19
- version: '2.19'
19
+ version: '3.0'
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
24
  - - "~>"
25
25
  - !ruby/object:Gem::Version
26
- version: '2.19'
26
+ version: '3.0'
27
27
  - !ruby/object:Gem::Dependency
28
28
  name: rake
29
29
  requirement: !ruby/object:Gem::Requirement
@@ -123,8 +123,9 @@ files:
123
123
  - lib/tus/errors.rb
124
124
  - lib/tus/info.rb
125
125
  - lib/tus/input.rb
126
+ - lib/tus/input/unicorn.rb
127
+ - lib/tus/response.rb
126
128
  - lib/tus/server.rb
127
- - lib/tus/server/goliath.rb
128
129
  - lib/tus/storage/filesystem.rb
129
130
  - lib/tus/storage/gridfs.rb
130
131
  - lib/tus/storage/s3.rb
@@ -141,7 +142,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
141
142
  requirements:
142
143
  - - ">="
143
144
  - !ruby/object:Gem::Version
144
- version: '2.1'
145
+ version: '2.2'
145
146
  required_rubygems_version: !ruby/object:Gem::Requirement
146
147
  requirements:
147
148
  - - ">="
@@ -1,71 +0,0 @@
1
- # frozen-string-literal: true
2
- require "tus/server"
3
- require "goliath"
4
-
5
- warn "Tus::Server::Goliath has been deprecated in favor of goliath-rack_proxy -- https://github.com/janko-m/goliath-rack_proxy"
6
-
7
- class Tus::Server::Goliath < Goliath::API
8
- # Called as soon as request headers are parsed.
9
- def on_headers(env, headers)
10
- # the write end of the pipe is written in #on_body, and the read end is read by Tus::Server
11
- env["tus.input-reader"], env["tus.input-writer"] = IO.pipe
12
- # use a thread so that request is being processed in parallel
13
- env["tus.request-thread"] = Thread.new do
14
- call_tus_server env.merge("rack.input" => env["tus.input-reader"])
15
- end
16
- end
17
-
18
- # Called on each request body chunk received from the client.
19
- def on_body(env, data)
20
- # append data to the write end of the pipe if open, otherwise do nothing
21
- env["tus.input-writer"].write(data) unless env["tus.input-writer"].closed?
22
- rescue Errno::EPIPE
23
- # read end of the pipe has been closed, so we close the write end as well
24
- env["tus.input-writer"].close
25
- end
26
-
27
- # Called at the end of the request (after #response is called), but also on
28
- # client disconnect (in which case #response isn't called), so we want to do
29
- # the same finalization in both methods.
30
- def on_close(env)
31
- finalize(env)
32
- end
33
-
34
- # Called after all the data has been received from the client.
35
- def response(env)
36
- status, headers, body = finalize(env)
37
-
38
- env[STREAM_START].call(status, headers)
39
-
40
- operation = proc { body.each { |chunk| env.stream_send(chunk) } }
41
- callback = proc { env.stream_close }
42
-
43
- EM.defer(operation, callback) # use an outside thread pool for streaming
44
-
45
- nil
46
- end
47
-
48
- private
49
-
50
- # Calls the actual Roda application with the slightly modified env hash.
51
- def call_tus_server(env)
52
- Tus::Server.call env.merge(
53
- "rack.url_scheme" => (env["options"][:ssl] ? "https" : "http"), # https://github.com/postrank-labs/goliath/issues/210
54
- "async.callback" => nil, # prevent Roda from calling EventMachine when streaming
55
- )
56
- end
57
-
58
- # This method needs to be idempotent, because it can be called twice (on
59
- # normal requests both #response and #on_close will be called, and on client
60
- # disconnect only #on_close will be called).
61
- def finalize(env)
62
- # closing the write end of the pipe will mark EOF on the read end
63
- env["tus.input-writer"].close unless env["tus.input-writer"].closed?
64
- # wait for the request to finish
65
- result = env["tus.request-thread"].value
66
- # close read end of the pipe, since nothing is going to read from it anymore
67
- env["tus.input-reader"].close unless env["tus.input-reader"].closed?
68
- # return rack response
69
- result
70
- end
71
- end