filestack 2.7.0 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 36cd3251f64e64ffcdb9d42c73cbf6d49bcda8a2533cb8053e0087229a077bf2
- data.tar.gz: f2e38e698557c0b336c332be5b68775fe2624e046a56e1c521717554f23b6495
+ metadata.gz: 8293c3bf008ad994166882a2029f0936ee64f4c070ef41040d2c97f492f37cb4
+ data.tar.gz: 7749e600bce0dc504a943553ea6e807a5ed4aab8f43813a2cc62fd78d61905ff
  SHA512:
- metadata.gz: 26a1b21b1ad69ae42708e94323e28d7656881824e668677b0dc27a4bb13a035ed6f5b104f90615a0e53bf3b454621fa3a3e129a6a412eed15dd7271f33100f84
- data.tar.gz: 4d2c6c3e96da48df28e067f6990e3313669ffb2cf0f33f62964db5f708b42b0818c9992cec713c0b70acc6db72425bb727545726a1757c9c64c9dc7addcf6090
+ metadata.gz: f44323a268793efb45679967fbeafe880ab3b21299744c5ab545d066bfe72007af5673d4e750e789d400469ae6e3888ba58459f8f5d41713d31d527bdc7f4147
+ data.tar.gz: 01d18cbf962f1548b8cdca6db43d15705d07c15647c638710eaf90d9bba419e315f2fc39ef15ee48bc0e247f4cf7cbcd0bb094525413ded7d4cf341584e90cec
@@ -1,5 +1,8 @@
  # Filestack-Ruby Changelog

+ ## 2.8.0 (September 29, 2020)
+ - Add IO object upload
+
  ## 2.7.0 (September 28, 2020)
  - Add workflows
 
data/README.md CHANGED
@@ -55,13 +55,33 @@ filelink = client.upload(filepath: '/path/to/localfile')
  # OR

  filelink = client.upload(external_url: 'http://domain.com/image.png')
+
+ # OR
+
+ file = StringIO.new
+ filelink = client.upload(io: file)
  ```

- To upload a local and an external file with query parameters:
+ To upload a local, an IO object and an external file with following optional options:
+
  ```ruby
- filelink = client.upload(filepath: '/path/to/localfile', options: { mimetype: 'image/png' })
+ options = {
+ filename: 'string',
+ location: 'string',
+ path: 'string',
+ container: 'string',
+ mimetype: 'string',
+ region: 'string',
+ workflows: ['workflow-id-1', 'workflow-id-2'],
+ upload_tags: {
+ key: 'value',
+ key2: 'value'
+ }
+ }
+
+ filelink = client.upload(filepath: '/path/to/localfile', options: { mimetype: 'image/png', filename: 'custom_filename.png' })

- filelink = client.upload(external_url: 'http://domain.com/image.png', options: { mimetype: 'image/jpeg' })
+ filelink = client.upload(external_url: 'http://domain.com/image.png', options: { mimetype: 'image/jpeg', filename: 'custom_filename.png' })
  ```

  To store file on `dropbox`, `azure`, `gcs` or `rackspace`, you must have the chosen provider configured in the developer portal to enable this feature. By default the file is stored on `s3`. You can add more details of the storage in `options`.
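For reference, here is a minimal end-to-end sketch of the new IO upload path documented above. The client constructor, the `io:` keyword, and the `filename`/`mimetype` options come from the README diff; the API key, file path, and attribute values are placeholders.

```ruby
require 'filestack'
require 'stringio'

client = FilestackClient.new('YOUR_API_KEY')   # placeholder API key

# Any IO-like object that responds to #seek, #tell and #read works;
# reading a file into a StringIO here is purely for illustration.
io = StringIO.new(File.binread('/path/to/localfile.png'))

filelink = client.upload(
  io: io,
  options: {
    filename: 'custom_filename.png',   # otherwise the upload is named 'unnamed_file'
    mimetype: 'image/png'              # otherwise it falls back to application/octet-stream
  }
)
```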
data/VERSION CHANGED
@@ -1 +1 @@
- 2.7.0
+ 2.8.0
@@ -22,6 +22,8 @@ class FilestackConfig
  'Accept-Encoding' => "application/json"
  }.freeze

+ DEFAULT_UPLOAD_MIMETYPE = 'application/octet-stream'
+
  INTELLIGENT_ERROR_MESSAGES = ['BACKEND_SERVER', 'BACKEND_NETWORK', 'S3_SERVER', 'S3_NETWORK']

  def self.multipart_start_url
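The new constant is the final fallback when neither the caller nor magic-byte detection yields a type. A sketch of the resolution order used later in `get_file_attributes` (the `options` hash and `filepath` value are illustrative):

```ruby
require 'mimemagic'

options  = {}                          # illustrative caller options
filepath = '/path/to/localfile.png'    # illustrative path

# Resolution order: explicit option > magic-byte sniffing > octet-stream default.
mimetype = options[:mimetype] ||
           MimeMagic.by_magic(File.open(filepath)) ||
           FilestackConfig::DEFAULT_UPLOAD_MIMETYPE
```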
@@ -23,23 +23,25 @@ class FilestackClient
  @security = security
  end

- # Upload a local file or external url
+ # Upload a local file, external url or IO object
  # @param [String] filepath The path of a local file
  # @param [String] external_url An external URL
+ # @param [StringIO] io The IO object
  # @param [Hash] options User-supplied upload options
+ # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion
+ # @param [String] storage Default storage to be used for uploads
  #
  # return [Filestack::FilestackFilelink]
- def upload(filepath: nil, external_url: nil, options: {}, intelligent: false, timeout: 60, storage: 'S3')
- return 'You cannot upload a URL and file at the same time' if filepath && external_url
+ def upload(filepath: nil, external_url: nil, io: nil, options: {}, intelligent: false, timeout: 60, storage: 'S3')
+ return 'You cannot upload a URL and file at the same time' if (filepath || io) && external_url

- response = if filepath
- multipart_upload(@apikey, filepath, @security, options, timeout, storage, intelligent: intelligent)
+ response = if external_url
+ send_upload(@apikey, external_url, @security, options)
  else
- send_upload(@apikey,
- external_url: external_url,
- options: options,
- security: @security)
+ return 'You cannot upload IO object and file at the same time' if io && filepath
+ multipart_upload(@apikey, filepath, io, @security, options, timeout, storage, intelligent)
  end
+
  FilestackFilelink.new(response['handle'], security: @security, apikey: @apikey)
  end
  # Transform an external URL
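A short usage sketch of the dispatch above: exactly one source per call, and mixed sources short-circuit to an error string rather than raising. The key, paths, and URL are placeholders.

```ruby
require 'filestack'
require 'stringio'

client = FilestackClient.new('YOUR_API_KEY')

client.upload(filepath: '/tmp/photo.png')                    # multipart upload from disk
client.upload(io: StringIO.new('raw bytes'))                 # multipart upload from memory
client.upload(external_url: 'http://domain.com/image.png')   # URL-based upload via send_upload

# Mixing sources returns an error string instead of a FilestackFilelink:
client.upload(filepath: '/tmp/photo.png', external_url: 'http://domain.com/image.png')
# => "You cannot upload a URL and file at the same time"
```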
@@ -1,5 +1,5 @@
  module Filestack
  module Ruby
- VERSION = '2.7.0'.freeze
+ VERSION = '2.8.0'.freeze
  end
  end
@@ -13,13 +13,22 @@ include UploadUtils
  include IntelligentUtils
  # Includes all the utility functions for Filestack multipart uploads
  module MultipartUploadUtils
- def get_file_info(file)
- filename = File.basename(file)
+
+ def get_file_attributes(file, options = {})
+ filename = options[:filename] || File.basename(file)
+ mimetype = options[:mimetype] || MimeMagic.by_magic(File.open(file)) || FilestackConfig::DEFAULT_UPLOAD_MIMETYPE
  filesize = File.size(file)
- mimetype = MimeMagic.by_magic(File.open(file))
- if mimetype.nil?
- mimetype = 'application/octet-stream'
- end
+
+ [filename, filesize, mimetype.to_s]
+ end
+
+ def get_io_attributes(io, options = {})
+ filename = options[:filename] || 'unnamed_file'
+ mimetype = options[:mimetype] || FilestackConfig::DEFAULT_UPLOAD_MIMETYPE
+
+ io.seek(0, IO::SEEK_END)
+ filesize = io.tell
+
  [filename, filesize, mimetype.to_s]
  end
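To make the sizing trick in `get_io_attributes` concrete, a standalone illustration (the sample string is arbitrary):

```ruby
require 'stringio'

io = StringIO.new('hello world')

io.seek(0, IO::SEEK_END)   # move the read position to the end of the stream
filesize = io.tell         # the end position equals the total byte size
filesize                   # => 11

# No rewind is needed afterwards: each chunk reader seeks to an absolute
# offset (job[:seek_point]) before reading, as upload_chunk below shows.
```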
 
@@ -31,8 +40,10 @@ module MultipartUploadUtils
  # @param [String] mimetype Mimetype of incoming file
  # @param [FilestackSecurity] security Security object with
  # policy/signature
+ # @param [String] storage Default storage to be used for uploads
  # @param [Hash] options User-defined options for
  # multipart uploads
+ # @param [Bool] intelligent Upload file using Filestack Intelligent Ingestion
  #
  # @return [Typhoeus::Response]
  def multipart_start(apikey, filename, filesize, mimetype, security, storage, options = {}, intelligent)
@@ -67,22 +78,21 @@ module MultipartUploadUtils
  #
  # @param [String] apikey Filestack API key
  # @param [String] filename Name of incoming file
- # @param [String] filepath Local path to file
  # @param [Int] filesize Size of incoming file
  # @param [Typhoeus::Response] start_response Response body from
  # multipart_start
+ # @param [String] storage Default storage to be used for uploads
  # @param [Hash] options User-defined options for
  # multipart uploads
  #
  # @return [Array]
- def create_upload_jobs(apikey, filename, filepath, filesize, start_response, storage, options)
+ def create_upload_jobs(apikey, filename, filesize, start_response, storage, options)
  jobs = []
  part = 1
  seek_point = 0
  while seek_point < filesize
  part_info = {
  seek_point: seek_point,
- filepath: filepath,
  filename: filename,
  apikey: apikey,
  part: part,
@@ -92,7 +102,7 @@
  upload_id: start_response['upload_id'],
  location_url: start_response['location_url'],
  start_response: start_response,
- store: { location: storage }
+ store: { location: storage },
  }

  part_info[:store].merge!(options) if options
@@ -116,15 +126,16 @@
  # @param [Hash] job Hash of options needed
  # to upload a chunk
  # @param [String] apikey Filestack API key
- # @param [String] location_url Location url given back
+ # @param [String] filepath Location url given back
  # from endpoint
- # @param [String] filepath Local path to file
+ # @param [StringIO] io The IO object
  # @param [Hash] options User-defined options for
  # multipart uploads
+ # @param [String] storage Default storage to be used for uploads
  #
  # @return [Typhoeus::Response]
- def upload_chunk(job, apikey, filepath, options, storage)
- file = File.open(filepath)
+ def upload_chunk(job, apikey, filepath, io, options, storage)
+ file = filepath ? File.open(filepath) : io
  file.seek(job[:seek_point])
  chunk = file.read(FilestackConfig::DEFAULT_CHUNK_SIZE)
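A minimal sketch of the pattern this change enables: one seek/read path serving both a file on disk and an in-memory IO. The helper name, chunk size constant, and sample inputs are illustrative, not part of the gem.

```ruby
require 'stringio'

CHUNK_SIZE = 8 * 1024 * 1024   # illustrative; the gem uses FilestackConfig::DEFAULT_CHUNK_SIZE

# Read one part starting at seek_point from either source.
def read_part(seek_point, filepath: nil, io: nil)
  source = filepath ? File.open(filepath, 'rb') : io
  source.seek(seek_point)
  source.read(CHUNK_SIZE)
ensure
  source.close if filepath && source
end

read_part(0, io: StringIO.new('a' * 32))        # => 32 bytes read from memory
# read_part(CHUNK_SIZE, filepath: '/tmp/a.bin') # => second chunk read from disk
```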
 
@@ -139,7 +150,6 @@
  region: job[:region],
  upload_id: job[:upload_id],
  store: { location: storage },
- file: Tempfile.new(job[:filename])
  }
  data = data.merge!(options) if options
 
@@ -158,13 +168,14 @@
  # @param [String] filepath Local path to file
  # @param [Hash] options User-defined options for
  # multipart uploads
+ # @param [String] storage Default storage to be used for uploads
  #
  # @return [Array] Array of parts/etags strings
- def run_uploads(jobs, apikey, filepath, options, storage)
+ def run_uploads(jobs, apikey, filepath, io, options, storage)
  bar = ProgressBar.new(jobs.length)
  results = Parallel.map(jobs, in_threads: 4) do |job|
  response = upload_chunk(
- job, apikey, filepath, options, storage
+ job, apikey, filepath, io, options, storage
  )
  if response.code == 200
  bar.increment!
@@ -190,6 +201,8 @@ module MultipartUploadUtils
  # part numbers
  # @param [Hash] options User-defined options for
  # multipart uploads
+ # @param [String] storage Default storage to be used for uploads
+ # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion
  #
  # @return [Typhoeus::Response]
  def multipart_complete(apikey, filename, filesize, mimetype, start_response, parts_and_etags, options, storage, intelligent = false)
@@ -215,32 +228,39 @@ module MultipartUploadUtils
  #
  # @param [String] apikey Filestack API key
  # @param [String] filename Name of incoming file
+ # @param [StringIO] io The IO object
  # @param [FilestackSecurity] security Security object with
  # policy/signature
  # @param [Hash] options User-defined options for
  # multipart uploads
+ # @param [String] storage Default storage to be used for uploads
+ # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion
  #
  # @return [Hash]
- def multipart_upload(apikey, filepath, security, options, timeout, storage, intelligent: false)
- filename, filesize, mimetype = get_file_info(filepath)
+ def multipart_upload(apikey, filepath, io, security, options, timeout, storage, intelligent = false)
+ filename, filesize, mimetype = if filepath
+ get_file_attributes(filepath, options)
+ else
+ get_io_attributes(io, options)
+ end

  start_response = multipart_start(
  apikey, filename, filesize, mimetype, security, storage, options, intelligent
  )

  jobs = create_upload_jobs(
- apikey, filename, filepath, filesize, start_response, storage, options
+ apikey, filename, filesize, start_response, storage, options
  )

  if intelligent
  state = IntelligentState.new
- run_intelligent_upload_flow(jobs, state, storage)
+ run_intelligent_upload_flow(jobs, filepath, io, state, storage)
  response_complete = multipart_complete(
  apikey, filename, filesize, mimetype,
  start_response, nil, options, storage, intelligent
  )
  else
- parts_and_etags = run_uploads(jobs, apikey, filepath, options, storage)
+ parts_and_etags = run_uploads(jobs, apikey, filepath, io, options, storage)
  response_complete = multipart_complete(
  apikey, filename, filesize, mimetype,
  start_response, parts_and_etags, options, storage
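For completeness, a hedged sketch of reaching the intelligent branch above from the public client; only the `intelligent:` keyword comes from the upload signature shown earlier, and the key and path are placeholders.

```ruby
client = FilestackClient.new('YOUR_API_KEY')

# intelligent: true sends the job list through run_intelligent_upload_flow
# instead of run_uploads; both branches finish with multipart_complete.
filelink = client.upload(filepath: '/path/to/large_video.mp4', intelligent: true)
```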
@@ -88,7 +88,7 @@ module UploadUtils
  # @param [Hash] options User-defined options for
  # multipart uploads
  # @return [Hash]
- def send_upload(apikey, external_url: nil, security: nil, options: nil)
+ def send_upload(apikey, external_url = nil, security = nil, options = nil)
  base = "#{FilestackConfig::CDN_URL}/#{apikey}/#{build_store_task(options)}"

  if security
@@ -199,7 +199,7 @@ module IntelligentUtils
  # @param [IntelligentState] state An IntelligentState object
  #
  # @return [Array]
- def run_intelligent_upload_flow(jobs, state, storage)
+ def run_intelligent_upload_flow(jobs, filepath, io, state, storage)
  bar = ProgressBar.new(jobs.length)
  generator = create_intelligent_generator(jobs)
  working_offset = FilestackConfig::DEFAULT_OFFSET_SIZE
@@ -207,7 +207,7 @@ module IntelligentUtils
  batch = get_generator_batch(generator)
  # run parts
  Parallel.map(batch, in_threads: 4) do |part|
- state = run_intelligent_uploads(part, state, storage)
+ state = run_intelligent_uploads(part, filepath, io, state, storage)
  # condition: a chunk has failed but we have not reached the maximum retries
  while bad_state(state)
  # condition: timeout to S3, requiring offset size to be changed
@@ -219,7 +219,7 @@ module IntelligentUtils
  sleep(state.backoff)
  end
  state.add_retry
- state = run_intelligent_uploads(part, state, storage)
+ state = run_intelligent_uploads(part, filepath, io, state, storage)
  end
  raise "Upload has failed. Please try again later." unless state.ok
  bar.increment!
@@ -275,7 +275,7 @@ module IntelligentUtils
  # multipart_start
  #
  # @return [Dict]
- def chunk_job(job, state, apikey, filename, filepath, filesize, start_response, storage)
+ def chunk_job(job, state, apikey, filename, filesize, start_response, storage)
  offset = 0
  seek_point = job[:seek_point]
  chunk_list = []
@@ -283,7 +283,6 @@
  while (offset < FilestackConfig::DEFAULT_CHUNK_SIZE) && (seek_point + offset) < filesize
  chunk_list.push(
  seek_point: seek_point,
- filepath: filepath,
  filename: filename,
  apikey: apikey,
  part: job[:part],
@@ -307,15 +306,14 @@
  # @param [IntelligentState] state An IntelligentState object
  #
  # @return [IntelligentState]
- def run_intelligent_uploads(part, state, storage)
+ def run_intelligent_uploads(part, filepath, io, state, storage)
  failed = false
  chunks = chunk_job(
- part, state, part[:apikey], part[:filename], part[:filepath],
- part[:filesize], part[:start_response], storage
+ part, state, part[:apikey], part[:filename], part[:filesize], part[:start_response], storage
  )
  Parallel.map(chunks, in_threads: 3) do |chunk|
  begin
- upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options], storage)
+ upload_chunk_intelligently(chunk, state, part[:apikey], filepath, io, part[:options], storage)
  rescue => e
  state.error_type = e.message
  failed = true
@@ -364,8 +362,8 @@
  # multipart uploads
  #
  # @return [Typhoeus::Response]
- def upload_chunk_intelligently(job, state, apikey, filepath, options, storage)
- file = File.open(filepath)
+ def upload_chunk_intelligently(job, state, apikey, filepath, io, options, storage)
+ file = filepath ? File.open(filepath) : io
  file.seek(job[:seek_point] + job[:offset])

  chunk = file.read(state.offset)
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: filestack
  version: !ruby/object:Gem::Version
- version: 2.7.0
+ version: 2.8.0
  platform: ruby
  authors:
  - Filestack
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-09-28 00:00:00.000000000 Z
+ date: 2020-09-29 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: typhoeus