jpsclient 1.1.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f185bd85f53ebc4262ecc570b1bb0d69ace35f375f063f2b536109fc988f38d5
-  data.tar.gz: 3c34fde3e18d9ae5f98822f5c629fb1c693dbc54afc572f6db871c467ad322d8
+  metadata.gz: 119466770ec55de57abefc0e70ea4aa8ecd481f03e8d1cc949c476c183c0b128
+  data.tar.gz: 4b86a7ce21d27547e244fb74d0deaaecefb4b8f30d90bbba27b7a82391314b1b
 SHA512:
-  metadata.gz: dcd4b34b4f6171933976af6d3cf13bb712245abcefb74359b29f2c1862ceb95c7ef0398db86b2b73cb78b3537b2511c9f52a63e282dbc2334d0179c4547de8db
-  data.tar.gz: 156f2d4b7940a58e9936bdcac1d91b3d1b93a52f84768b68d38c5c32a5459836a8f67b046d07f12261d155527cf9974673332d21df97b5c3c25cc762e198ea1f
+  metadata.gz: bc4f5f634d54c043bf6e113af55678fd343babca1ad6a903c9e565c33f47fa333ed503741f23fe803e5b15882738804256d23177e21a2a424916e98156a794b5
+  data.tar.gz: d8b96409527709af81761bab9d69b92d433b6b744c13474cb185a8462566327480c4cc6a9b923f8cb441d88bd4df4a4bfcf543eb3708959cf72eb0045bc324fd
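Aside: the digests above cover the gem's two inner archives. A minimal Ruby sketch for re-deriving them locally, assuming you have already fetched jpsclient-1.1.1.gem and extracted metadata.gz and data.tar.gz from it; the file names come from checksums.yaml, the rest is illustrative:

    require 'digest'

    # Recompute the SHA256/SHA512 digests recorded in checksums.yaml
    # for the archives extracted from the downloaded .gem file.
    %w[metadata.gz data.tar.gz].each do |name|
      puts "#{name} SHA256: #{Digest::SHA256.file(name).hexdigest}"
      puts "#{name} SHA512: #{Digest::SHA512.file(name).hexdigest}"
    end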
@@ -45,6 +45,12 @@ module JPSClient
 
       @upload_binary_file = binary_file
       @file_size = File.size(@upload_binary_file)
+
+      # Handle empty files
+      if @file_size == 0
+        raise ExceptionError, "不能上传空文件: #{binary_file}"
+      end
+
       @progress_bar = UploadProgress.new(upload_total_size:@file_size)
       @upload_failed = false # Reset the upload-failure flag
 
@@ -134,11 +140,12 @@ module JPSClient
       )
 
       if complete_result && complete_result["code"] == 200
+        # Return the URL string to keep backward compatibility
         upload_result = complete_result.dig("data", "url") || s3_key
         Logger.instance.fancyinfo_success("文件#{@upload_binary_file} 上传成功! 😎😎😎")
       else
         upload_result = nil
-        error_msg = complete_result["msg"] || complete_result["message"] || "未知错误"
+        error_msg = complete_result ? (complete_result["msg"] || complete_result["message"] || "未知错误") : "未知错误"
         Logger.instance.fancyinfo_error("文件#{@upload_binary_file} 上传失败: #{error_msg} 😭😭😭")
       end
 
@@ -169,6 +176,8 @@ module JPSClient
 
     # Clean up all worker threads
     def cleanup_worker_threads
+      return unless @worker_threads # Guard against being called before initialization
+
       @worker_threads.each do |thread|
         # Try to terminate the thread safely
         thread.exit if thread.alive?
@@ -180,6 +189,7 @@ module JPSClient
       # Use a fixed-size thread pool to avoid unbounded thread growth
       @worker_threads = []
       @active_tasks = 0
+      @stop_workers = false
       @task_complete_cv = ConditionVariable.new
 
       # Create a fixed number of worker threads
@@ -187,29 +197,31 @@ module JPSClient
         @worker_threads << Thread.new { worker_loop }
       end
 
-      # Set up timeout protection
-      timeout_seconds = 300 # 5-minute timeout
+      # The main thread waits for all tasks to finish.
+      # There is no overall time limit; it keeps waiting until either:
+      # 1. every part has uploaded successfully, or
+      # 2. some part has failed for good after exhausting its retries
       start_time = Time.now
 
-      # The main thread waits for all tasks to finish
       @tasks_queue_mutex.synchronize do
         while (@active_tasks > 0 || !@tasks_queue.empty?) && !upload_failed?
-          remaining_time = timeout_seconds - (Time.now - start_time)
-          if remaining_time <= 0
-            set_upload_failed("上传任务超时")
-            break
-          end
-
-          # Wait for a task-completion notification
-          @task_complete_cv.wait(@tasks_queue_mutex, [remaining_time, 30].min)
+          # Wait for a task-completion notification, waking every 30 seconds to check state
+          @task_complete_cv.wait(@tasks_queue_mutex, 30)
         end
       end
 
       # Stop all worker threads
-      @worker_threads.each { |t| t.kill }
+      @stop_workers = true
+      @tasks_queue_mutex.synchronize { @task_complete_cv.broadcast }
 
-      # Check whether every part uploaded successfully
-      if @upload_eTags.length != @expected_parts && !upload_failed?
+      # Wait for each thread to finish (at most 5 seconds)
+      @worker_threads.each do |t|
+        t.join(5)
+        t.kill if t.alive? # Force-kill if still alive
+      end
+
+      # Only check part completeness when the upload has not already failed
+      if !upload_failed? && @upload_eTags.length != @expected_parts
         set_upload_failed("部分分片上传失败,已上传#{@upload_eTags.length}/#{@expected_parts}")
       end
     end
@@ -221,7 +233,7 @@ module JPSClient
 
       # Fetch a task from the queue
       @tasks_queue_mutex.synchronize do
-        return if upload_failed? && @tasks_queue.empty?
+        return if @stop_workers || (upload_failed? && @tasks_queue.empty?)
 
         if @tasks_queue.empty?
           # The queue is empty; wait for a new task
@@ -230,9 +242,11 @@ module JPSClient
         end
 
         upload_params_item = @tasks_queue.pop
-        @active_tasks_mutex.synchronize { @active_tasks += 1 }
       end
 
+      # Increment the active-task count outside the lock to avoid nested locking
+      @active_tasks_mutex.synchronize { @active_tasks += 1 } if upload_params_item
+
       # Process the task
       if upload_params_item
         begin
@@ -254,6 +268,14 @@ module JPSClient
       part_no = upload_params_item["partNo"]
       s3_key = upload_params_item["s3Key"]
       upload_id = upload_params_item["uploadId"]
+      retry_count = upload_params_item["retryCount"]
+
+      # Log retry information (only when retrying)
+      # Commented out to avoid interrupting the progress bar
+      # current_attempt = @upload_config.max_retry_times - retry_count + 1
+      # if current_attempt > 1
+      #   Logger.instance.info("上传分片 ##{part_no},第 #{current_attempt} 次尝试(共 #{@upload_config.max_retry_times + 1} 次机会)")
+      # end
 
       # Step 2: get the presigned URL for this part
       sign_result = @jps_client.get_file_sign_url(
@@ -264,7 +286,9 @@ module JPSClient
       )
 
       if sign_result.nil? || !sign_result.dig("data", "url")
-        raise ExceptionError, "获取分片#{part_no}的上传URL失败"
+        # Logger.instance.info("分片 ##{part_no} 获取上传URL失败") # avoid interrupting the progress bar
+        handle_retry(upload_params_item, "获取上传URL失败")
+        return
       end
 
       upload_url = sign_result["data"]["url"]
@@ -278,63 +302,106 @@ module JPSClient
         read_length = chunk_size
       end
 
-      file = File.open(@upload_binary_file, "rb")
+      # Read the file data (each thread opens the file independently, so no mutex is needed)
+      put_data = nil
       begin
-        file.seek(start_position)
-        put_data = file.read(read_length)
-
-        # Create the upload request (using the presigned URL directly)
-        request = Typhoeus::Request.new(
-          upload_url,
-          method: :put,
-          body: put_data,
-          headers: {
-            'Content-Type' => 'application/octet-stream',
-            'Content-Length' => read_length.to_s
-          },
-          timeout: 300 # 5-minute timeout
-        )
-
-        # Set up the upload progress callback
-        upload_size_last = 0
-        request.on_progress do |dltotal, dlnow, ultotal, ulnow|
-          if ulnow && ulnow > upload_size_last
-            upload_size_last = ulnow
+        file = File.open(@upload_binary_file, "rb")
+        begin
+          file.seek(start_position)
+          put_data = file.read(read_length)
+        ensure
+          file.close if file
+        end
+      rescue => e
+        # Logger.instance.info("分片 ##{part_no} 读取文件失败: #{e.message}") # avoid interrupting the progress bar
+        handle_retry(upload_params_item, "文件读取失败: #{e.message}")
+        return
+      end
+
+      # Create the upload request (using the presigned URL directly)
+      # For a single part, use a more reasonable timeout (adjusted dynamically to the part size)
+      # A 5MB part takes roughly 50 seconds on a slow network (100KB/s), so the base timeout should be longer
+      chunk_timeout = calculate_chunk_timeout(read_length)
+
+      request = Typhoeus::Request.new(
+        upload_url,
+        method: :put,
+        body: put_data,
+        headers: {
+          'Content-Type' => 'application/octet-stream',
+          'Content-Length' => read_length.to_s
+        },
+        timeout: chunk_timeout,
+        connecttimeout: 30 # 30-second connect timeout
+      )
+
+      # Set up the upload progress callback
+      upload_size_last = 0
+      last_progress_time = Time.now
+      request.on_progress do |dltotal, dlnow, ultotal, ulnow|
+        if ulnow && ulnow > upload_size_last
+          upload_size_last = ulnow
+          # Throttle progress updates to avoid refreshing too often
+          if Time.now - last_progress_time > 0.5 # update at most every 0.5 seconds
             @progress_bar.update_upload_index(upload_part:part_no, upload_size:ulnow)
             @progress_bar.update_upload_progress()
+            last_progress_time = Time.now
           end
         end
+      end
 
-        # Set the request timeout
-        request.options[:timeout] = 300 # 5-minute timeout
-
-        # Run the request and wait for completion
-        response = request.run
+      # Run the request and wait for completion
+      response = request.run
 
-        # Handle the response
-        if response.success?
-          @progress_bar.complete_upload_index(upload_part:part_no, complete_size:read_length)
-          # Only update the progress bar when not yet done, to avoid duplicate output at 100%
-          unless @progress_bar.is_done
-            @progress_bar.update_upload_progress()
-          end
-          # The new API no longer needs ETags collected; just record the successful part number
-          @upload_eTags_mutex.synchronize { @upload_eTags << part_no }
-        else
-          @progress_bar.delete_upload_index(upload_part:part_no)
-          upload_params_item["retryCount"] = upload_params_item["retryCount"] - 1
-          if upload_params_item["retryCount"] > 0
-            # Retry the task
-            @tasks_queue_mutex.synchronize { @tasks_queue.push(upload_params_item) }
-          else
-            set_upload_failed("文件#{@upload_binary_file} 分片#{part_no}上传失败: HTTP #{response.code}")
-          end
+      # Handle the response
+      if response.success?
+        @progress_bar.complete_upload_index(upload_part:part_no, complete_size:read_length)
+        # Only update the progress bar when not yet done, to avoid duplicate output at 100%
+        unless @progress_bar.is_done
+          @progress_bar.update_upload_progress()
         end
-      ensure
-        file.close
+        # The new API no longer needs ETags collected; just record the successful part number
+        @upload_eTags_mutex.synchronize { @upload_eTags << part_no }
+        # Logger.instance.info("分片 ##{part_no} 上传成功") # avoid interrupting the progress bar
+      elsif response.timed_out?
+        # Timed out
+        # Logger.instance.info("分片 ##{part_no} 上传超时,响应时间: #{response.time}秒") # avoid interrupting the progress bar
+        handle_retry(upload_params_item, "超时")
+      elsif response.code == 0
+        # Network error
+        error_msg = response.return_message || "未知网络错误"
+        # Logger.instance.info("分片 ##{part_no} 网络错误: #{error_msg}") # avoid interrupting the progress bar
+        handle_retry(upload_params_item, error_msg)
+      else
+        # HTTP error
+        # Logger.instance.info("分片 ##{part_no} HTTP错误: #{response.code}, Body: #{response.body[0..200] if response.body}") # avoid interrupting the progress bar
+        handle_retry(upload_params_item, "HTTP #{response.code}")
+      end
+    end
+
+    def calculate_chunk_timeout(chunk_size)
+      # Use the configured timeout_seconds as the per-part timeout
+      # Defaults to 600 seconds (10 minutes)
+      @upload_config.timeout_seconds || 600
+    end
+
+    def handle_retry(upload_params_item, error_reason)
+      part_no = upload_params_item["partNo"]
+      @progress_bar.delete_upload_index(upload_part:part_no)
+
+      upload_params_item["retryCount"] = upload_params_item["retryCount"] - 1
+      if upload_params_item["retryCount"] > 0
+        # Simple retry: push the task straight back onto the queue (avoids creating extra threads)
+        # Logger.instance.info("分片 ##{part_no} 准备重试,剩余重试次数: #{upload_params_item["retryCount"]}") # avoid interrupting the progress bar
+
+        # Push it to the back of the queue so other parts run first
+        @tasks_queue_mutex.synchronize { @tasks_queue.push(upload_params_item) }
+      else
+        set_upload_failed("文件#{@upload_binary_file} 分片#{part_no}上传失败: #{error_reason},已达最大重试次数")
       end
     end
 
 
+
   end
 end
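Net effect of the hunks above: in 1.1.0 a part was retried only on a non-2xx HTTP response and the whole upload was cut off by a hard 5-minute timeout, while in 1.1.1 every failure path (URL signing, file read, timeout, network error, HTTP error) goes through handle_retry, which requeues the part until its retryCount is spent. A minimal standalone sketch of that requeue-on-failure pattern, in plain Ruby with no jpsclient APIs (the hash keys mirror the diff; everything else is illustrative):

    # Each part task carries its own retry budget; a failure decrements the budget
    # and pushes the task to the back of the queue until the budget runs out.
    queue = Queue.new
    queue << { "partNo" => 1, "retryCount" => 6 } # 6 matches the new max_retry_times default

    attempts = 0
    until queue.empty?
      part = queue.pop
      attempts += 1
      succeeded = false # pretend every attempt fails, to show the exhaustion path
      next if succeeded

      part["retryCount"] -= 1
      if part["retryCount"] > 0
        queue << part # requeue behind any other pending parts
      else
        puts "part ##{part["partNo"]} gave up after #{attempts} attempts"
      end
    end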
@@ -12,7 +12,8 @@ module JPSClient
     attr_accessor :upload_type # New: upload type
     attr_accessor :concurrent_workers # New: number of concurrent upload worker threads
     attr_accessor :chunk_size_mb # New: part size (MB)
-    attr_accessor :max_retry_times # New: maximum retry count
+    attr_accessor :max_retry_times # New: maximum retries per part
+    attr_accessor :timeout_seconds # New: per-part timeout (seconds)
 
     def initialize(
       region: nil,
@@ -24,7 +25,8 @@ module JPSClient
       upload_type: nil,
       concurrent_workers: nil,
       chunk_size_mb: nil,
-      max_retry_times: nil
+      max_retry_times: nil,
+      timeout_seconds: nil
     )
       @region = region
       @bucket_name = bucket_name
@@ -36,6 +38,7 @@ module JPSClient
       @concurrent_workers = concurrent_workers
       @chunk_size_mb = chunk_size_mb
       @max_retry_times = max_retry_times
+      @timeout_seconds = timeout_seconds
     end
 
     # Create an instance from a JSON config
@@ -55,7 +58,8 @@ module JPSClient
         upload_type: json_config['upload_type'] || "s3",
         concurrent_workers: concurrent_workers,
         chunk_size_mb: json_config['chunk_size_mb'] || 5,
-        max_retry_times: json_config['max_retry_times'] || 3
+        max_retry_times: json_config['max_retry_times'] || 6, # 6 retries per part
+        timeout_seconds: json_config['timeout_seconds'] || 600 # 10-minute timeout per part
       )
     end
 
@@ -121,7 +125,8 @@ module JPSClient
         'upload_type' => @upload_type,
         'concurrent_workers' => @concurrent_workers,
         'chunk_size_mb' => @chunk_size_mb,
-        'max_retry_times' => @max_retry_times
+        'max_retry_times' => @max_retry_times,
+        'timeout_seconds' => @timeout_seconds
       }
     end
   end
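Read together, the defaults above mean a config file that omits the new keys now gets 6 retries per part and a 600-second per-part timeout. A hypothetical upload section of the JSON config consumed by the constructor above, shown here as the equivalent Ruby hash (key names are taken from the diff; the concrete values and surrounding layout are assumptions):

    {
      "upload_type"        => "s3",   # default when omitted
      "concurrent_workers" => 4,      # assumed value
      "chunk_size_mb"      => 5,      # default when omitted
      "max_retry_times"    => 6,      # new default in 1.1.1 (was 3)
      "timeout_seconds"    => 600     # new in 1.1.1: per-part timeout in seconds
    }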
@@ -1,3 +1,3 @@
 module JPSClient
-  VERSION = "1.1.0"
+  VERSION = "1.1.1"
 end
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: jpsclient
 version: !ruby/object:Gem::Version
-  version: 1.1.0
+  version: 1.1.1
 platform: ruby
 authors:
 - Your Name
 bindir: bin
 cert_chain: []
-date: 2025-09-29 00:00:00.000000000 Z
+date: 2025-10-05 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: faraday