torch-rb 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 7884d0bd8ffd23e775b3a1ca59e536bc18cd79c6bbe2736a9802a2b1693d40de
- data.tar.gz: 10849c8a248a70eca7178ca3529fa7428fc3d8e5cfe2ca774b0be6cb5e75eb9b
+ metadata.gz: d2f88c938144476a772fd7c606751e0a6e67a338cb1c37ede9c2011db7fc4579
+ data.tar.gz: f4408457e3bf8c7bf9b42863459248aa592c70e8e3924cdbe72863a979e65106
  SHA512:
- metadata.gz: d81587eae00527e9d1e4a65b62a686dbdab27eed9401539faddf553d3a0730ad7394b01bac615bdd0474d1a6602cd0a3117e1d7cb3a3f1d5fbf8bd989fa39e59
- data.tar.gz: d581d07821f103ee69267bc8e6b15d5094d8b8ef004917af947e2570649e7fbe750c9893d40600b62ee970646654fc545a270e7a59adb764be2ba73f1fa18b62
+ metadata.gz: 90ebc506942809b02331f7accce6e93e714a57b5d2f58b06ad0d3c60204b947eba61fe0bf43431a19e27890f5c764cb348ccb1785a2ffccfba7969f4726b6f1f
+ data.tar.gz: 410af7b4934f79aaae94f6c1bde1d17ee8b043f02354b4ea9f139271575f43abb90afeba162badfa01a3ec406612ae6cdcb4a2adfb1067b1634982a590d10cc9
data/CHANGELOG.md CHANGED
@@ -1,3 +1,17 @@
+ ## 0.11.0 (2022-07-06)
+
+ - Updated LibTorch to 1.12.0
+ - Dropped support for Ruby < 2.7
+
+ ## 0.10.2 (2022-06-14)
+
+ - Improved numeric operations between scalars and tensors
+ - Fixed `dtype` of `cumsum` method
+
+ ## 0.10.1 (2022-04-12)
+
+ - Fixed `dtype`, `device`, and `layout` for `new_*` and `like_*` methods
+
  ## 0.10.0 (2022-03-13)

  - Updated LibTorch to 1.11.0
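To make the 0.10.1 and 0.10.2 entries concrete, here is a minimal sketch of the fixed behavior, assuming the usual torch-rb tensor API (`Torch.tensor`, `new_ones`, `cumsum`) and PyTorch's dtype semantics; the return values shown are illustrative:

```ruby
require "torch"

# 0.10.2: cumsum on an integer tensor keeps an integer dtype
x = Torch.tensor([1, 2, 3])
x.cumsum(0).dtype # => :int64

# 0.10.1: new_* methods inherit dtype/device/layout from the source tensor
y = Torch.tensor([1.0, 2.0], dtype: :float64)
y.new_ones([2]).dtype # => :float64
```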
data/README.md CHANGED
@@ -409,7 +409,8 @@ Here’s the list of compatible versions.
 
  Torch.rb | LibTorch
  --- | ---
- 0.10.0+ | 1.11.0+
+ 0.11.0+ | 1.12.0+
+ 0.10.0-0.10.2 | 1.11.0
  0.9.0-0.9.2 | 1.10.0-1.10.2
  0.8.0-0.8.3 | 1.9.0-1.9.1
  0.6.0-0.7.0 | 1.8.0-1.8.1
@@ -421,13 +422,25 @@ Torch.rb | LibTorch
 
  ### Homebrew
 
- For Mac, you can use Homebrew.
+ You can also use Homebrew.
 
  ```sh
  brew install libtorch
  ```
 
- Then install the gem (no need for `bundle config`).
+ For Mac ARM, run:
+
+ ```sh
+ bundle config build.torch-rb --with-torch-dir=/opt/homebrew
+ ```
+
+ And for Linux, run:
+
+ ```sh
+ bundle config build.torch-rb --with-torch-dir=/home/linuxbrew/.linuxbrew
+ ```
+
+ Then install the gem.
 
  ## Performance
 
data/codegen/function.rb CHANGED
@@ -60,7 +60,7 @@ class Function
 
  optional = false
  if type.include?("?")
- optional = true unless ["dtype", "device", "layout", "pin_memory"].include?(name)
+ optional = true
  type = type.delete("?")
  end
 
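With the special case above removed, every `?`-suffixed type in `native_functions.yaml` is now parsed as optional, including `dtype`, `device`, `layout`, and `pin_memory`. A self-contained Ruby sketch of that parsing step:

```ruby
# A "?" suffix on a YAML type (e.g. "ScalarType?") marks the parameter
# optional; the suffix is then stripped to get the base type.
type = "ScalarType?"
optional = false
if type.include?("?")
  optional = true
  type = type.delete("?")
end
optional # => true
type     # => "ScalarType"
```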
@@ -39,7 +39,14 @@ def skip_functions(functions)
  f.base_name == "index_put" ||
  # not supported yet
  f.func.include?("Dimname") ||
- f.func.include?("ConstQuantizerPtr")
+ f.func.include?("ConstQuantizerPtr") ||
+ f.func.include?("SymInt") ||
+ # TODO fix LibTorch 1.12 changes
+ f.base_name == "histogramdd" ||
+ f.base_name == "nested_tensor" ||
+ f.base_name == "split_copy" ||
+ f.base_name == "split_with_sizes_copy" ||
+ f.base_name == "unbind_copy"
  end
  end
 
@@ -250,7 +257,7 @@ def generate_dispatch(function, def_method)
 
  cpp_params = generate_dispatch_params(function, params)
  if opt_index
- cpp_params.insert(remove_self ? opt_index + 1 : opt_index, "const TensorOptions & options")
+ cpp_params.insert(remove_self ? opt_index + 1 : opt_index, "TensorOptions options")
  end
 
  retval = generate_dispatch_retval(function)
@@ -293,7 +300,13 @@ def split_opt_params(params)
  end
 
  def generate_tensor_options(function, opt_params)
- code = "\n const auto options = TensorOptions()"
+ new_function = function.base_name.start_with?("new_")
+ like_function = function.base_name.end_with?("_like")
+
+ code = String.new("")
+ code << "\n auto self = _r.tensor(0);" if like_function
+ code << "\n const auto options = TensorOptions()"
+
  order = ["dtype", "device", "layout", "requires_grad", "pin_memory"]
  opt_params.sort_by { |v| order.index(v[:name]) }.each do |opt|
  i = opt[:position]
@@ -304,12 +317,24 @@ def generate_tensor_options(function, opt_params)
  if function.base_name == "arange"
  "dtype(_r.scalartypeOptional(#{i}))"
  else
- "dtype(_r.scalartype(#{i}))"
+ if new_function || like_function
+ "dtype(_r.scalartypeWithDefault(#{i}, self.scalar_type()))"
+ else
+ "dtype(_r.scalartype(#{i}))"
+ end
  end
  when "device"
- "device(_r.device(#{i}))"
+ if new_function || like_function
+ "device(_r.deviceWithDefault(#{i}, self.device()))"
+ else
+ "device(_r.device(#{i}))"
+ end
  when "layout"
- "layout(_r.layoutOptional(#{i}))"
+ if new_function || like_function
+ "layout(_r.layoutWithDefault(#{i}, self.layout()))"
+ else
+ "layout(_r.layoutOptional(#{i}))"
+ end
  when "requires_grad"
  "requires_grad(_r.toBool(#{i}))"
  when "pin_memory"
@@ -392,7 +417,7 @@ def generate_function_params(function, params, remove_self)
  else
  "optionalTensor"
  end
- when "generator", "tensorlist", "intlist"
+ when "generator", "tensorlist"
  func
  when "string"
  "stringViewOptional"
@@ -453,7 +478,11 @@ def generate_dispatch_params(function, params)
  when "float"
  "double"
  when /\Aint\[/
- "IntArrayRef"
+ if param[:optional]
+ "at::OptionalIntArrayRef"
+ else
+ "IntArrayRef"
+ end
  when "float[]"
  "ArrayRef<double>"
  when "str"
@@ -462,13 +491,19 @@ def generate_dispatch_params(function, params)
  else
  "std::string"
  end
- when "Scalar", "bool", "ScalarType", "Layout", "Device", "Storage", "Generator", "MemoryFormat", "Storage"
+ when "Scalar"
+ if param[:optional]
+ "const c10::optional<Scalar> &"
+ else
+ "const Scalar &"
+ end
+ when "bool", "ScalarType", "Layout", "Device", "Storage", "Generator", "MemoryFormat", "Storage"
  param[:type]
  else
  raise "Unknown type: #{param[:type]} (#{function.name})"
  end
 
- if param[:optional] && param[:type] != "Tensor"
+ if param[:optional] && !["Tensor", "Scalar"].include?(param[:type]) && !param[:type].start_with?("int[")
  type = "c10::optional<#{type}>"
  end
 
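Taken together, these dispatch-type changes give `int[]` and `Scalar` dedicated optional C++ types instead of the generic `c10::optional<T>` wrapper. A condensed sketch of the resulting mapping (the helper name is hypothetical; `Tensor` handling is elided):

```ruby
# C++ parameter type chosen for a native type string, mirroring the
# case/when logic in generate_dispatch_params.
def cpp_param_type(type, optional)
  base =
    case type
    when /\Aint\[/ then optional ? "at::OptionalIntArrayRef" : "IntArrayRef"
    when "Scalar"  then optional ? "const c10::optional<Scalar> &" : "const Scalar &"
    else type # e.g. "ScalarType", "Layout"
    end
  if optional && !["Tensor", "Scalar"].include?(type) && !type.start_with?("int[")
    base = "c10::optional<#{base}>"
  end
  base
end

cpp_param_type("int[2]", true)     # => "at::OptionalIntArrayRef"
cpp_param_type("Scalar", true)     # => "const c10::optional<Scalar> &"
cpp_param_type("ScalarType", true) # => "c10::optional<ScalarType>"
```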
@@ -511,7 +546,7 @@ def generate_dispatch_retval(function)
  when ["float", "float"]
  "std::tuple<double,double>"
  else
- raise "Unknown retvals: #{types}"
+ raise "Unknown retvals: #{types} (#{function.name})"
  end
  end