torch-rb 0.13.0 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 772ef01be10ea497b3dd029c74bd87c650bd2adf2d1f3f26688d9401d2a1d2af
-  data.tar.gz: d22bc868344e5d64d70e5327c64dcc3ffe3deacaab4998e157eeec04f27a71de
+  metadata.gz: 311e86910351cc5050fb92146e1f6c9ad018a9609d0f5ca0ef51253f1b813b22
+  data.tar.gz: afc18a5142abecba2fdd17f669ecdf9de5b90f35e8c79a637c4e960427a08439
 SHA512:
-  metadata.gz: b66ec8876a57926a39c2111684205e4110a21af7f9e3dc0c660784f3a27a042fc9395a86bd5cc90bbb99e062116e0e95b7041e2ae9bac2f885b68788a48d97a4
-  data.tar.gz: 6417859882a411b2fcb9ab24b694b6934444e4ac38a9f478f4878eac26730de4c220d9bd38e8e728a263422a752ab47c842528462184ea839e157698e4209d51
+  metadata.gz: 29df29a3dd0f752f4da731b84f0ca40832d49510a76784364d607977c8e90e32074f7e17ea07167e79330a97ef5f3378b19331d326301a3cc5a0e76180384ac6
+  data.tar.gz: 50e96247102c02cb6f5b0ddc470947f06dfb4efcc3a675ee174a334c4263c1dfe9f3be9c5bfb249239b8b4abce79f6e6dc625833081ec101180ba9a757f3421c
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+## 0.13.1 (2023-05-03)
+
+- Fixed error with Rice 4.1
+
 ## 0.13.0 (2023-04-13)
 
 - Updated LibTorch to 2.0.0
data/README.md CHANGED
@@ -410,18 +410,12 @@ Here’s the list of compatible versions.
 
 Torch.rb | LibTorch
 --- | ---
-0.13.0+ | 2.0.0+
-0.12.0-0.12.2 | 1.13.0-1.13.1
-0.11.0-0.11.2 | 1.12.0-1.12.1
-0.10.0-0.10.2 | 1.11.0
-0.9.0-0.9.2 | 1.10.0-1.10.2
-0.8.0-0.8.3 | 1.9.0-1.9.1
-0.6.0-0.7.0 | 1.8.0-1.8.1
-0.5.0-0.5.3 | 1.7.0-1.7.1
-0.3.0-0.4.2 | 1.6.0
-0.2.0-0.2.7 | 1.5.0-1.5.1
-0.1.8 | 1.4.0
-0.1.0-0.1.7 | 1.3.1
+0.13.x | 2.0.x
+0.12.x | 1.13.x
+0.11.x | 1.12.x
+0.10.x | 1.11.x
+0.9.x | 1.10.x
+0.8.x | 1.9.x
 
 ### Homebrew
 
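A Gemfile pin follows directly from the compatibility table above; a minimal sketch (the exact constraint is an assumption — pick the row matching your LibTorch install):

```ruby
# Gemfile (hypothetical pin; see the compatibility table)
source "https://rubygems.org"

gem "torch-rb", "~> 0.13" # pairs with LibTorch 2.0.x per the table above
```
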
@@ -433,7 +427,11 @@ brew install pytorch
 
 ## Performance
 
-Deep learning is significantly faster on a GPU. With Linux, install [CUDA](https://developer.nvidia.com/cuda-downloads) and [cuDNN](https://developer.nvidia.com/cudnn) and reinstall the gem.
+Deep learning is significantly faster on a GPU.
+
+### Linux
+
+With Linux, install [CUDA](https://developer.nvidia.com/cuda-downloads) and [cuDNN](https://developer.nvidia.com/cudnn) and reinstall the gem.
 
 Check if CUDA is available
 
@@ -455,6 +453,21 @@ ankane/ml-stack:torch-gpu
 
 And leave the other fields in that section blank. Once the notebook is running, you can run the [MNIST example](https://github.com/ankane/ml-stack/blob/master/torch-gpu/MNIST.ipynb).
 
+### Mac
+
+With Apple silicon, check if Metal Performance Shaders (MPS) is available
+
+```ruby
+Torch::Backends::MPS.available?
+```
+
+Move a neural network to a GPU
+
+```ruby
+device = Torch.device("mps")
+net.to(device)
+```
+
 ## History
 
 View the [changelog](https://github.com/ankane/torch.rb/blob/master/CHANGELOG.md)
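Read together with the CUDA section above, runtime device selection can be sketched as follows — a minimal example using only the `available?` checks and `Torch.device` calls shown in this README:

```ruby
require "torch"

# Prefer CUDA, then Apple's MPS, otherwise fall back to the CPU.
device =
  if Torch::CUDA.available?
    Torch.device("cuda")
  elsif Torch::Backends::MPS.available?
    Torch.device("mps")
  else
    Torch.device("cpu")
  end

net = Torch::NN::Linear.new(10, 1) # any Torch::NN::Module works here
net.to(device)
```
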
@@ -156,10 +156,7 @@ def generate_attach_def(name, type, def_method)
   ruby_name = ruby_name.sub(/\Asparse_/, "") if type == "sparse"
   ruby_name = name if name.start_with?("__")
 
-  # cast for Ruby < 3.0 https://github.com/thisMagpie/fftw/issues/22#issuecomment-49508900
-  cast = RUBY_VERSION.to_f > 2.7 ? "" : "(VALUE (*)(...)) "
-
-  "rb_#{def_method}(m, \"#{ruby_name}\", #{cast}#{full_name(name, type)}, -1);"
+  "rb_#{def_method}(m, \"#{ruby_name}\", #{full_name(name, type)}, -1);"
 end
 
 def generate_method_def(name, functions, type, def_method)
data/ext/torch/backends.cpp CHANGED
@@ -8,14 +8,11 @@ void init_backends(Rice::Module& m) {
   auto rb_mBackends = Rice::define_module_under(m, "Backends");
 
   Rice::define_module_under(rb_mBackends, "OpenMP")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasOpenMP);
 
   Rice::define_module_under(rb_mBackends, "MKL")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMKL);
 
   Rice::define_module_under(rb_mBackends, "MPS")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMPS);
 }
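The singleton functions bound above are what power the Ruby-level availability checks; a small usage sketch:

```ruby
require "torch"

# Each Backends submodule exposes available?, bound to torch::hasOpenMP,
# torch::hasMKL, and torch::hasMPS respectively.
puts Torch::Backends::OpenMP.available?
puts Torch::Backends::MKL.available?
puts Torch::Backends::MPS.available?
```
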
data/ext/torch/cuda.cpp CHANGED
@@ -6,7 +6,6 @@
 
 void init_cuda(Rice::Module& m) {
   Rice::define_module_under(m, "CUDA")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::cuda::is_available)
     .define_singleton_function("device_count", &torch::cuda::device_count)
     .define_singleton_function("manual_seed", &torch::cuda::manual_seed)
@@ -6,7 +6,6 @@
6
 
 void init_device(Rice::Module& m) {
   Rice::define_class_under<torch::Device>(m, "Device")
-    .add_handler<torch::Error>(handle_error)
     .define_constructor(Rice::Constructor<torch::Device, const std::string&>())
     .define_method(
       "index",
data/ext/torch/fft.cpp CHANGED
@@ -8,6 +8,5 @@
 
 void init_fft(Rice::Module& m) {
   auto rb_mFFT = Rice::define_module_under(m, "FFT");
-  rb_mFFT.add_handler<torch::Error>(handle_error);
   add_fft_functions(rb_mFFT);
 }
data/ext/torch/generator.cpp CHANGED
@@ -7,7 +7,6 @@
 void init_generator(Rice::Module& m, Rice::Class& rb_cGenerator) {
   // https://github.com/pytorch/pytorch/blob/master/torch/csrc/Generator.cpp
   rb_cGenerator
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "new",
       []() {
data/ext/torch/ivalue.cpp CHANGED
@@ -7,7 +7,6 @@
 void init_ivalue(Rice::Module& m, Rice::Class& rb_cIValue) {
   // https://pytorch.org/cppdocs/api/structc10_1_1_i_value.html
   rb_cIValue
-    .add_handler<torch::Error>(handle_error)
     .define_method("bool?", &torch::IValue::isBool)
     .define_method("bool_list?", &torch::IValue::isBoolList)
     .define_method("capsule?", &torch::IValue::isCapsule)
data/ext/torch/linalg.cpp CHANGED
@@ -8,6 +8,5 @@
 
 void init_linalg(Rice::Module& m) {
   auto rb_mLinalg = Rice::define_module_under(m, "Linalg");
-  rb_mLinalg.add_handler<torch::Error>(handle_error);
   add_linalg_functions(rb_mLinalg);
 }
data/ext/torch/nn.cpp CHANGED
@@ -14,11 +14,9 @@ class Parameter: public torch::autograd::Variable {
 
 void init_nn(Rice::Module& m) {
   auto rb_mNN = Rice::define_module_under(m, "NN");
-  rb_mNN.add_handler<torch::Error>(handle_error);
   add_nn_functions(rb_mNN);
 
   Rice::define_module_under(rb_mNN, "Init")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "_calculate_gain",
       [](NonlinearityType nonlinearity, double param) {
@@ -91,7 +89,6 @@ void init_nn(Rice::Module& m) {
     });
 
   Rice::define_class_under<Parameter, torch::Tensor>(rb_mNN, "Parameter")
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "grad",
       [](Parameter& self) {
data/ext/torch/random.cpp CHANGED
@@ -6,7 +6,6 @@
 
 void init_random(Rice::Module& m) {
   Rice::define_module_under(m, "Random")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "initial_seed",
       []() {
data/ext/torch/special.cpp CHANGED
@@ -8,6 +8,5 @@
 
 void init_special(Rice::Module& m) {
   auto rb_mSpecial = Rice::define_module_under(m, "Special");
-  rb_mSpecial.add_handler<torch::Error>(handle_error);
   add_special_functions(rb_mSpecial);
 }
data/ext/torch/tensor.cpp CHANGED
@@ -96,7 +96,6 @@ static VALUE tensor__backward(int argc, VALUE* argv, VALUE self_)
 
 void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions) {
   rb_cTensor = c;
-  rb_cTensor.add_handler<torch::Error>(handle_error);
   add_tensor_functions(rb_cTensor);
   THPVariableClass = rb_cTensor.value();
 
@@ -286,7 +285,6 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
     });
 
   rb_cTensorOptions
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "dtype",
       [](torch::TensorOptions& self, int dtype) {
data/ext/torch/torch.cpp CHANGED
@@ -24,7 +24,7 @@ torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch:
 }
 
 void init_torch(Rice::Module& m) {
-  m.add_handler<torch::Error>(handle_error);
+  register_handler<torch::Error>(handle_global_error);
   add_torch_functions(m);
   m.define_singleton_function(
     "grad_enabled?",
data/ext/torch/utils.h CHANGED
@@ -10,8 +10,7 @@ static_assert(
   "Incompatible LibTorch version"
 );
 
-// TODO find better place
-inline void handle_error(torch::Error const & ex) {
+inline void handle_global_error(const torch::Error& ex) {
   throw Rice::Exception(rb_eRuntimeError, ex.what_without_backtrace());
 }
 
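Since the handler converts `torch::Error` into a Ruby `RuntimeError`, LibTorch failures can be rescued like any other Ruby exception; a small sketch:

```ruby
require "torch"

begin
  # Shape mismatch: LibTorch raises torch::Error in C++, which the handler
  # above re-raises as a Ruby RuntimeError.
  Torch.ones(2, 2).matmul(Torch.ones(3, 3))
rescue => e
  puts e.message
end
```
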
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Torch
-  VERSION = "0.13.0"
+  VERSION = "0.13.1"
 end
data/lib/torch-rb.rb CHANGED
@@ -1 +1 @@
-require "torch"
+require_relative "torch"
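The switch to `require_relative` makes the gem load its own files regardless of what else is on the load path; a sketch of the distinction:

```ruby
# require "torch" searches $LOAD_PATH and could pick up any torch.rb there;
# require_relative "torch" resolves against this file's directory (__dir__),
# so lib/torch-rb.rb always loads its sibling lib/torch.rb.
require_relative "torch"
```
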
data/lib/torch.rb CHANGED
@@ -1,5 +1,5 @@
 # ext
-require "torch/ext"
+require_relative "torch/ext"
 
 # stdlib
 require "fileutils"
@@ -8,197 +8,197 @@ require "set"
 require "tmpdir"
 
 # modules
-require "torch/inspector"
-require "torch/tensor"
-require "torch/version"
+require_relative "torch/inspector"
+require_relative "torch/tensor"
+require_relative "torch/version"
 
 # optim
-require "torch/optim/optimizer"
-require "torch/optim/adadelta"
-require "torch/optim/adagrad"
-require "torch/optim/adam"
-require "torch/optim/adamax"
-require "torch/optim/adamw"
-require "torch/optim/asgd"
-require "torch/optim/rmsprop"
-require "torch/optim/rprop"
-require "torch/optim/sgd"
+require_relative "torch/optim/optimizer"
+require_relative "torch/optim/adadelta"
+require_relative "torch/optim/adagrad"
+require_relative "torch/optim/adam"
+require_relative "torch/optim/adamax"
+require_relative "torch/optim/adamw"
+require_relative "torch/optim/asgd"
+require_relative "torch/optim/rmsprop"
+require_relative "torch/optim/rprop"
+require_relative "torch/optim/sgd"
 
 # optim lr_scheduler
-require "torch/optim/lr_scheduler/lr_scheduler"
-require "torch/optim/lr_scheduler/lambda_lr"
-require "torch/optim/lr_scheduler/multiplicative_lr"
-require "torch/optim/lr_scheduler/step_lr"
-require "torch/optim/lr_scheduler/multi_step_lr"
-require "torch/optim/lr_scheduler/exponential_lr"
-require "torch/optim/lr_scheduler/cosine_annealing_lr"
+require_relative "torch/optim/lr_scheduler/lr_scheduler"
+require_relative "torch/optim/lr_scheduler/lambda_lr"
+require_relative "torch/optim/lr_scheduler/multiplicative_lr"
+require_relative "torch/optim/lr_scheduler/step_lr"
+require_relative "torch/optim/lr_scheduler/multi_step_lr"
+require_relative "torch/optim/lr_scheduler/exponential_lr"
+require_relative "torch/optim/lr_scheduler/cosine_annealing_lr"
 
 # nn parameters
-require "torch/nn/parameter"
-require "torch/nn/utils"
+require_relative "torch/nn/parameter"
+require_relative "torch/nn/utils"
 
 # nn containers
-require "torch/nn/module"
-require "torch/nn/module_list"
-require "torch/nn/parameter_list"
-require "torch/nn/sequential"
+require_relative "torch/nn/module"
+require_relative "torch/nn/module_list"
+require_relative "torch/nn/parameter_list"
+require_relative "torch/nn/sequential"
 
 # nn convolution layers
-require "torch/nn/convnd"
-require "torch/nn/conv1d"
-require "torch/nn/conv2d"
-require "torch/nn/conv3d"
-require "torch/nn/unfold"
-require "torch/nn/fold"
+require_relative "torch/nn/convnd"
+require_relative "torch/nn/conv1d"
+require_relative "torch/nn/conv2d"
+require_relative "torch/nn/conv3d"
+require_relative "torch/nn/unfold"
+require_relative "torch/nn/fold"
 
 # nn pooling layers
-require "torch/nn/max_poolnd"
-require "torch/nn/max_pool1d"
-require "torch/nn/max_pool2d"
-require "torch/nn/max_pool3d"
-require "torch/nn/max_unpoolnd"
-require "torch/nn/max_unpool1d"
-require "torch/nn/max_unpool2d"
-require "torch/nn/max_unpool3d"
-require "torch/nn/avg_poolnd"
-require "torch/nn/avg_pool1d"
-require "torch/nn/avg_pool2d"
-require "torch/nn/avg_pool3d"
-require "torch/nn/lp_poolnd"
-require "torch/nn/lp_pool1d"
-require "torch/nn/lp_pool2d"
-require "torch/nn/adaptive_max_poolnd"
-require "torch/nn/adaptive_max_pool1d"
-require "torch/nn/adaptive_max_pool2d"
-require "torch/nn/adaptive_max_pool3d"
-require "torch/nn/adaptive_avg_poolnd"
-require "torch/nn/adaptive_avg_pool1d"
-require "torch/nn/adaptive_avg_pool2d"
-require "torch/nn/adaptive_avg_pool3d"
+require_relative "torch/nn/max_poolnd"
+require_relative "torch/nn/max_pool1d"
+require_relative "torch/nn/max_pool2d"
+require_relative "torch/nn/max_pool3d"
+require_relative "torch/nn/max_unpoolnd"
+require_relative "torch/nn/max_unpool1d"
+require_relative "torch/nn/max_unpool2d"
+require_relative "torch/nn/max_unpool3d"
+require_relative "torch/nn/avg_poolnd"
+require_relative "torch/nn/avg_pool1d"
+require_relative "torch/nn/avg_pool2d"
+require_relative "torch/nn/avg_pool3d"
+require_relative "torch/nn/lp_poolnd"
+require_relative "torch/nn/lp_pool1d"
+require_relative "torch/nn/lp_pool2d"
+require_relative "torch/nn/adaptive_max_poolnd"
+require_relative "torch/nn/adaptive_max_pool1d"
+require_relative "torch/nn/adaptive_max_pool2d"
+require_relative "torch/nn/adaptive_max_pool3d"
+require_relative "torch/nn/adaptive_avg_poolnd"
+require_relative "torch/nn/adaptive_avg_pool1d"
+require_relative "torch/nn/adaptive_avg_pool2d"
+require_relative "torch/nn/adaptive_avg_pool3d"
 
 # nn padding layers
-require "torch/nn/reflection_padnd"
-require "torch/nn/reflection_pad1d"
-require "torch/nn/reflection_pad2d"
-require "torch/nn/replication_padnd"
-require "torch/nn/replication_pad1d"
-require "torch/nn/replication_pad2d"
-require "torch/nn/replication_pad3d"
-require "torch/nn/constant_padnd"
-require "torch/nn/constant_pad1d"
-require "torch/nn/constant_pad2d"
-require "torch/nn/constant_pad3d"
-require "torch/nn/zero_pad2d"
+require_relative "torch/nn/reflection_padnd"
+require_relative "torch/nn/reflection_pad1d"
+require_relative "torch/nn/reflection_pad2d"
+require_relative "torch/nn/replication_padnd"
+require_relative "torch/nn/replication_pad1d"
+require_relative "torch/nn/replication_pad2d"
+require_relative "torch/nn/replication_pad3d"
+require_relative "torch/nn/constant_padnd"
+require_relative "torch/nn/constant_pad1d"
+require_relative "torch/nn/constant_pad2d"
+require_relative "torch/nn/constant_pad3d"
+require_relative "torch/nn/zero_pad2d"
 
 # nn normalization layers
-require "torch/nn/batch_norm"
-require "torch/nn/batch_norm1d"
-require "torch/nn/batch_norm2d"
-require "torch/nn/batch_norm3d"
-require "torch/nn/group_norm"
-require "torch/nn/instance_norm"
-require "torch/nn/instance_norm1d"
-require "torch/nn/instance_norm2d"
-require "torch/nn/instance_norm3d"
-require "torch/nn/layer_norm"
-require "torch/nn/local_response_norm"
+require_relative "torch/nn/batch_norm"
+require_relative "torch/nn/batch_norm1d"
+require_relative "torch/nn/batch_norm2d"
+require_relative "torch/nn/batch_norm3d"
+require_relative "torch/nn/group_norm"
+require_relative "torch/nn/instance_norm"
+require_relative "torch/nn/instance_norm1d"
+require_relative "torch/nn/instance_norm2d"
+require_relative "torch/nn/instance_norm3d"
+require_relative "torch/nn/layer_norm"
+require_relative "torch/nn/local_response_norm"
 
 # nn recurrent layers
-require "torch/nn/rnn_base"
-require "torch/nn/rnn"
-require "torch/nn/lstm"
-require "torch/nn/gru"
+require_relative "torch/nn/rnn_base"
+require_relative "torch/nn/rnn"
+require_relative "torch/nn/lstm"
+require_relative "torch/nn/gru"
 
 # nn linear layers
-require "torch/nn/bilinear"
-require "torch/nn/identity"
-require "torch/nn/linear"
+require_relative "torch/nn/bilinear"
+require_relative "torch/nn/identity"
+require_relative "torch/nn/linear"
 
 # nn dropout layers
-require "torch/nn/dropoutnd"
-require "torch/nn/alpha_dropout"
-require "torch/nn/dropout"
-require "torch/nn/dropout2d"
-require "torch/nn/dropout3d"
-require "torch/nn/feature_alpha_dropout"
+require_relative "torch/nn/dropoutnd"
+require_relative "torch/nn/alpha_dropout"
+require_relative "torch/nn/dropout"
+require_relative "torch/nn/dropout2d"
+require_relative "torch/nn/dropout3d"
+require_relative "torch/nn/feature_alpha_dropout"
 
 # nn activations
-require "torch/nn/hardshrink"
-require "torch/nn/leaky_relu"
-require "torch/nn/log_sigmoid"
-require "torch/nn/prelu"
-require "torch/nn/relu"
-require "torch/nn/sigmoid"
-require "torch/nn/softplus"
-require "torch/nn/softshrink"
-require "torch/nn/softsign"
-require "torch/nn/tanh"
-require "torch/nn/tanhshrink"
+require_relative "torch/nn/hardshrink"
+require_relative "torch/nn/leaky_relu"
+require_relative "torch/nn/log_sigmoid"
+require_relative "torch/nn/prelu"
+require_relative "torch/nn/relu"
+require_relative "torch/nn/sigmoid"
+require_relative "torch/nn/softplus"
+require_relative "torch/nn/softshrink"
+require_relative "torch/nn/softsign"
+require_relative "torch/nn/tanh"
+require_relative "torch/nn/tanhshrink"
 
 # nn activations other
-require "torch/nn/log_softmax"
-require "torch/nn/softmax"
-require "torch/nn/softmax2d"
-require "torch/nn/softmin"
+require_relative "torch/nn/log_softmax"
+require_relative "torch/nn/softmax"
+require_relative "torch/nn/softmax2d"
+require_relative "torch/nn/softmin"
 
 # nn sparse layers
-require "torch/nn/embedding"
-require "torch/nn/embedding_bag"
+require_relative "torch/nn/embedding"
+require_relative "torch/nn/embedding_bag"
 
 # attention is all you need
-require "torch/nn/multihead_attention"
-require "torch/nn/transformer"
+require_relative "torch/nn/multihead_attention"
+require_relative "torch/nn/transformer"
 
 # nn distance functions
-require "torch/nn/cosine_similarity"
-require "torch/nn/pairwise_distance"
+require_relative "torch/nn/cosine_similarity"
+require_relative "torch/nn/pairwise_distance"
 
 # nn loss functions
-require "torch/nn/loss"
-require "torch/nn/weighted_loss"
-require "torch/nn/bce_loss"
-require "torch/nn/bce_with_logits_loss"
-require "torch/nn/cosine_embedding_loss"
-require "torch/nn/cross_entropy_loss"
-require "torch/nn/ctc_loss"
-require "torch/nn/hinge_embedding_loss"
-require "torch/nn/kl_div_loss"
-require "torch/nn/l1_loss"
-require "torch/nn/margin_ranking_loss"
-require "torch/nn/mse_loss"
-require "torch/nn/multi_label_margin_loss"
-require "torch/nn/multi_label_soft_margin_loss"
-require "torch/nn/multi_margin_loss"
-require "torch/nn/nll_loss"
-require "torch/nn/poisson_nll_loss"
-require "torch/nn/smooth_l1_loss"
-require "torch/nn/soft_margin_loss"
-require "torch/nn/triplet_margin_loss"
+require_relative "torch/nn/loss"
+require_relative "torch/nn/weighted_loss"
+require_relative "torch/nn/bce_loss"
+require_relative "torch/nn/bce_with_logits_loss"
+require_relative "torch/nn/cosine_embedding_loss"
+require_relative "torch/nn/cross_entropy_loss"
+require_relative "torch/nn/ctc_loss"
+require_relative "torch/nn/hinge_embedding_loss"
+require_relative "torch/nn/kl_div_loss"
+require_relative "torch/nn/l1_loss"
+require_relative "torch/nn/margin_ranking_loss"
+require_relative "torch/nn/mse_loss"
+require_relative "torch/nn/multi_label_margin_loss"
+require_relative "torch/nn/multi_label_soft_margin_loss"
+require_relative "torch/nn/multi_margin_loss"
+require_relative "torch/nn/nll_loss"
+require_relative "torch/nn/poisson_nll_loss"
+require_relative "torch/nn/smooth_l1_loss"
+require_relative "torch/nn/soft_margin_loss"
+require_relative "torch/nn/triplet_margin_loss"
 
 # nn vision
-require "torch/nn/upsample"
+require_relative "torch/nn/upsample"
 
 # nn other
-require "torch/nn/functional"
-require "torch/nn/functional_attention"
-require "torch/nn/init"
+require_relative "torch/nn/functional"
+require_relative "torch/nn/functional_attention"
+require_relative "torch/nn/init"
 
 # utils
-require "torch/utils/data"
-require "torch/utils/data/data_loader"
-require "torch/utils/data/dataset"
-require "torch/utils/data/iterable_dataset"
-require "torch/utils/data/data_pipes/iter_data_pipe"
-require "torch/utils/data/data_pipes/filter_iter_data_pipe"
-require "torch/utils/data/data_pipes/iter/file_lister"
-require "torch/utils/data/data_pipes/iter/file_opener"
-require "torch/utils/data/data_pipes/iter/iterable_wrapper"
-require "torch/utils/data/data_pipes/iter/stream_wrapper"
-require "torch/utils/data/subset"
-require "torch/utils/data/tensor_dataset"
+require_relative "torch/utils/data"
+require_relative "torch/utils/data/data_loader"
+require_relative "torch/utils/data/dataset"
+require_relative "torch/utils/data/iterable_dataset"
+require_relative "torch/utils/data/data_pipes/iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/filter_iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/iter/file_lister"
+require_relative "torch/utils/data/data_pipes/iter/file_opener"
+require_relative "torch/utils/data/data_pipes/iter/iterable_wrapper"
+require_relative "torch/utils/data/data_pipes/iter/stream_wrapper"
+require_relative "torch/utils/data/subset"
+require_relative "torch/utils/data/tensor_dataset"
 
 # hub
-require "torch/hub"
+require_relative "torch/hub"
 
 module Torch
   class Error < StandardError; end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: torch-rb
 version: !ruby/object:Gem::Version
-  version: 0.13.0
+  version: 0.13.1
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-04-13 00:00:00.000000000 Z
+date: 2023-05-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rice
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0.2
+        version: 4.1.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0.2
+        version: 4.1.0
 description:
 email: andrew@ankane.org
 executables: []