torch-rb 0.12.2 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,10 +8,11 @@ void init_backends(Rice::Module& m) {
   auto rb_mBackends = Rice::define_module_under(m, "Backends");
 
   Rice::define_module_under(rb_mBackends, "OpenMP")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasOpenMP);
 
   Rice::define_module_under(rb_mBackends, "MKL")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMKL);
+
+  Rice::define_module_under(rb_mBackends, "MPS")
+    .define_singleton_function("available?", &torch::hasMPS);
 }
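Note: the new MPS check mirrors the existing OpenMP and MKL bindings, so on the Ruby side it should surface as a singleton method on the nested module. A minimal usage sketch (the Torch::Backends::MPS constant path is inferred from the module nesting above, and the "mps" device string is assumed to match LibTorch's):

  # Hypothetical usage on Apple silicon
  if Torch::Backends::MPS.available?
    device = Torch.device("mps")   # assumed device string, as in PyTorch 2.x
  else
    device = Torch.device("cpu")
  end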
data/ext/torch/cuda.cpp CHANGED
@@ -6,7 +6,6 @@
 
 void init_cuda(Rice::Module& m) {
   Rice::define_module_under(m, "CUDA")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::cuda::is_available)
     .define_singleton_function("device_count", &torch::cuda::device_count)
     .define_singleton_function("manual_seed", &torch::cuda::manual_seed)
data/ext/torch/device.cpp CHANGED
@@ -6,7 +6,6 @@
 
 void init_device(Rice::Module& m) {
   Rice::define_class_under<torch::Device>(m, "Device")
-    .add_handler<torch::Error>(handle_error)
     .define_constructor(Rice::Constructor<torch::Device, const std::string&>())
     .define_method(
       "index",
data/ext/torch/fft.cpp CHANGED
@@ -8,6 +8,5 @@
 
 void init_fft(Rice::Module& m) {
   auto rb_mFFT = Rice::define_module_under(m, "FFT");
-  rb_mFFT.add_handler<torch::Error>(handle_error);
   add_fft_functions(rb_mFFT);
 }
@@ -7,7 +7,6 @@
 void init_generator(Rice::Module& m, Rice::Class& rb_cGenerator) {
   // https://github.com/pytorch/pytorch/blob/master/torch/csrc/Generator.cpp
   rb_cGenerator
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "new",
       []() {
data/ext/torch/ivalue.cpp CHANGED
@@ -7,7 +7,6 @@
 void init_ivalue(Rice::Module& m, Rice::Class& rb_cIValue) {
   // https://pytorch.org/cppdocs/api/structc10_1_1_i_value.html
   rb_cIValue
-    .add_handler<torch::Error>(handle_error)
     .define_method("bool?", &torch::IValue::isBool)
     .define_method("bool_list?", &torch::IValue::isBoolList)
     .define_method("capsule?", &torch::IValue::isCapsule)
data/ext/torch/linalg.cpp CHANGED
@@ -8,6 +8,5 @@
 
 void init_linalg(Rice::Module& m) {
   auto rb_mLinalg = Rice::define_module_under(m, "Linalg");
-  rb_mLinalg.add_handler<torch::Error>(handle_error);
   add_linalg_functions(rb_mLinalg);
 }
data/ext/torch/nn.cpp CHANGED
@@ -14,11 +14,9 @@ class Parameter: public torch::autograd::Variable {
 
 void init_nn(Rice::Module& m) {
   auto rb_mNN = Rice::define_module_under(m, "NN");
-  rb_mNN.add_handler<torch::Error>(handle_error);
   add_nn_functions(rb_mNN);
 
   Rice::define_module_under(rb_mNN, "Init")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "_calculate_gain",
       [](NonlinearityType nonlinearity, double param) {
@@ -91,7 +89,6 @@ void init_nn(Rice::Module& m) {
     });
 
   Rice::define_class_under<Parameter, torch::Tensor>(rb_mNN, "Parameter")
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "grad",
       [](Parameter& self) {
data/ext/torch/random.cpp CHANGED
@@ -6,7 +6,6 @@
 
 void init_random(Rice::Module& m) {
   Rice::define_module_under(m, "Random")
-    .add_handler<torch::Error>(handle_error)
    .define_singleton_function(
       "initial_seed",
       []() {
@@ -8,6 +8,5 @@
 
 void init_special(Rice::Module& m) {
   auto rb_mSpecial = Rice::define_module_under(m, "Special");
-  rb_mSpecial.add_handler<torch::Error>(handle_error);
   add_special_functions(rb_mSpecial);
 }
data/ext/torch/tensor.cpp CHANGED
@@ -35,17 +35,17 @@ std::vector<TensorIndex> index_vector(Array a) {
     if (obj.is_instance_of(rb_cInteger)) {
       indices.push_back(Rice::detail::From_Ruby<int64_t>().convert(obj.value()));
     } else if (obj.is_instance_of(rb_cRange)) {
-      torch::optional<int64_t> start_index = torch::nullopt;
-      torch::optional<int64_t> stop_index = torch::nullopt;
+      torch::optional<c10::SymInt> start_index = torch::nullopt;
+      torch::optional<c10::SymInt> stop_index = torch::nullopt;
 
       Object begin = obj.call("begin");
       if (!begin.is_nil()) {
-        start_index = Rice::detail::From_Ruby<int64_t>().convert(begin.value());
+        start_index = c10::SymInt(Rice::detail::From_Ruby<int64_t>().convert(begin.value()));
       }
 
       Object end = obj.call("end");
       if (!end.is_nil()) {
-        stop_index = Rice::detail::From_Ruby<int64_t>().convert(end.value());
+        stop_index = c10::SymInt(Rice::detail::From_Ruby<int64_t>().convert(end.value()));
       }
 
       Object exclude_end = obj.call("exclude_end?");
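Note: the range handling above now wraps the Ruby Range bounds in c10::SymInt, which the LibTorch 2.0 slicing API expects; the Ruby-side indexing behaviour is unchanged. A rough sketch of what index_vector accepts (values are illustrative):

  x = Torch.arange(10)
  x[3]       # Integer index
  x[2..5]    # inclusive Range: begin 2, end 5
  x[2...5]   # exclusive Range: exclude_end? leaves 5 out
  x[6..]     # endless Range: nil end leaves the stop bound unset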
@@ -96,7 +96,6 @@ static VALUE tensor__backward(int argc, VALUE* argv, VALUE self_)
 
 void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions) {
   rb_cTensor = c;
-  rb_cTensor.add_handler<torch::Error>(handle_error);
   add_tensor_functions(rb_cTensor);
   THPVariableClass = rb_cTensor.value();
 
@@ -286,7 +285,6 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
     });
 
   rb_cTensorOptions
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "dtype",
       [](torch::TensorOptions& self, int dtype) {
data/ext/torch/torch.cpp CHANGED
@@ -24,7 +24,7 @@ torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch:
 }
 
 void init_torch(Rice::Module& m) {
-  m.add_handler<torch::Error>(handle_error);
+  register_handler<torch::Error>(handle_global_error);
   add_torch_functions(m);
   m.define_singleton_function(
     "grad_enabled?",
data/ext/torch/utils.h CHANGED
@@ -6,12 +6,11 @@
 #include <rice/stl.hpp>
 
 static_assert(
-  TORCH_VERSION_MAJOR == 1 && TORCH_VERSION_MINOR == 13,
+  TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR == 0,
   "Incompatible LibTorch version"
 );
 
-// TODO find better place
-inline void handle_error(torch::Error const & ex) {
+inline void handle_global_error(const torch::Error& ex) {
   throw Rice::Exception(rb_eRuntimeError, ex.what_without_backtrace());
 }
 
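Note: with the per-module and per-class add_handler calls removed throughout, the single register_handler call in torch.cpp above now routes LibTorch exceptions for every binding; handle_global_error, defined here, re-raises them as Ruby RuntimeError. A sketch of the behaviour visible from Ruby (the exact message text comes from LibTorch):

  begin
    Torch.tensor([1, 2, 3]) + Torch.tensor([1, 2])  # shape mismatch raised inside LibTorch
  rescue RuntimeError => e
    puts e.message  # LibTorch's error text, without the C++ backtrace
  end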
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Torch
-  VERSION = "0.12.2"
+  VERSION = "0.13.1"
 end
data/lib/torch-rb.rb CHANGED
@@ -1 +1 @@
-require "torch"
+require_relative "torch"
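Note: this release also switches the gem's internal loading from require to require_relative, here and throughout lib/torch.rb below. require_relative resolves against the directory of the requiring file rather than $LOAD_PATH, so the gem always loads its own files; this is standard Ruby behaviour, shown schematically:

  # lib/torch-rb.rb
  require_relative "torch"       # loads lib/torch.rb next to this file
  # lib/torch.rb
  require_relative "torch/ext"   # loads the compiled extension under lib/torch/,
                                 # regardless of what else is on $LOAD_PATH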
data/lib/torch.rb CHANGED
@@ -1,5 +1,5 @@
 # ext
-require "torch/ext"
+require_relative "torch/ext"
 
 # stdlib
 require "fileutils"
@@ -8,197 +8,197 @@ require "set"
 require "tmpdir"
 
 # modules
-require "torch/inspector"
-require "torch/tensor"
-require "torch/version"
+require_relative "torch/inspector"
+require_relative "torch/tensor"
+require_relative "torch/version"
 
 # optim
-require "torch/optim/optimizer"
-require "torch/optim/adadelta"
-require "torch/optim/adagrad"
-require "torch/optim/adam"
-require "torch/optim/adamax"
-require "torch/optim/adamw"
-require "torch/optim/asgd"
-require "torch/optim/rmsprop"
-require "torch/optim/rprop"
-require "torch/optim/sgd"
+require_relative "torch/optim/optimizer"
+require_relative "torch/optim/adadelta"
+require_relative "torch/optim/adagrad"
+require_relative "torch/optim/adam"
+require_relative "torch/optim/adamax"
+require_relative "torch/optim/adamw"
+require_relative "torch/optim/asgd"
+require_relative "torch/optim/rmsprop"
+require_relative "torch/optim/rprop"
+require_relative "torch/optim/sgd"
 
 # optim lr_scheduler
-require "torch/optim/lr_scheduler/lr_scheduler"
-require "torch/optim/lr_scheduler/lambda_lr"
-require "torch/optim/lr_scheduler/multiplicative_lr"
-require "torch/optim/lr_scheduler/step_lr"
-require "torch/optim/lr_scheduler/multi_step_lr"
-require "torch/optim/lr_scheduler/exponential_lr"
-require "torch/optim/lr_scheduler/cosine_annealing_lr"
+require_relative "torch/optim/lr_scheduler/lr_scheduler"
+require_relative "torch/optim/lr_scheduler/lambda_lr"
+require_relative "torch/optim/lr_scheduler/multiplicative_lr"
+require_relative "torch/optim/lr_scheduler/step_lr"
+require_relative "torch/optim/lr_scheduler/multi_step_lr"
+require_relative "torch/optim/lr_scheduler/exponential_lr"
+require_relative "torch/optim/lr_scheduler/cosine_annealing_lr"
 
 # nn parameters
-require "torch/nn/parameter"
-require "torch/nn/utils"
+require_relative "torch/nn/parameter"
+require_relative "torch/nn/utils"
 
 # nn containers
-require "torch/nn/module"
-require "torch/nn/module_list"
-require "torch/nn/parameter_list"
-require "torch/nn/sequential"
+require_relative "torch/nn/module"
+require_relative "torch/nn/module_list"
+require_relative "torch/nn/parameter_list"
+require_relative "torch/nn/sequential"
 
 # nn convolution layers
-require "torch/nn/convnd"
-require "torch/nn/conv1d"
-require "torch/nn/conv2d"
-require "torch/nn/conv3d"
-require "torch/nn/unfold"
-require "torch/nn/fold"
+require_relative "torch/nn/convnd"
+require_relative "torch/nn/conv1d"
+require_relative "torch/nn/conv2d"
+require_relative "torch/nn/conv3d"
+require_relative "torch/nn/unfold"
+require_relative "torch/nn/fold"
 
 # nn pooling layers
-require "torch/nn/max_poolnd"
-require "torch/nn/max_pool1d"
-require "torch/nn/max_pool2d"
-require "torch/nn/max_pool3d"
-require "torch/nn/max_unpoolnd"
-require "torch/nn/max_unpool1d"
-require "torch/nn/max_unpool2d"
-require "torch/nn/max_unpool3d"
-require "torch/nn/avg_poolnd"
-require "torch/nn/avg_pool1d"
-require "torch/nn/avg_pool2d"
-require "torch/nn/avg_pool3d"
-require "torch/nn/lp_poolnd"
-require "torch/nn/lp_pool1d"
-require "torch/nn/lp_pool2d"
-require "torch/nn/adaptive_max_poolnd"
-require "torch/nn/adaptive_max_pool1d"
-require "torch/nn/adaptive_max_pool2d"
-require "torch/nn/adaptive_max_pool3d"
-require "torch/nn/adaptive_avg_poolnd"
-require "torch/nn/adaptive_avg_pool1d"
-require "torch/nn/adaptive_avg_pool2d"
-require "torch/nn/adaptive_avg_pool3d"
+require_relative "torch/nn/max_poolnd"
+require_relative "torch/nn/max_pool1d"
+require_relative "torch/nn/max_pool2d"
+require_relative "torch/nn/max_pool3d"
+require_relative "torch/nn/max_unpoolnd"
+require_relative "torch/nn/max_unpool1d"
+require_relative "torch/nn/max_unpool2d"
+require_relative "torch/nn/max_unpool3d"
+require_relative "torch/nn/avg_poolnd"
+require_relative "torch/nn/avg_pool1d"
+require_relative "torch/nn/avg_pool2d"
+require_relative "torch/nn/avg_pool3d"
+require_relative "torch/nn/lp_poolnd"
+require_relative "torch/nn/lp_pool1d"
+require_relative "torch/nn/lp_pool2d"
+require_relative "torch/nn/adaptive_max_poolnd"
+require_relative "torch/nn/adaptive_max_pool1d"
+require_relative "torch/nn/adaptive_max_pool2d"
+require_relative "torch/nn/adaptive_max_pool3d"
+require_relative "torch/nn/adaptive_avg_poolnd"
+require_relative "torch/nn/adaptive_avg_pool1d"
+require_relative "torch/nn/adaptive_avg_pool2d"
+require_relative "torch/nn/adaptive_avg_pool3d"
 
 # nn padding layers
-require "torch/nn/reflection_padnd"
-require "torch/nn/reflection_pad1d"
-require "torch/nn/reflection_pad2d"
-require "torch/nn/replication_padnd"
-require "torch/nn/replication_pad1d"
-require "torch/nn/replication_pad2d"
-require "torch/nn/replication_pad3d"
-require "torch/nn/constant_padnd"
-require "torch/nn/constant_pad1d"
-require "torch/nn/constant_pad2d"
-require "torch/nn/constant_pad3d"
-require "torch/nn/zero_pad2d"
+require_relative "torch/nn/reflection_padnd"
+require_relative "torch/nn/reflection_pad1d"
+require_relative "torch/nn/reflection_pad2d"
+require_relative "torch/nn/replication_padnd"
+require_relative "torch/nn/replication_pad1d"
+require_relative "torch/nn/replication_pad2d"
+require_relative "torch/nn/replication_pad3d"
+require_relative "torch/nn/constant_padnd"
+require_relative "torch/nn/constant_pad1d"
+require_relative "torch/nn/constant_pad2d"
+require_relative "torch/nn/constant_pad3d"
+require_relative "torch/nn/zero_pad2d"
 
 # nn normalization layers
-require "torch/nn/batch_norm"
-require "torch/nn/batch_norm1d"
-require "torch/nn/batch_norm2d"
-require "torch/nn/batch_norm3d"
-require "torch/nn/group_norm"
-require "torch/nn/instance_norm"
-require "torch/nn/instance_norm1d"
-require "torch/nn/instance_norm2d"
-require "torch/nn/instance_norm3d"
-require "torch/nn/layer_norm"
-require "torch/nn/local_response_norm"
+require_relative "torch/nn/batch_norm"
+require_relative "torch/nn/batch_norm1d"
+require_relative "torch/nn/batch_norm2d"
+require_relative "torch/nn/batch_norm3d"
+require_relative "torch/nn/group_norm"
+require_relative "torch/nn/instance_norm"
+require_relative "torch/nn/instance_norm1d"
+require_relative "torch/nn/instance_norm2d"
+require_relative "torch/nn/instance_norm3d"
+require_relative "torch/nn/layer_norm"
+require_relative "torch/nn/local_response_norm"
 
 # nn recurrent layers
-require "torch/nn/rnn_base"
-require "torch/nn/rnn"
-require "torch/nn/lstm"
-require "torch/nn/gru"
+require_relative "torch/nn/rnn_base"
+require_relative "torch/nn/rnn"
+require_relative "torch/nn/lstm"
+require_relative "torch/nn/gru"
 
 # nn linear layers
-require "torch/nn/bilinear"
-require "torch/nn/identity"
-require "torch/nn/linear"
+require_relative "torch/nn/bilinear"
+require_relative "torch/nn/identity"
+require_relative "torch/nn/linear"
 
 # nn dropout layers
-require "torch/nn/dropoutnd"
-require "torch/nn/alpha_dropout"
-require "torch/nn/dropout"
-require "torch/nn/dropout2d"
-require "torch/nn/dropout3d"
-require "torch/nn/feature_alpha_dropout"
+require_relative "torch/nn/dropoutnd"
+require_relative "torch/nn/alpha_dropout"
+require_relative "torch/nn/dropout"
+require_relative "torch/nn/dropout2d"
+require_relative "torch/nn/dropout3d"
+require_relative "torch/nn/feature_alpha_dropout"
 
 # nn activations
-require "torch/nn/hardshrink"
-require "torch/nn/leaky_relu"
-require "torch/nn/log_sigmoid"
-require "torch/nn/prelu"
-require "torch/nn/relu"
-require "torch/nn/sigmoid"
-require "torch/nn/softplus"
-require "torch/nn/softshrink"
-require "torch/nn/softsign"
-require "torch/nn/tanh"
-require "torch/nn/tanhshrink"
+require_relative "torch/nn/hardshrink"
+require_relative "torch/nn/leaky_relu"
+require_relative "torch/nn/log_sigmoid"
+require_relative "torch/nn/prelu"
+require_relative "torch/nn/relu"
+require_relative "torch/nn/sigmoid"
+require_relative "torch/nn/softplus"
+require_relative "torch/nn/softshrink"
+require_relative "torch/nn/softsign"
+require_relative "torch/nn/tanh"
+require_relative "torch/nn/tanhshrink"
 
 # nn activations other
-require "torch/nn/log_softmax"
-require "torch/nn/softmax"
-require "torch/nn/softmax2d"
-require "torch/nn/softmin"
+require_relative "torch/nn/log_softmax"
+require_relative "torch/nn/softmax"
+require_relative "torch/nn/softmax2d"
+require_relative "torch/nn/softmin"
 
 # nn sparse layers
-require "torch/nn/embedding"
-require "torch/nn/embedding_bag"
+require_relative "torch/nn/embedding"
+require_relative "torch/nn/embedding_bag"
 
 # attention is all you need
-require "torch/nn/multihead_attention"
-require "torch/nn/transformer"
+require_relative "torch/nn/multihead_attention"
+require_relative "torch/nn/transformer"
 
 # nn distance functions
-require "torch/nn/cosine_similarity"
-require "torch/nn/pairwise_distance"
+require_relative "torch/nn/cosine_similarity"
+require_relative "torch/nn/pairwise_distance"
 
 # nn loss functions
-require "torch/nn/loss"
-require "torch/nn/weighted_loss"
-require "torch/nn/bce_loss"
-require "torch/nn/bce_with_logits_loss"
-require "torch/nn/cosine_embedding_loss"
-require "torch/nn/cross_entropy_loss"
-require "torch/nn/ctc_loss"
-require "torch/nn/hinge_embedding_loss"
-require "torch/nn/kl_div_loss"
-require "torch/nn/l1_loss"
-require "torch/nn/margin_ranking_loss"
-require "torch/nn/mse_loss"
-require "torch/nn/multi_label_margin_loss"
-require "torch/nn/multi_label_soft_margin_loss"
-require "torch/nn/multi_margin_loss"
-require "torch/nn/nll_loss"
-require "torch/nn/poisson_nll_loss"
-require "torch/nn/smooth_l1_loss"
-require "torch/nn/soft_margin_loss"
-require "torch/nn/triplet_margin_loss"
+require_relative "torch/nn/loss"
+require_relative "torch/nn/weighted_loss"
+require_relative "torch/nn/bce_loss"
+require_relative "torch/nn/bce_with_logits_loss"
+require_relative "torch/nn/cosine_embedding_loss"
+require_relative "torch/nn/cross_entropy_loss"
+require_relative "torch/nn/ctc_loss"
+require_relative "torch/nn/hinge_embedding_loss"
+require_relative "torch/nn/kl_div_loss"
+require_relative "torch/nn/l1_loss"
+require_relative "torch/nn/margin_ranking_loss"
+require_relative "torch/nn/mse_loss"
+require_relative "torch/nn/multi_label_margin_loss"
+require_relative "torch/nn/multi_label_soft_margin_loss"
+require_relative "torch/nn/multi_margin_loss"
+require_relative "torch/nn/nll_loss"
+require_relative "torch/nn/poisson_nll_loss"
+require_relative "torch/nn/smooth_l1_loss"
+require_relative "torch/nn/soft_margin_loss"
+require_relative "torch/nn/triplet_margin_loss"
 
 # nn vision
-require "torch/nn/upsample"
+require_relative "torch/nn/upsample"
 
 # nn other
-require "torch/nn/functional"
-require "torch/nn/functional_attention"
-require "torch/nn/init"
+require_relative "torch/nn/functional"
+require_relative "torch/nn/functional_attention"
+require_relative "torch/nn/init"
 
 # utils
-require "torch/utils/data"
-require "torch/utils/data/data_loader"
-require "torch/utils/data/dataset"
-require "torch/utils/data/iterable_dataset"
-require "torch/utils/data/data_pipes/iter_data_pipe"
-require "torch/utils/data/data_pipes/filter_iter_data_pipe"
-require "torch/utils/data/data_pipes/iter/file_lister"
-require "torch/utils/data/data_pipes/iter/file_opener"
-require "torch/utils/data/data_pipes/iter/iterable_wrapper"
-require "torch/utils/data/data_pipes/iter/stream_wrapper"
-require "torch/utils/data/subset"
-require "torch/utils/data/tensor_dataset"
+require_relative "torch/utils/data"
+require_relative "torch/utils/data/data_loader"
+require_relative "torch/utils/data/dataset"
+require_relative "torch/utils/data/iterable_dataset"
+require_relative "torch/utils/data/data_pipes/iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/filter_iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/iter/file_lister"
+require_relative "torch/utils/data/data_pipes/iter/file_opener"
+require_relative "torch/utils/data/data_pipes/iter/iterable_wrapper"
+require_relative "torch/utils/data/data_pipes/iter/stream_wrapper"
+require_relative "torch/utils/data/subset"
+require_relative "torch/utils/data/tensor_dataset"
 
 # hub
-require "torch/hub"
+require_relative "torch/hub"
 
 module Torch
   class Error < StandardError; end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: torch-rb
 version: !ruby/object:Gem::Version
-  version: 0.12.2
+  version: 0.13.1
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-01-31 00:00:00.000000000 Z
+date: 2023-05-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rice
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0.2
+        version: 4.1.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0.2
+        version: 4.1.0
 description:
 email: andrew@ankane.org
 executables: []
@@ -230,14 +230,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '2.7'
+      version: '3'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
      version: '0'
 requirements: []
-rubygems_version: 3.4.1
+rubygems_version: 3.4.10
 signing_key:
 specification_version: 4
 summary: Deep learning for Ruby, powered by LibTorch