torch-rb 0.12.2 → 0.13.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +9 -0
- data/README.md +27 -12
- data/codegen/generate_functions.rb +5 -6
- data/codegen/native_functions.yaml +1392 -593
- data/ext/torch/backends.cpp +3 -2
- data/ext/torch/cuda.cpp +0 -1
- data/ext/torch/device.cpp +0 -1
- data/ext/torch/fft.cpp +0 -1
- data/ext/torch/generator.cpp +0 -1
- data/ext/torch/ivalue.cpp +0 -1
- data/ext/torch/linalg.cpp +0 -1
- data/ext/torch/nn.cpp +0 -3
- data/ext/torch/random.cpp +0 -1
- data/ext/torch/special.cpp +0 -1
- data/ext/torch/tensor.cpp +4 -6
- data/ext/torch/torch.cpp +1 -1
- data/ext/torch/utils.h +2 -3
- data/lib/torch/version.rb +1 -1
- data/lib/torch-rb.rb +1 -1
- data/lib/torch.rb +150 -150
- metadata +6 -6
data/ext/torch/backends.cpp
CHANGED
```diff
@@ -8,10 +8,11 @@ void init_backends(Rice::Module& m) {
   auto rb_mBackends = Rice::define_module_under(m, "Backends");
 
   Rice::define_module_under(rb_mBackends, "OpenMP")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasOpenMP);
 
   Rice::define_module_under(rb_mBackends, "MKL")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMKL);
+
+  Rice::define_module_under(rb_mBackends, "MPS")
+    .define_singleton_function("available?", &torch::hasMPS);
 }
```
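The new `Backends::MPS` module mirrors the existing OpenMP and MKL checks. A minimal usage sketch, with method names taken directly from the `define_singleton_function` calls above:

```ruby
require "torch"

# each backend module exposes one query method bound to the
# corresponding LibTorch hasX function
Torch::Backends::OpenMP.available?
Torch::Backends::MKL.available?
Torch::Backends::MPS.available?  # new in 0.13: Apple Metal Performance Shaders
```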
data/ext/torch/cuda.cpp
CHANGED
```diff
@@ -6,7 +6,6 @@
 
 void init_cuda(Rice::Module& m) {
   Rice::define_module_under(m, "CUDA")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::cuda::is_available)
     .define_singleton_function("device_count", &torch::cuda::device_count)
     .define_singleton_function("manual_seed", &torch::cuda::manual_seed)
```
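For reference, the three CUDA functions visible in this hunk map to Ruby as follows (a sketch):

```ruby
require "torch"

if Torch::CUDA.available?
  puts Torch::CUDA.device_count  # number of visible CUDA devices
  Torch::CUDA.manual_seed(42)    # seed the CUDA RNG for reproducibility
end
```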
data/ext/torch/device.cpp
CHANGED
data/ext/torch/fft.cpp
CHANGED
data/ext/torch/generator.cpp
CHANGED
data/ext/torch/ivalue.cpp
CHANGED
```diff
@@ -7,7 +7,6 @@
 
 void init_ivalue(Rice::Module& m, Rice::Class& rb_cIValue) {
   // https://pytorch.org/cppdocs/api/structc10_1_1_i_value.html
   rb_cIValue
-    .add_handler<torch::Error>(handle_error)
     .define_method("bool?", &torch::IValue::isBool)
     .define_method("bool_list?", &torch::IValue::isBoolList)
     .define_method("capsule?", &torch::IValue::isCapsule)
```
data/ext/torch/linalg.cpp
CHANGED
data/ext/torch/nn.cpp
CHANGED
```diff
@@ -14,11 +14,9 @@ class Parameter: public torch::autograd::Variable {
 
 void init_nn(Rice::Module& m) {
   auto rb_mNN = Rice::define_module_under(m, "NN");
-  rb_mNN.add_handler<torch::Error>(handle_error);
   add_nn_functions(rb_mNN);
 
   Rice::define_module_under(rb_mNN, "Init")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "_calculate_gain",
       [](NonlinearityType nonlinearity, double param) {
@@ -91,7 +89,6 @@ void init_nn(Rice::Module& m) {
   });
 
   Rice::define_class_under<Parameter, torch::Tensor>(rb_mNN, "Parameter")
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "grad",
       [](Parameter& self) {
```
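The `Parameter` class keeps its `grad` reader; only the per-class error handler is removed. A sketch of how the binding surfaces in Ruby, assuming a standard `Linear` layer:

```ruby
require "torch"

layer = Torch::NN::Linear.new(4, 2)
layer.weight.class       # => Torch::NN::Parameter
layer.weight.grad        # => nil until a backward pass populates it

loss = layer.call(Torch.randn(1, 4)).sum
loss.backward
layer.weight.grad.shape  # => [2, 4]
```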
data/ext/torch/random.cpp
CHANGED
data/ext/torch/special.cpp
CHANGED
data/ext/torch/tensor.cpp
CHANGED
```diff
@@ -35,17 +35,17 @@ std::vector<TensorIndex> index_vector(Array a) {
     if (obj.is_instance_of(rb_cInteger)) {
       indices.push_back(Rice::detail::From_Ruby<int64_t>().convert(obj.value()));
     } else if (obj.is_instance_of(rb_cRange)) {
-      torch::optional<int64_t> start_index = torch::nullopt;
-      torch::optional<int64_t> stop_index = torch::nullopt;
+      torch::optional<c10::SymInt> start_index = torch::nullopt;
+      torch::optional<c10::SymInt> stop_index = torch::nullopt;
 
       Object begin = obj.call("begin");
       if (!begin.is_nil()) {
-        start_index = Rice::detail::From_Ruby<int64_t>().convert(begin.value());
+        start_index = c10::SymInt(Rice::detail::From_Ruby<int64_t>().convert(begin.value()));
       }
 
       Object end = obj.call("end");
       if (!end.is_nil()) {
-        stop_index = Rice::detail::From_Ruby<int64_t>().convert(end.value());
+        stop_index = c10::SymInt(Rice::detail::From_Ruby<int64_t>().convert(end.value()));
       }
 
       Object exclude_end = obj.call("exclude_end?");
@@ -96,7 +96,6 @@ static VALUE tensor__backward(int argc, VALUE* argv, VALUE self_)
 
 void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions) {
   rb_cTensor = c;
-  rb_cTensor.add_handler<torch::Error>(handle_error);
   add_tensor_functions(rb_cTensor);
   THPVariableClass = rb_cTensor.value();
 
@@ -286,7 +285,6 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
   });
 
   rb_cTensorOptions
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "dtype",
       [](torch::TensorOptions& self, int dtype) {
```
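The `index_vector` change wraps Range bounds in `c10::SymInt`, the symbolic-integer type LibTorch 2.0 threads through its indexing APIs; behavior from Ruby is unchanged. For context, this is the code path hit when indexing a tensor with a Range (a sketch):

```ruby
require "torch"

x = Torch.arange(10)
x[2..5]   # begin/end wrapped in c10::SymInt; inclusive slice
x[2...5]  # exclude_end? is true, so the stop index is exclusive
x[3..]    # nil end leaves stop_index as torch::nullopt (open-ended slice)
x[..4]    # nil begin leaves start_index as torch::nullopt
```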
data/ext/torch/torch.cpp
CHANGED
```diff
@@ -24,7 +24,7 @@ torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch:
 }
 
 void init_torch(Rice::Module& m) {
-  m.add_handler<torch::Error>(handle_error);
+  register_handler<torch::Error>(handle_global_error);
   add_torch_functions(m);
   m.define_singleton_function(
     "grad_enabled?",
```
data/ext/torch/utils.h
CHANGED
```diff
@@ -6,12 +6,11 @@
 #include <rice/stl.hpp>
 
 static_assert(
-  TORCH_VERSION_MAJOR == 1 && TORCH_VERSION_MINOR == 13,
+  TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR == 0,
   "Incompatible LibTorch version"
 );
 
-
-inline void handle_error(torch::Error const & ex) {
+inline void handle_global_error(const torch::Error& ex) {
   throw Rice::Exception(rb_eRuntimeError, ex.what_without_backtrace());
 }
 
```
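Taken with the torch.cpp hunk above, this is the crux of the release's error-handling change: rice 4.1 moves exception mapping from per-class `add_handler` calls to a single process-wide `register_handler`, which is why every `.add_handler<torch::Error>(handle_error)` line elsewhere in this diff disappears (and why the gemspec below bumps rice to >= 4.1.0). LibTorch errors still reach Ruby as `RuntimeError` via `what_without_backtrace`. A sketch of what that looks like from Ruby:

```ruby
require "torch"

begin
  # shape [2, 2] is invalid for 3 elements, so LibTorch raises torch::Error
  Torch.tensor([1, 2, 3]).reshape([2, 2])
rescue RuntimeError => e
  puts e.message  # handle_global_error re-raises it as a Ruby RuntimeError
end
```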
data/lib/torch/version.rb
CHANGED
data/lib/torch-rb.rb
CHANGED
```diff
@@ -1 +1 @@
-require "torch"
+require_relative "torch"
```
data/lib/torch.rb
CHANGED
```diff
@@ -1,5 +1,5 @@
 # ext
-require "torch/ext"
+require_relative "torch/ext"
 
 # stdlib
 require "fileutils"
@@ -8,197 +8,197 @@ require "set"
 require "tmpdir"
 
 # modules
-require "torch/inspector"
-require "torch/tensor"
-require "torch/version"
+require_relative "torch/inspector"
+require_relative "torch/tensor"
+require_relative "torch/version"
 
 # optim
-require "torch/optim/optimizer"
-require "torch/optim/adadelta"
-require "torch/optim/adagrad"
-require "torch/optim/adam"
-require "torch/optim/adamax"
-require "torch/optim/adamw"
-require "torch/optim/asgd"
-require "torch/optim/rmsprop"
-require "torch/optim/rprop"
-require "torch/optim/sgd"
+require_relative "torch/optim/optimizer"
+require_relative "torch/optim/adadelta"
+require_relative "torch/optim/adagrad"
+require_relative "torch/optim/adam"
+require_relative "torch/optim/adamax"
+require_relative "torch/optim/adamw"
+require_relative "torch/optim/asgd"
+require_relative "torch/optim/rmsprop"
+require_relative "torch/optim/rprop"
+require_relative "torch/optim/sgd"
 
 # optim lr_scheduler
-require "torch/optim/lr_scheduler/lr_scheduler"
-require "torch/optim/lr_scheduler/lambda_lr"
-require "torch/optim/lr_scheduler/multiplicative_lr"
-require "torch/optim/lr_scheduler/step_lr"
-require "torch/optim/lr_scheduler/multi_step_lr"
-require "torch/optim/lr_scheduler/exponential_lr"
-require "torch/optim/lr_scheduler/cosine_annealing_lr"
+require_relative "torch/optim/lr_scheduler/lr_scheduler"
+require_relative "torch/optim/lr_scheduler/lambda_lr"
+require_relative "torch/optim/lr_scheduler/multiplicative_lr"
+require_relative "torch/optim/lr_scheduler/step_lr"
+require_relative "torch/optim/lr_scheduler/multi_step_lr"
+require_relative "torch/optim/lr_scheduler/exponential_lr"
+require_relative "torch/optim/lr_scheduler/cosine_annealing_lr"
 
 # nn parameters
-require "torch/nn/parameter"
-require "torch/nn/utils"
+require_relative "torch/nn/parameter"
+require_relative "torch/nn/utils"
 
 # nn containers
-require "torch/nn/module"
-require "torch/nn/module_list"
-require "torch/nn/parameter_list"
-require "torch/nn/sequential"
+require_relative "torch/nn/module"
+require_relative "torch/nn/module_list"
+require_relative "torch/nn/parameter_list"
+require_relative "torch/nn/sequential"
 
 # nn convolution layers
-require "torch/nn/convnd"
-require "torch/nn/conv1d"
-require "torch/nn/conv2d"
-require "torch/nn/conv3d"
-require "torch/nn/unfold"
-require "torch/nn/fold"
+require_relative "torch/nn/convnd"
+require_relative "torch/nn/conv1d"
+require_relative "torch/nn/conv2d"
+require_relative "torch/nn/conv3d"
+require_relative "torch/nn/unfold"
+require_relative "torch/nn/fold"
 
 # nn pooling layers
-require "torch/nn/max_poolnd"
-require "torch/nn/max_pool1d"
-require "torch/nn/max_pool2d"
-require "torch/nn/max_pool3d"
-require "torch/nn/max_unpoolnd"
-require "torch/nn/max_unpool1d"
-require "torch/nn/max_unpool2d"
-require "torch/nn/max_unpool3d"
-require "torch/nn/avg_poolnd"
-require "torch/nn/avg_pool1d"
-require "torch/nn/avg_pool2d"
-require "torch/nn/avg_pool3d"
-require "torch/nn/lp_poolnd"
-require "torch/nn/lp_pool1d"
-require "torch/nn/lp_pool2d"
-require "torch/nn/adaptive_max_poolnd"
-require "torch/nn/adaptive_max_pool1d"
-require "torch/nn/adaptive_max_pool2d"
-require "torch/nn/adaptive_max_pool3d"
-require "torch/nn/adaptive_avg_poolnd"
-require "torch/nn/adaptive_avg_pool1d"
-require "torch/nn/adaptive_avg_pool2d"
-require "torch/nn/adaptive_avg_pool3d"
+require_relative "torch/nn/max_poolnd"
+require_relative "torch/nn/max_pool1d"
+require_relative "torch/nn/max_pool2d"
+require_relative "torch/nn/max_pool3d"
+require_relative "torch/nn/max_unpoolnd"
+require_relative "torch/nn/max_unpool1d"
+require_relative "torch/nn/max_unpool2d"
+require_relative "torch/nn/max_unpool3d"
+require_relative "torch/nn/avg_poolnd"
+require_relative "torch/nn/avg_pool1d"
+require_relative "torch/nn/avg_pool2d"
+require_relative "torch/nn/avg_pool3d"
+require_relative "torch/nn/lp_poolnd"
+require_relative "torch/nn/lp_pool1d"
+require_relative "torch/nn/lp_pool2d"
+require_relative "torch/nn/adaptive_max_poolnd"
+require_relative "torch/nn/adaptive_max_pool1d"
+require_relative "torch/nn/adaptive_max_pool2d"
+require_relative "torch/nn/adaptive_max_pool3d"
+require_relative "torch/nn/adaptive_avg_poolnd"
+require_relative "torch/nn/adaptive_avg_pool1d"
+require_relative "torch/nn/adaptive_avg_pool2d"
+require_relative "torch/nn/adaptive_avg_pool3d"
 
 # nn padding layers
-require "torch/nn/reflection_padnd"
-require "torch/nn/reflection_pad1d"
-require "torch/nn/reflection_pad2d"
-require "torch/nn/replication_padnd"
-require "torch/nn/replication_pad1d"
-require "torch/nn/replication_pad2d"
-require "torch/nn/replication_pad3d"
-require "torch/nn/constant_padnd"
-require "torch/nn/constant_pad1d"
-require "torch/nn/constant_pad2d"
-require "torch/nn/constant_pad3d"
-require "torch/nn/zero_pad2d"
+require_relative "torch/nn/reflection_padnd"
+require_relative "torch/nn/reflection_pad1d"
+require_relative "torch/nn/reflection_pad2d"
+require_relative "torch/nn/replication_padnd"
+require_relative "torch/nn/replication_pad1d"
+require_relative "torch/nn/replication_pad2d"
+require_relative "torch/nn/replication_pad3d"
+require_relative "torch/nn/constant_padnd"
+require_relative "torch/nn/constant_pad1d"
+require_relative "torch/nn/constant_pad2d"
+require_relative "torch/nn/constant_pad3d"
+require_relative "torch/nn/zero_pad2d"
 
 # nn normalization layers
-require "torch/nn/batch_norm"
-require "torch/nn/batch_norm1d"
-require "torch/nn/batch_norm2d"
-require "torch/nn/batch_norm3d"
-require "torch/nn/group_norm"
-require "torch/nn/instance_norm"
-require "torch/nn/instance_norm1d"
-require "torch/nn/instance_norm2d"
-require "torch/nn/instance_norm3d"
-require "torch/nn/layer_norm"
-require "torch/nn/local_response_norm"
+require_relative "torch/nn/batch_norm"
+require_relative "torch/nn/batch_norm1d"
+require_relative "torch/nn/batch_norm2d"
+require_relative "torch/nn/batch_norm3d"
+require_relative "torch/nn/group_norm"
+require_relative "torch/nn/instance_norm"
+require_relative "torch/nn/instance_norm1d"
+require_relative "torch/nn/instance_norm2d"
+require_relative "torch/nn/instance_norm3d"
+require_relative "torch/nn/layer_norm"
+require_relative "torch/nn/local_response_norm"
 
 # nn recurrent layers
-require "torch/nn/rnn_base"
-require "torch/nn/rnn"
-require "torch/nn/lstm"
-require "torch/nn/gru"
+require_relative "torch/nn/rnn_base"
+require_relative "torch/nn/rnn"
+require_relative "torch/nn/lstm"
+require_relative "torch/nn/gru"
 
 # nn linear layers
-require "torch/nn/bilinear"
-require "torch/nn/identity"
-require "torch/nn/linear"
+require_relative "torch/nn/bilinear"
+require_relative "torch/nn/identity"
+require_relative "torch/nn/linear"
 
 # nn dropout layers
-require "torch/nn/dropoutnd"
-require "torch/nn/alpha_dropout"
-require "torch/nn/dropout"
-require "torch/nn/dropout2d"
-require "torch/nn/dropout3d"
-require "torch/nn/feature_alpha_dropout"
+require_relative "torch/nn/dropoutnd"
+require_relative "torch/nn/alpha_dropout"
+require_relative "torch/nn/dropout"
+require_relative "torch/nn/dropout2d"
+require_relative "torch/nn/dropout3d"
+require_relative "torch/nn/feature_alpha_dropout"
 
 # nn activations
-require "torch/nn/hardshrink"
-require "torch/nn/leaky_relu"
-require "torch/nn/log_sigmoid"
-require "torch/nn/prelu"
-require "torch/nn/relu"
-require "torch/nn/sigmoid"
-require "torch/nn/softplus"
-require "torch/nn/softshrink"
-require "torch/nn/softsign"
-require "torch/nn/tanh"
-require "torch/nn/tanhshrink"
+require_relative "torch/nn/hardshrink"
+require_relative "torch/nn/leaky_relu"
+require_relative "torch/nn/log_sigmoid"
+require_relative "torch/nn/prelu"
+require_relative "torch/nn/relu"
+require_relative "torch/nn/sigmoid"
+require_relative "torch/nn/softplus"
+require_relative "torch/nn/softshrink"
+require_relative "torch/nn/softsign"
+require_relative "torch/nn/tanh"
+require_relative "torch/nn/tanhshrink"
 
 # nn activations other
-require "torch/nn/log_softmax"
-require "torch/nn/softmax"
-require "torch/nn/softmax2d"
-require "torch/nn/softmin"
+require_relative "torch/nn/log_softmax"
+require_relative "torch/nn/softmax"
+require_relative "torch/nn/softmax2d"
+require_relative "torch/nn/softmin"
 
 # nn sparse layers
-require "torch/nn/embedding"
-require "torch/nn/embedding_bag"
+require_relative "torch/nn/embedding"
+require_relative "torch/nn/embedding_bag"
 
 # attention is all you need
-require "torch/nn/multihead_attention"
-require "torch/nn/transformer"
+require_relative "torch/nn/multihead_attention"
+require_relative "torch/nn/transformer"
 
 # nn distance functions
-require "torch/nn/cosine_similarity"
-require "torch/nn/pairwise_distance"
+require_relative "torch/nn/cosine_similarity"
+require_relative "torch/nn/pairwise_distance"
 
 # nn loss functions
-require "torch/nn/loss"
-require "torch/nn/weighted_loss"
-require "torch/nn/bce_loss"
-require "torch/nn/bce_with_logits_loss"
-require "torch/nn/cosine_embedding_loss"
-require "torch/nn/cross_entropy_loss"
-require "torch/nn/ctc_loss"
-require "torch/nn/hinge_embedding_loss"
-require "torch/nn/kl_div_loss"
-require "torch/nn/l1_loss"
-require "torch/nn/margin_ranking_loss"
-require "torch/nn/mse_loss"
-require "torch/nn/multi_label_margin_loss"
-require "torch/nn/multi_label_soft_margin_loss"
-require "torch/nn/multi_margin_loss"
-require "torch/nn/nll_loss"
-require "torch/nn/poisson_nll_loss"
-require "torch/nn/smooth_l1_loss"
-require "torch/nn/soft_margin_loss"
-require "torch/nn/triplet_margin_loss"
+require_relative "torch/nn/loss"
+require_relative "torch/nn/weighted_loss"
+require_relative "torch/nn/bce_loss"
+require_relative "torch/nn/bce_with_logits_loss"
+require_relative "torch/nn/cosine_embedding_loss"
+require_relative "torch/nn/cross_entropy_loss"
+require_relative "torch/nn/ctc_loss"
+require_relative "torch/nn/hinge_embedding_loss"
+require_relative "torch/nn/kl_div_loss"
+require_relative "torch/nn/l1_loss"
+require_relative "torch/nn/margin_ranking_loss"
+require_relative "torch/nn/mse_loss"
+require_relative "torch/nn/multi_label_margin_loss"
+require_relative "torch/nn/multi_label_soft_margin_loss"
+require_relative "torch/nn/multi_margin_loss"
+require_relative "torch/nn/nll_loss"
+require_relative "torch/nn/poisson_nll_loss"
+require_relative "torch/nn/smooth_l1_loss"
+require_relative "torch/nn/soft_margin_loss"
+require_relative "torch/nn/triplet_margin_loss"
 
 # nn vision
-require "torch/nn/upsample"
+require_relative "torch/nn/upsample"
 
 # nn other
-require "torch/nn/functional"
-require "torch/nn/functional_attention"
-require "torch/nn/init"
+require_relative "torch/nn/functional"
+require_relative "torch/nn/functional_attention"
+require_relative "torch/nn/init"
 
 # utils
-require "torch/utils/data"
-require "torch/utils/data/data_loader"
-require "torch/utils/data/dataset"
-require "torch/utils/data/iterable_dataset"
-require "torch/utils/data/data_pipes/iter_data_pipe"
-require "torch/utils/data/data_pipes/filter_iter_data_pipe"
-require "torch/utils/data/data_pipes/iter/file_lister"
-require "torch/utils/data/data_pipes/iter/file_opener"
-require "torch/utils/data/data_pipes/iter/iterable_wrapper"
-require "torch/utils/data/data_pipes/iter/stream_wrapper"
-require "torch/utils/data/subset"
-require "torch/utils/data/tensor_dataset"
+require_relative "torch/utils/data"
+require_relative "torch/utils/data/data_loader"
+require_relative "torch/utils/data/dataset"
+require_relative "torch/utils/data/iterable_dataset"
+require_relative "torch/utils/data/data_pipes/iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/filter_iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/iter/file_lister"
+require_relative "torch/utils/data/data_pipes/iter/file_opener"
+require_relative "torch/utils/data/data_pipes/iter/iterable_wrapper"
+require_relative "torch/utils/data/data_pipes/iter/stream_wrapper"
+require_relative "torch/utils/data/subset"
+require_relative "torch/utils/data/tensor_dataset"
 
 # hub
-require "torch/hub"
+require_relative "torch/hub"
 
 module Torch
   class Error < StandardError; end
```
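Every internal require in torch.rb (and the torch-rb.rb shim above) switches to `require_relative`, which resolves paths against the requiring file's directory instead of scanning `$LOAD_PATH`; the set of loaded files is unchanged. The pattern, in brief:

```ruby
# before: searched every $LOAD_PATH entry for torch/inspector.rb
require "torch/inspector"

# after: loads inspector.rb from the directory containing torch.rb
require_relative "torch/inspector"
```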
metadata
CHANGED
```diff
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: torch-rb
 version: !ruby/object:Gem::Version
-  version: 0.12.2
+  version: 0.13.1
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-05-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rice
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0
+        version: 4.1.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: 4.0
+        version: 4.1.0
 description:
 email: andrew@ankane.org
 executables: []
@@ -230,14 +230,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '2.7'
+      version: '3'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.4.
+rubygems_version: 3.4.10
 signing_key:
 specification_version: 4
 summary: Deep learning for Ruby, powered by LibTorch
```
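To pull in this release, which per the updated metadata requires Ruby 3+ and rice 4.1.0+ at runtime, a Gemfile entry might look like this (LibTorch itself is still installed separately, as described in the gem's README):

```ruby
# Gemfile
gem "torch-rb", "0.13.1"  # resolves rice >= 4.1.0 automatically as a runtime dependency
```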