torch-rb 0.13.0 → 0.13.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/README.md +26 -13
- data/codegen/generate_functions.rb +1 -4
- data/ext/torch/backends.cpp +0 -3
- data/ext/torch/cuda.cpp +0 -1
- data/ext/torch/device.cpp +0 -1
- data/ext/torch/fft.cpp +0 -1
- data/ext/torch/generator.cpp +0 -1
- data/ext/torch/ivalue.cpp +0 -1
- data/ext/torch/linalg.cpp +0 -1
- data/ext/torch/nn.cpp +0 -3
- data/ext/torch/random.cpp +0 -1
- data/ext/torch/special.cpp +0 -1
- data/ext/torch/tensor.cpp +0 -2
- data/ext/torch/torch.cpp +1 -1
- data/ext/torch/utils.h +1 -2
- data/lib/torch/version.rb +1 -1
- data/lib/torch-rb.rb +1 -1
- data/lib/torch.rb +150 -150
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 311e86910351cc5050fb92146e1f6c9ad018a9609d0f5ca0ef51253f1b813b22
+  data.tar.gz: afc18a5142abecba2fdd17f669ecdf9de5b90f35e8c79a637c4e960427a08439
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 29df29a3dd0f752f4da731b84f0ca40832d49510a76784364d607977c8e90e32074f7e17ea07167e79330a97ef5f3378b19331d326301a3cc5a0e76180384ac6
+  data.tar.gz: 50e96247102c02cb6f5b0ddc470947f06dfb4efcc3a675ee174a334c4263c1dfe9f3be9c5bfb249239b8b4abce79f6e6dc625833081ec101180ba9a757f3421c
data/CHANGELOG.md
CHANGED
data/README.md
CHANGED
@@ -410,18 +410,12 @@ Here’s the list of compatible versions.

 Torch.rb | LibTorch
 --- | ---
-0.13.
-0.12.
-0.11.
-0.10.
-0.9.
-0.8.
-0.6.0-0.7.0 | 1.8.0-1.8.1
-0.5.0-0.5.3 | 1.7.0-1.7.1
-0.3.0-0.4.2 | 1.6.0
-0.2.0-0.2.7 | 1.5.0-1.5.1
-0.1.8 | 1.4.0
-0.1.0-0.1.7 | 1.3.1
+0.13.x | 2.0.x
+0.12.x | 1.13.x
+0.11.x | 1.12.x
+0.10.x | 1.11.x
+0.9.x | 1.10.x
+0.8.x | 1.9.x

 ### Homebrew

@@ -433,7 +427,11 @@ brew install pytorch

 ## Performance

-Deep learning is significantly faster on a GPU.
+Deep learning is significantly faster on a GPU.
+
+### Linux
+
+With Linux, install [CUDA](https://developer.nvidia.com/cuda-downloads) and [cuDNN](https://developer.nvidia.com/cudnn) and reinstall the gem.

 Check if CUDA is available

@@ -455,6 +453,21 @@ ankane/ml-stack:torch-gpu

 And leave the other fields in that section blank. Once the notebook is running, you can run the [MNIST example](https://github.com/ankane/ml-stack/blob/master/torch-gpu/MNIST.ipynb).

+### Mac
+
+With Apple silicon, check if Metal Performance Shaders (MPS) is available
+
+```ruby
+Torch::Backends::MPS.available?
+```
+
+Move a neural network to a GPU
+
+```ruby
+device = Torch.device("mps")
+net.to(device)
+```
+
 ## History

 View the [changelog](https://github.com/ankane/torch.rb/blob/master/CHANGELOG.md)
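For context, the availability checks referenced in the README additions compose into a simple device-selection idiom. A minimal sketch (assuming a LibTorch build with the relevant backend compiled in; only calls from this gem's documented API are used):

```ruby
require "torch"

# Prefer CUDA, then Apple silicon MPS, then fall back to CPU
device =
  if Torch::CUDA.available?
    Torch.device("cuda")
  elsif Torch::Backends::MPS.available?
    Torch.device("mps")
  else
    Torch.device("cpu")
  end

x = Torch.ones(2, 3).to(device)
```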
data/codegen/generate_functions.rb
CHANGED

@@ -156,10 +156,7 @@ def generate_attach_def(name, type, def_method)
   ruby_name = ruby_name.sub(/\Asparse_/, "") if type == "sparse"
   ruby_name = name if name.start_with?("__")

-  #
-  cast = RUBY_VERSION.to_f > 2.7 ? "" : "(VALUE (*)(...)) "
-
-  "rb_#{def_method}(m, \"#{ruby_name}\", #{cast}#{full_name(name, type)}, -1);"
+  "rb_#{def_method}(m, \"#{ruby_name}\", #{full_name(name, type)}, -1);"
 end

 def generate_method_def(name, functions, type, def_method)
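The removed `cast` prepended a `(VALUE (*)(...))` function-pointer cast to the generated C registration on older Rubies; the template now emits the call directly. A standalone re-creation of the simplified helper (inputs hypothetical, `full_name` elided):

```ruby
# Sketch of the simplified template; "abs"/"tensor_abs" are made-up inputs
def attach_def(def_method, ruby_name, cpp_name)
  "rb_#{def_method}(m, \"#{ruby_name}\", #{cpp_name}, -1);"
end

puts attach_def("define_method", "abs", "tensor_abs")
# => rb_define_method(m, "abs", tensor_abs, -1);
```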
data/ext/torch/backends.cpp
CHANGED
@@ -8,14 +8,11 @@ void init_backends(Rice::Module& m) {
   auto rb_mBackends = Rice::define_module_under(m, "Backends");

   Rice::define_module_under(rb_mBackends, "OpenMP")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasOpenMP);

   Rice::define_module_under(rb_mBackends, "MKL")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMKL);

   Rice::define_module_under(rb_mBackends, "MPS")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::hasMPS);
 }
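Only the per-module `add_handler` calls are dropped here; the `available?` checks keep the same Ruby-facing API:

```ruby
Torch::Backends::OpenMP.available?
Torch::Backends::MKL.available?
Torch::Backends::MPS.available?
```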
data/ext/torch/cuda.cpp
CHANGED
@@ -6,7 +6,6 @@

 void init_cuda(Rice::Module& m) {
   Rice::define_module_under(m, "CUDA")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function("available?", &torch::cuda::is_available)
     .define_singleton_function("device_count", &torch::cuda::device_count)
     .define_singleton_function("manual_seed", &torch::cuda::manual_seed)
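Likewise, the CUDA singletons defined here are unchanged from Ruby's point of view:

```ruby
Torch::CUDA.available?      # => true on a working CUDA setup
Torch::CUDA.device_count    # => number of visible GPUs
Torch::CUDA.manual_seed(42) # seed the GPU random number generator
```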
data/ext/torch/device.cpp
CHANGED
data/ext/torch/fft.cpp
CHANGED
data/ext/torch/generator.cpp
CHANGED
data/ext/torch/ivalue.cpp
CHANGED
@@ -7,7 +7,6 @@
 void init_ivalue(Rice::Module& m, Rice::Class& rb_cIValue) {
   // https://pytorch.org/cppdocs/api/structc10_1_1_i_value.html
   rb_cIValue
-    .add_handler<torch::Error>(handle_error)
     .define_method("bool?", &torch::IValue::isBool)
     .define_method("bool_list?", &torch::IValue::isBoolList)
     .define_method("capsule?", &torch::IValue::isCapsule)
data/ext/torch/linalg.cpp
CHANGED
data/ext/torch/nn.cpp
CHANGED
@@ -14,11 +14,9 @@ class Parameter: public torch::autograd::Variable {

 void init_nn(Rice::Module& m) {
   auto rb_mNN = Rice::define_module_under(m, "NN");
-  rb_mNN.add_handler<torch::Error>(handle_error);
   add_nn_functions(rb_mNN);

   Rice::define_module_under(rb_mNN, "Init")
-    .add_handler<torch::Error>(handle_error)
     .define_singleton_function(
       "_calculate_gain",
       [](NonlinearityType nonlinearity, double param) {
@@ -91,7 +89,6 @@ void init_nn(Rice::Module& m) {
     });

   Rice::define_class_under<Parameter, torch::Tensor>(rb_mNN, "Parameter")
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "grad",
       [](Parameter& self) {
data/ext/torch/random.cpp
CHANGED
data/ext/torch/special.cpp
CHANGED
data/ext/torch/tensor.cpp
CHANGED
@@ -96,7 +96,6 @@ static VALUE tensor__backward(int argc, VALUE* argv, VALUE self_)

 void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions) {
   rb_cTensor = c;
-  rb_cTensor.add_handler<torch::Error>(handle_error);
   add_tensor_functions(rb_cTensor);
   THPVariableClass = rb_cTensor.value();

@@ -286,7 +285,6 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
     });

   rb_cTensorOptions
-    .add_handler<torch::Error>(handle_error)
     .define_method(
       "dtype",
       [](torch::TensorOptions& self, int dtype) {
data/ext/torch/torch.cpp
CHANGED
@@ -24,7 +24,7 @@ torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch:
 }

 void init_torch(Rice::Module& m) {
-  m.add_handler<torch::Error>(handle_error);
+  register_handler<torch::Error>(handle_global_error);
   add_torch_functions(m);
   m.define_singleton_function(
     "grad_enabled?",
data/ext/torch/utils.h
CHANGED
@@ -10,8 +10,7 @@ static_assert(
   "Incompatible LibTorch version"
 );

-
-inline void handle_error(torch::Error const & ex) {
+inline void handle_global_error(const torch::Error& ex) {
   throw Rice::Exception(rb_eRuntimeError, ex.what_without_backtrace());
 }
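Net effect of the handler changes across these files: instead of attaching `handle_error` to each module and class, a single `register_handler<torch::Error>(handle_global_error)` call in torch.cpp now converts any `torch::Error` into a Ruby `RuntimeError`. A sketch of the observable behavior, assuming the size mismatch raises `torch::Error` inside LibTorch:

```ruby
begin
  Torch.ones(2, 3) + Torch.ones(4, 5) # incompatible shapes
rescue RuntimeError => e
  puts e.message # LibTorch's message, without the C++ backtrace
end
```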
data/lib/torch/version.rb
CHANGED
data/lib/torch-rb.rb
CHANGED
@@ -1 +1 @@
-require "torch"
+require_relative "torch"
data/lib/torch.rb
CHANGED
@@ -1,5 +1,5 @@
 # ext
-require "torch/ext"
+require_relative "torch/ext"

 # stdlib
 require "fileutils"
@@ -8,197 +8,197 @@ require "set"
 require "tmpdir"

 # modules
-require "torch/inspector"
-require "torch/tensor"
-require "torch/version"
+require_relative "torch/inspector"
+require_relative "torch/tensor"
+require_relative "torch/version"

 # optim
-require "torch/optim/optimizer"
-require "torch/optim/adadelta"
-require "torch/optim/adagrad"
-require "torch/optim/adam"
-require "torch/optim/adamax"
-require "torch/optim/adamw"
-require "torch/optim/asgd"
-require "torch/optim/rmsprop"
-require "torch/optim/rprop"
-require "torch/optim/sgd"
+require_relative "torch/optim/optimizer"
+require_relative "torch/optim/adadelta"
+require_relative "torch/optim/adagrad"
+require_relative "torch/optim/adam"
+require_relative "torch/optim/adamax"
+require_relative "torch/optim/adamw"
+require_relative "torch/optim/asgd"
+require_relative "torch/optim/rmsprop"
+require_relative "torch/optim/rprop"
+require_relative "torch/optim/sgd"

 # optim lr_scheduler
-require "torch/optim/lr_scheduler/lr_scheduler"
-require "torch/optim/lr_scheduler/lambda_lr"
-require "torch/optim/lr_scheduler/multiplicative_lr"
-require "torch/optim/lr_scheduler/step_lr"
-require "torch/optim/lr_scheduler/multi_step_lr"
-require "torch/optim/lr_scheduler/exponential_lr"
-require "torch/optim/lr_scheduler/cosine_annealing_lr"
+require_relative "torch/optim/lr_scheduler/lr_scheduler"
+require_relative "torch/optim/lr_scheduler/lambda_lr"
+require_relative "torch/optim/lr_scheduler/multiplicative_lr"
+require_relative "torch/optim/lr_scheduler/step_lr"
+require_relative "torch/optim/lr_scheduler/multi_step_lr"
+require_relative "torch/optim/lr_scheduler/exponential_lr"
+require_relative "torch/optim/lr_scheduler/cosine_annealing_lr"

 # nn parameters
-require "torch/nn/parameter"
-require "torch/nn/utils"
+require_relative "torch/nn/parameter"
+require_relative "torch/nn/utils"

 # nn containers
-require "torch/nn/module"
-require "torch/nn/module_list"
-require "torch/nn/parameter_list"
-require "torch/nn/sequential"
+require_relative "torch/nn/module"
+require_relative "torch/nn/module_list"
+require_relative "torch/nn/parameter_list"
+require_relative "torch/nn/sequential"

 # nn convolution layers
-require "torch/nn/convnd"
-require "torch/nn/conv1d"
-require "torch/nn/conv2d"
-require "torch/nn/conv3d"
-require "torch/nn/unfold"
-require "torch/nn/fold"
+require_relative "torch/nn/convnd"
+require_relative "torch/nn/conv1d"
+require_relative "torch/nn/conv2d"
+require_relative "torch/nn/conv3d"
+require_relative "torch/nn/unfold"
+require_relative "torch/nn/fold"

 # nn pooling layers
-require "torch/nn/max_poolnd"
-require "torch/nn/max_pool1d"
-require "torch/nn/max_pool2d"
-require "torch/nn/max_pool3d"
-require "torch/nn/max_unpoolnd"
-require "torch/nn/max_unpool1d"
-require "torch/nn/max_unpool2d"
-require "torch/nn/max_unpool3d"
-require "torch/nn/avg_poolnd"
-require "torch/nn/avg_pool1d"
-require "torch/nn/avg_pool2d"
-require "torch/nn/avg_pool3d"
-require "torch/nn/lp_poolnd"
-require "torch/nn/lp_pool1d"
-require "torch/nn/lp_pool2d"
-require "torch/nn/adaptive_max_poolnd"
-require "torch/nn/adaptive_max_pool1d"
-require "torch/nn/adaptive_max_pool2d"
-require "torch/nn/adaptive_max_pool3d"
-require "torch/nn/adaptive_avg_poolnd"
-require "torch/nn/adaptive_avg_pool1d"
-require "torch/nn/adaptive_avg_pool2d"
-require "torch/nn/adaptive_avg_pool3d"
+require_relative "torch/nn/max_poolnd"
+require_relative "torch/nn/max_pool1d"
+require_relative "torch/nn/max_pool2d"
+require_relative "torch/nn/max_pool3d"
+require_relative "torch/nn/max_unpoolnd"
+require_relative "torch/nn/max_unpool1d"
+require_relative "torch/nn/max_unpool2d"
+require_relative "torch/nn/max_unpool3d"
+require_relative "torch/nn/avg_poolnd"
+require_relative "torch/nn/avg_pool1d"
+require_relative "torch/nn/avg_pool2d"
+require_relative "torch/nn/avg_pool3d"
+require_relative "torch/nn/lp_poolnd"
+require_relative "torch/nn/lp_pool1d"
+require_relative "torch/nn/lp_pool2d"
+require_relative "torch/nn/adaptive_max_poolnd"
+require_relative "torch/nn/adaptive_max_pool1d"
+require_relative "torch/nn/adaptive_max_pool2d"
+require_relative "torch/nn/adaptive_max_pool3d"
+require_relative "torch/nn/adaptive_avg_poolnd"
+require_relative "torch/nn/adaptive_avg_pool1d"
+require_relative "torch/nn/adaptive_avg_pool2d"
+require_relative "torch/nn/adaptive_avg_pool3d"

 # nn padding layers
-require "torch/nn/reflection_padnd"
-require "torch/nn/reflection_pad1d"
-require "torch/nn/reflection_pad2d"
-require "torch/nn/replication_padnd"
-require "torch/nn/replication_pad1d"
-require "torch/nn/replication_pad2d"
-require "torch/nn/replication_pad3d"
-require "torch/nn/constant_padnd"
-require "torch/nn/constant_pad1d"
-require "torch/nn/constant_pad2d"
-require "torch/nn/constant_pad3d"
-require "torch/nn/zero_pad2d"
+require_relative "torch/nn/reflection_padnd"
+require_relative "torch/nn/reflection_pad1d"
+require_relative "torch/nn/reflection_pad2d"
+require_relative "torch/nn/replication_padnd"
+require_relative "torch/nn/replication_pad1d"
+require_relative "torch/nn/replication_pad2d"
+require_relative "torch/nn/replication_pad3d"
+require_relative "torch/nn/constant_padnd"
+require_relative "torch/nn/constant_pad1d"
+require_relative "torch/nn/constant_pad2d"
+require_relative "torch/nn/constant_pad3d"
+require_relative "torch/nn/zero_pad2d"

 # nn normalization layers
-require "torch/nn/batch_norm"
-require "torch/nn/batch_norm1d"
-require "torch/nn/batch_norm2d"
-require "torch/nn/batch_norm3d"
-require "torch/nn/group_norm"
-require "torch/nn/instance_norm"
-require "torch/nn/instance_norm1d"
-require "torch/nn/instance_norm2d"
-require "torch/nn/instance_norm3d"
-require "torch/nn/layer_norm"
-require "torch/nn/local_response_norm"
+require_relative "torch/nn/batch_norm"
+require_relative "torch/nn/batch_norm1d"
+require_relative "torch/nn/batch_norm2d"
+require_relative "torch/nn/batch_norm3d"
+require_relative "torch/nn/group_norm"
+require_relative "torch/nn/instance_norm"
+require_relative "torch/nn/instance_norm1d"
+require_relative "torch/nn/instance_norm2d"
+require_relative "torch/nn/instance_norm3d"
+require_relative "torch/nn/layer_norm"
+require_relative "torch/nn/local_response_norm"

 # nn recurrent layers
-require "torch/nn/rnn_base"
-require "torch/nn/rnn"
-require "torch/nn/lstm"
-require "torch/nn/gru"
+require_relative "torch/nn/rnn_base"
+require_relative "torch/nn/rnn"
+require_relative "torch/nn/lstm"
+require_relative "torch/nn/gru"

 # nn linear layers
-require "torch/nn/bilinear"
-require "torch/nn/identity"
-require "torch/nn/linear"
+require_relative "torch/nn/bilinear"
+require_relative "torch/nn/identity"
+require_relative "torch/nn/linear"

 # nn dropout layers
-require "torch/nn/dropoutnd"
-require "torch/nn/alpha_dropout"
-require "torch/nn/dropout"
-require "torch/nn/dropout2d"
-require "torch/nn/dropout3d"
-require "torch/nn/feature_alpha_dropout"
+require_relative "torch/nn/dropoutnd"
+require_relative "torch/nn/alpha_dropout"
+require_relative "torch/nn/dropout"
+require_relative "torch/nn/dropout2d"
+require_relative "torch/nn/dropout3d"
+require_relative "torch/nn/feature_alpha_dropout"

 # nn activations
-require "torch/nn/hardshrink"
-require "torch/nn/leaky_relu"
-require "torch/nn/log_sigmoid"
-require "torch/nn/prelu"
-require "torch/nn/relu"
-require "torch/nn/sigmoid"
-require "torch/nn/softplus"
-require "torch/nn/softshrink"
-require "torch/nn/softsign"
-require "torch/nn/tanh"
-require "torch/nn/tanhshrink"
+require_relative "torch/nn/hardshrink"
+require_relative "torch/nn/leaky_relu"
+require_relative "torch/nn/log_sigmoid"
+require_relative "torch/nn/prelu"
+require_relative "torch/nn/relu"
+require_relative "torch/nn/sigmoid"
+require_relative "torch/nn/softplus"
+require_relative "torch/nn/softshrink"
+require_relative "torch/nn/softsign"
+require_relative "torch/nn/tanh"
+require_relative "torch/nn/tanhshrink"

 # nn activations other
-require "torch/nn/log_softmax"
-require "torch/nn/softmax"
-require "torch/nn/softmax2d"
-require "torch/nn/softmin"
+require_relative "torch/nn/log_softmax"
+require_relative "torch/nn/softmax"
+require_relative "torch/nn/softmax2d"
+require_relative "torch/nn/softmin"

 # nn sparse layers
-require "torch/nn/embedding"
-require "torch/nn/embedding_bag"
+require_relative "torch/nn/embedding"
+require_relative "torch/nn/embedding_bag"

 # attention is all you need
-require "torch/nn/multihead_attention"
-require "torch/nn/transformer"
+require_relative "torch/nn/multihead_attention"
+require_relative "torch/nn/transformer"

 # nn distance functions
-require "torch/nn/cosine_similarity"
-require "torch/nn/pairwise_distance"
+require_relative "torch/nn/cosine_similarity"
+require_relative "torch/nn/pairwise_distance"

 # nn loss functions
-require "torch/nn/loss"
-require "torch/nn/weighted_loss"
-require "torch/nn/bce_loss"
-require "torch/nn/bce_with_logits_loss"
-require "torch/nn/cosine_embedding_loss"
-require "torch/nn/cross_entropy_loss"
-require "torch/nn/ctc_loss"
-require "torch/nn/hinge_embedding_loss"
-require "torch/nn/kl_div_loss"
-require "torch/nn/l1_loss"
-require "torch/nn/margin_ranking_loss"
-require "torch/nn/mse_loss"
-require "torch/nn/multi_label_margin_loss"
-require "torch/nn/multi_label_soft_margin_loss"
-require "torch/nn/multi_margin_loss"
-require "torch/nn/nll_loss"
-require "torch/nn/poisson_nll_loss"
-require "torch/nn/smooth_l1_loss"
-require "torch/nn/soft_margin_loss"
-require "torch/nn/triplet_margin_loss"
+require_relative "torch/nn/loss"
+require_relative "torch/nn/weighted_loss"
+require_relative "torch/nn/bce_loss"
+require_relative "torch/nn/bce_with_logits_loss"
+require_relative "torch/nn/cosine_embedding_loss"
+require_relative "torch/nn/cross_entropy_loss"
+require_relative "torch/nn/ctc_loss"
+require_relative "torch/nn/hinge_embedding_loss"
+require_relative "torch/nn/kl_div_loss"
+require_relative "torch/nn/l1_loss"
+require_relative "torch/nn/margin_ranking_loss"
+require_relative "torch/nn/mse_loss"
+require_relative "torch/nn/multi_label_margin_loss"
+require_relative "torch/nn/multi_label_soft_margin_loss"
+require_relative "torch/nn/multi_margin_loss"
+require_relative "torch/nn/nll_loss"
+require_relative "torch/nn/poisson_nll_loss"
+require_relative "torch/nn/smooth_l1_loss"
+require_relative "torch/nn/soft_margin_loss"
+require_relative "torch/nn/triplet_margin_loss"

 # nn vision
-require "torch/nn/upsample"
+require_relative "torch/nn/upsample"

 # nn other
-require "torch/nn/functional"
-require "torch/nn/functional_attention"
-require "torch/nn/init"
+require_relative "torch/nn/functional"
+require_relative "torch/nn/functional_attention"
+require_relative "torch/nn/init"

 # utils
-require "torch/utils/data"
-require "torch/utils/data/data_loader"
-require "torch/utils/data/dataset"
-require "torch/utils/data/iterable_dataset"
-require "torch/utils/data/data_pipes/iter_data_pipe"
-require "torch/utils/data/data_pipes/filter_iter_data_pipe"
-require "torch/utils/data/data_pipes/iter/file_lister"
-require "torch/utils/data/data_pipes/iter/file_opener"
-require "torch/utils/data/data_pipes/iter/iterable_wrapper"
-require "torch/utils/data/data_pipes/iter/stream_wrapper"
-require "torch/utils/data/subset"
-require "torch/utils/data/tensor_dataset"
+require_relative "torch/utils/data"
+require_relative "torch/utils/data/data_loader"
+require_relative "torch/utils/data/dataset"
+require_relative "torch/utils/data/iterable_dataset"
+require_relative "torch/utils/data/data_pipes/iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/filter_iter_data_pipe"
+require_relative "torch/utils/data/data_pipes/iter/file_lister"
+require_relative "torch/utils/data/data_pipes/iter/file_opener"
+require_relative "torch/utils/data/data_pipes/iter/iterable_wrapper"
+require_relative "torch/utils/data/data_pipes/iter/stream_wrapper"
+require_relative "torch/utils/data/subset"
+require_relative "torch/utils/data/tensor_dataset"

 # hub
-require "torch/hub"
+require_relative "torch/hub"

 module Torch
   class Error < StandardError; end
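Every `require "torch/..."` above becomes `require_relative`, so the gem loads its own files by path relative to `lib/torch.rb` instead of searching `$LOAD_PATH`, where a shadowing file from another gem or a stale install could win. A contrived illustration (paths hypothetical):

```ruby
# lib/my_gem.rb
require "my_gem/version"          # searches $LOAD_PATH; any gem could supply it
require_relative "my_gem/version" # always loads the file next to this one
```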
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: torch-rb
 version: !ruby/object:Gem::Version
-  version: 0.13.0
+  version: 0.13.1
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-05-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rice
@@ -16,14 +16,14 @@ dependencies:
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 4.0
+      version: 4.1.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 4.0
+      version: 4.1.0
 description:
 email: andrew@ankane.org
 executables: []
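For upgraders, a hypothetical Gemfile pin consistent with the new metadata (the `rice >= 4.1.0` runtime dependency resolves automatically):

```ruby
# Gemfile
gem "torch-rb", "~> 0.13.1" # pairs with LibTorch 2.0.x per the README table
```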