torch-rb 0.1.1 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/LICENSE.txt +46 -22
  4. data/README.md +73 -9
  5. data/ext/torch/ext.cpp +148 -315
  6. data/ext/torch/extconf.rb +6 -0
  7. data/ext/torch/nn_functions.cpp +615 -0
  8. data/ext/torch/nn_functions.hpp +6 -0
  9. data/ext/torch/templates.cpp +55 -0
  10. data/ext/torch/templates.hpp +298 -0
  11. data/ext/torch/tensor_functions.cpp +1920 -0
  12. data/ext/torch/tensor_functions.hpp +6 -0
  13. data/ext/torch/torch_functions.cpp +2975 -0
  14. data/ext/torch/torch_functions.hpp +6 -0
  15. data/lib/torch.rb +236 -112
  16. data/lib/torch/ext.bundle +0 -0
  17. data/lib/torch/inspector.rb +52 -25
  18. data/lib/torch/native/dispatcher.rb +48 -0
  19. data/lib/torch/native/function.rb +109 -0
  20. data/lib/torch/native/generator.rb +168 -0
  21. data/lib/torch/native/native_functions.yaml +6837 -0
  22. data/lib/torch/native/parser.rb +134 -0
  23. data/lib/torch/nn/alpha_dropout.rb +9 -0
  24. data/lib/torch/nn/avg_pool1d.rb +18 -0
  25. data/lib/torch/nn/avg_pool2d.rb +19 -0
  26. data/lib/torch/nn/avg_pool3d.rb +19 -0
  27. data/lib/torch/nn/avg_poolnd.rb +9 -0
  28. data/lib/torch/nn/batch_norm.rb +75 -0
  29. data/lib/torch/nn/batch_norm1d.rb +11 -0
  30. data/lib/torch/nn/batch_norm2d.rb +11 -0
  31. data/lib/torch/nn/batch_norm3d.rb +11 -0
  32. data/lib/torch/nn/bce_loss.rb +13 -0
  33. data/lib/torch/nn/bce_with_logits_loss.rb +15 -0
  34. data/lib/torch/nn/bilinear.rb +38 -0
  35. data/lib/torch/nn/constant_pad1d.rb +10 -0
  36. data/lib/torch/nn/constant_pad2d.rb +10 -0
  37. data/lib/torch/nn/constant_pad3d.rb +10 -0
  38. data/lib/torch/nn/constant_padnd.rb +18 -0
  39. data/lib/torch/nn/conv1d.rb +22 -0
  40. data/lib/torch/nn/conv2d.rb +16 -39
  41. data/lib/torch/nn/conv3d.rb +22 -0
  42. data/lib/torch/nn/convnd.rb +41 -0
  43. data/lib/torch/nn/cosine_embedding_loss.rb +14 -0
  44. data/lib/torch/nn/cosine_similarity.rb +15 -0
  45. data/lib/torch/nn/cross_entropy_loss.rb +14 -0
  46. data/lib/torch/nn/ctc_loss.rb +15 -0
  47. data/lib/torch/nn/dropout.rb +9 -0
  48. data/lib/torch/nn/dropout2d.rb +9 -0
  49. data/lib/torch/nn/dropout3d.rb +9 -0
  50. data/lib/torch/nn/dropoutnd.rb +15 -0
  51. data/lib/torch/nn/embedding.rb +52 -0
  52. data/lib/torch/nn/embedding_bag.rb +34 -0
  53. data/lib/torch/nn/feature_alpha_dropout.rb +9 -0
  54. data/lib/torch/nn/fold.rb +20 -0
  55. data/lib/torch/nn/functional.rb +419 -16
  56. data/lib/torch/nn/group_norm.rb +36 -0
  57. data/lib/torch/nn/gru.rb +49 -0
  58. data/lib/torch/nn/hardshrink.rb +18 -0
  59. data/lib/torch/nn/hinge_embedding_loss.rb +14 -0
  60. data/lib/torch/nn/identity.rb +14 -0
  61. data/lib/torch/nn/init.rb +58 -1
  62. data/lib/torch/nn/instance_norm.rb +20 -0
  63. data/lib/torch/nn/instance_norm1d.rb +18 -0
  64. data/lib/torch/nn/instance_norm2d.rb +11 -0
  65. data/lib/torch/nn/instance_norm3d.rb +11 -0
  66. data/lib/torch/nn/kl_div_loss.rb +13 -0
  67. data/lib/torch/nn/l1_loss.rb +13 -0
  68. data/lib/torch/nn/layer_norm.rb +35 -0
  69. data/lib/torch/nn/leaky_relu.rb +20 -0
  70. data/lib/torch/nn/linear.rb +12 -11
  71. data/lib/torch/nn/local_response_norm.rb +21 -0
  72. data/lib/torch/nn/log_sigmoid.rb +9 -0
  73. data/lib/torch/nn/log_softmax.rb +14 -0
  74. data/lib/torch/nn/loss.rb +10 -0
  75. data/lib/torch/nn/lp_pool1d.rb +9 -0
  76. data/lib/torch/nn/lp_pool2d.rb +9 -0
  77. data/lib/torch/nn/lp_poolnd.rb +22 -0
  78. data/lib/torch/nn/lstm.rb +66 -0
  79. data/lib/torch/nn/margin_ranking_loss.rb +14 -0
  80. data/lib/torch/nn/max_pool1d.rb +9 -0
  81. data/lib/torch/nn/max_pool2d.rb +9 -0
  82. data/lib/torch/nn/max_pool3d.rb +9 -0
  83. data/lib/torch/nn/max_poolnd.rb +19 -0
  84. data/lib/torch/nn/max_unpool1d.rb +16 -0
  85. data/lib/torch/nn/max_unpool2d.rb +16 -0
  86. data/lib/torch/nn/max_unpool3d.rb +16 -0
  87. data/lib/torch/nn/max_unpoolnd.rb +9 -0
  88. data/lib/torch/nn/module.rb +191 -19
  89. data/lib/torch/nn/mse_loss.rb +2 -2
  90. data/lib/torch/nn/multi_label_margin_loss.rb +13 -0
  91. data/lib/torch/nn/multi_label_soft_margin_loss.rb +13 -0
  92. data/lib/torch/nn/multi_margin_loss.rb +17 -0
  93. data/lib/torch/nn/nll_loss.rb +14 -0
  94. data/lib/torch/nn/pairwise_distance.rb +16 -0
  95. data/lib/torch/nn/parameter.rb +4 -0
  96. data/lib/torch/nn/poisson_nll_loss.rb +16 -0
  97. data/lib/torch/nn/prelu.rb +19 -0
  98. data/lib/torch/nn/reflection_pad1d.rb +10 -0
  99. data/lib/torch/nn/reflection_pad2d.rb +10 -0
  100. data/lib/torch/nn/reflection_padnd.rb +13 -0
  101. data/lib/torch/nn/relu.rb +8 -3
  102. data/lib/torch/nn/replication_pad1d.rb +10 -0
  103. data/lib/torch/nn/replication_pad2d.rb +10 -0
  104. data/lib/torch/nn/replication_pad3d.rb +10 -0
  105. data/lib/torch/nn/replication_padnd.rb +13 -0
  106. data/lib/torch/nn/rnn.rb +22 -0
  107. data/lib/torch/nn/rnn_base.rb +198 -0
  108. data/lib/torch/nn/sequential.rb +1 -10
  109. data/lib/torch/nn/sigmoid.rb +9 -0
  110. data/lib/torch/nn/smooth_l1_loss.rb +13 -0
  111. data/lib/torch/nn/soft_margin_loss.rb +13 -0
  112. data/lib/torch/nn/softmax.rb +18 -0
  113. data/lib/torch/nn/softmax2d.rb +10 -0
  114. data/lib/torch/nn/softmin.rb +14 -0
  115. data/lib/torch/nn/softplus.rb +19 -0
  116. data/lib/torch/nn/softshrink.rb +18 -0
  117. data/lib/torch/nn/softsign.rb +9 -0
  118. data/lib/torch/nn/tanh.rb +9 -0
  119. data/lib/torch/nn/tanhshrink.rb +9 -0
  120. data/lib/torch/nn/triplet_margin_loss.rb +18 -0
  121. data/lib/torch/nn/unfold.rb +19 -0
  122. data/lib/torch/nn/utils.rb +25 -0
  123. data/lib/torch/nn/weighted_loss.rb +10 -0
  124. data/lib/torch/nn/zero_pad2d.rb +9 -0
  125. data/lib/torch/optim/adadelta.rb +57 -0
  126. data/lib/torch/optim/adagrad.rb +71 -0
  127. data/lib/torch/optim/adam.rb +81 -0
  128. data/lib/torch/optim/adamax.rb +68 -0
  129. data/lib/torch/optim/adamw.rb +82 -0
  130. data/lib/torch/optim/asgd.rb +65 -0
  131. data/lib/torch/optim/lr_scheduler/lr_scheduler.rb +33 -0
  132. data/lib/torch/optim/lr_scheduler/step_lr.rb +17 -0
  133. data/lib/torch/optim/optimizer.rb +62 -0
  134. data/lib/torch/optim/rmsprop.rb +76 -0
  135. data/lib/torch/optim/rprop.rb +68 -0
  136. data/lib/torch/optim/sgd.rb +60 -0
  137. data/lib/torch/random.rb +10 -0
  138. data/lib/torch/tensor.rb +90 -30
  139. data/lib/torch/utils/data/data_loader.rb +15 -0
  140. data/lib/torch/utils/data/tensor_dataset.rb +8 -1
  141. data/lib/torch/version.rb +1 -1
  142. metadata +122 -3
@@ -0,0 +1,60 @@
1
+ # ported from https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py
2
+ module Torch
3
+ module Optim
4
+ class SGD < Optimizer
5
+ def initialize(params, lr:, momentum: 0, dampening: 0, weight_decay: 0, nesterov: false)
6
+ raise ArgumentError, "Invalid learning rate: #{lr}" if lr < 0.0
7
+ raise ArgumentError, "Invalid momentum value: #{momentum}" if momentum < 0.0
8
+ raise ArgumentError, "Invalid weight_decay value: #{weight_decay}" if weight_decay < 0.0
9
+
10
+ defaults = {lr: lr, momentum: momentum, dampening: dampening, weight_decay: weight_decay, nesterov: nesterov}
11
+
12
+ if nesterov && (momentum <= 0 || dampening != 0)
13
+ raise ArgumentError, "Nesterov momentum requires a momentum and zero dampening"
14
+ end
15
+
16
+ super(params, defaults)
17
+ end
18
+
19
+ def step(closure = nil)
20
+ loss = nil
21
+ if closure
22
+ loss = closure.call
23
+ end
24
+
25
+ @param_groups.each do |group|
26
+ weight_decay = group[:weight_decay]
27
+ momentum = group[:momentum]
28
+ dampening = group[:dampening]
29
+ nesterov = group[:nesterov]
30
+
31
+ group[:params].each do |p|
32
+ next unless p.grad
33
+ d_p = p.grad.data
34
+ if weight_decay != 0
35
+ d_p.add!(weight_decay, p.data)
36
+ end
37
+ if momentum != 0
38
+ param_state = @state[p]
39
+ if !param_state.key?(:momentum_buffer)
40
+ buf = param_state[:momentum_buffer] = Torch.clone(d_p).detach
41
+ else
42
+ buf = param_state[:momentum_buffer]
43
+ buf.mul!(momentum).add!(1 - dampening, d_p)
44
+ end
45
+ if nesterov
46
+ d_p = d_p.add(momentum, buf)
47
+ else
48
+ d_p = buf
49
+ end
50
+ end
51
+
52
+ p.data.add!(-group[:lr], d_p)
53
+ end
54
+ end
55
+
56
+ loss
57
+ end
58
+ end
59
+ end
60
+ end
@@ -0,0 +1,10 @@
1
+ module Torch
2
+ module Random
3
+ class << self
4
+ # not available through LibTorch
5
+ def initial_seed
6
+ raise NotImplementedYet
7
+ end
8
+ end
9
+ end
10
+ end
@@ -5,12 +5,8 @@ module Torch
5
5
 
6
6
  alias_method :requires_grad?, :requires_grad
7
7
 
8
- def self.new(*size)
9
- if size.first.is_a?(Tensor)
10
- size.first
11
- else
12
- Torch.rand(*size)
13
- end
8
+ def self.new(*args)
9
+ FloatTensor.new(*args)
14
10
  end
15
11
 
16
12
  def dtype
@@ -28,12 +24,18 @@ module Torch
28
24
  end
29
25
 
30
26
  def to_a
31
- reshape(_data, shape)
27
+ reshape_arr(_flat_data, shape)
28
+ end
29
+
30
+ # TODO support dtype
31
+ def to(device, non_blocking: false, copy: false)
32
+ device = Device.new(device) if device.is_a?(String)
33
+ _to(device, _dtype, non_blocking, copy)
32
34
  end
33
35
 
34
36
  def size(dim = nil)
35
37
  if dim
36
- _size(dim)
38
+ _size_int(dim)
37
39
  else
38
40
  shape
39
41
  end
@@ -43,27 +45,32 @@ module Torch
43
45
  dim.times.map { |i| size(i) }
44
46
  end
45
47
 
46
- def view(*size)
47
- _view(size)
48
+ # mirror Python len()
49
+ def length
50
+ size(0)
48
51
  end
49
52
 
50
53
  def item
51
54
  if numel != 1
52
55
  raise Error, "only one element tensors can be converted to Ruby scalars"
53
56
  end
54
- _data.first
57
+ _flat_data.first
58
+ end
59
+
60
+ # unsure if this is correct
61
+ def new
62
+ Torch.empty(0, dtype: dtype)
55
63
  end
56
64
 
57
- def data
58
- Torch.tensor(to_a)
65
+ def backward(gradient = nil)
66
+ _backward(gradient)
59
67
  end
60
68
 
61
69
  # TODO read directly from memory
62
70
  def numo
63
- raise Error, "Numo not found" unless defined?(Numo::NArray)
64
71
  cls = Torch._dtype_to_numo[dtype]
65
72
  raise Error, "Cannot convert #{dtype} to Numo" unless cls
66
- cls.cast(_data).reshape(*shape)
73
+ cls.cast(_flat_data).reshape(*shape)
67
74
  end
68
75
 
69
76
  def new_ones(*size, **options)
@@ -74,14 +81,29 @@ module Torch
74
81
  _requires_grad!(requires_grad)
75
82
  end
76
83
 
77
- # operations
78
- %w(add sub mul div remainder pow neg sum mean num norm min max dot matmul exp log unsqueeze).each do |op|
79
- define_method(op) do |*args, **options, &block|
80
- if options.any?
81
- Torch.send(op, self, *args, **options, &block)
82
- else
83
- Torch.send(op, self, *args, &block)
84
- end
84
+ def type(dtype)
85
+ enum = DTYPE_TO_ENUM[dtype]
86
+ raise Error, "Unknown type: #{dtype}" unless enum
87
+ _type(enum)
88
+ end
89
+
90
+ def reshape(*size)
91
+ # Python doesn't check if size == 1, just ignores later arguments
92
+ size = size.first if size.size == 1 && size.first.is_a?(Array)
93
+ _reshape(size)
94
+ end
95
+
96
+ def view(*size)
97
+ size = size.first if size.size == 1 && size.first.is_a?(Array)
98
+ _view(size)
99
+ end
100
+
101
+ # value and other are swapped for some methods
102
+ def add!(value = 1, other)
103
+ if other.is_a?(Numeric)
104
+ _add__scalar(other, value)
105
+ else
106
+ _add__tensor(other, value)
85
107
  end
86
108
  end
87
109
 
@@ -117,18 +139,56 @@ module Torch
117
139
  item <=> other
118
140
  end
119
141
 
120
- # TODO use accessor C++ method
121
- def [](index, *args)
122
- v = _access(index)
123
- args.each do |i|
124
- v = v._access(i)
142
+ # based on python_variable_indexing.cpp
143
+ def [](*indexes)
144
+ result = self
145
+ dim = 0
146
+ indexes.each do |index|
147
+ if index.is_a?(Numeric)
148
+ result = result._select_int(dim, index)
149
+ elsif index.is_a?(Range)
150
+ finish = index.end
151
+ finish += 1 unless index.exclude_end?
152
+ result = result._slice_tensor(dim, index.begin, finish, 1)
153
+ dim += 1
154
+ elsif index.nil?
155
+ result = result.unsqueeze(dim)
156
+ dim += 1
157
+ elsif index == true
158
+ result = result.unsqueeze(dim)
159
+ # TODO handle false
160
+ else
161
+ raise Error, "Unsupported index type: #{index.class.name}"
162
+ end
163
+ end
164
+ result
165
+ end
166
+
167
+ # TODO
168
+ # based on python_variable_indexing.cpp
169
+ def []=(index, value)
170
+ raise ArgumentError, "Tensor does not support deleting items" if value.nil?
171
+
172
+ value = Torch.tensor(value) unless value.is_a?(Tensor)
173
+
174
+ if index.is_a?(Numeric)
175
+ copy_to(_select_int(0, index), value)
176
+ elsif index.is_a?(Range)
177
+ finish = index.end
178
+ finish += 1 unless index.exclude_end?
179
+ copy_to(_slice_tensor(0, index.begin, finish, 1), value)
180
+ else
181
+ raise Error, "Unsupported index type: #{index.class.name}"
125
182
  end
126
- v
127
183
  end
128
184
 
129
185
  private
130
186
 
131
- def reshape(arr, dims)
187
+ def copy_to(dst, src)
188
+ dst.copy!(src)
189
+ end
190
+
191
+ def reshape_arr(arr, dims)
132
192
  if dims.empty?
133
193
  arr
134
194
  else
@@ -2,10 +2,25 @@ module Torch
2
2
  module Utils
3
3
  module Data
4
4
  class DataLoader
5
+ include Enumerable
6
+
7
+ attr_reader :dataset
8
+
5
9
  def initialize(dataset, batch_size: 1)
6
10
  @dataset = dataset
7
11
  @batch_size = batch_size
8
12
  end
13
+
14
+ def each
15
+ size.times do |i|
16
+ start_index = i * @batch_size
17
+ yield @dataset[start_index...(start_index + @batch_size)]
18
+ end
19
+ end
20
+
21
+ def size
22
+ (@dataset.size / @batch_size.to_f).ceil
23
+ end
9
24
  end
10
25
  end
11
26
  end
@@ -3,11 +3,18 @@ module Torch
3
3
  module Data
4
4
  class TensorDataset
5
5
  def initialize(*tensors)
6
+ unless tensors.all? { |t| t.size(0) == tensors[0].size(0) }
7
+ raise Error, "Tensors must all have same dim 0 size"
8
+ end
6
9
  @tensors = tensors
7
10
  end
8
11
 
9
12
  def [](index)
10
- tensors.map { |t| t[index] }
13
+ @tensors.map { |t| t[index] }
14
+ end
15
+
16
+ def size
17
+ @tensors[0].size(0)
11
18
  end
12
19
  end
13
20
  end
@@ -1,3 +1,3 @@
1
1
  module Torch
2
- VERSION = "0.1.1"
2
+ VERSION = "0.1.6"
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: torch-rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.1
4
+ version: 0.1.6
5
5
  platform: ruby
6
6
  authors:
7
7
  - Andrew Kane
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2019-11-27 00:00:00.000000000 Z
11
+ date: 2019-12-10 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: rice
@@ -106,26 +106,145 @@ files:
106
106
  - README.md
107
107
  - ext/torch/ext.cpp
108
108
  - ext/torch/extconf.rb
109
+ - ext/torch/nn_functions.cpp
110
+ - ext/torch/nn_functions.hpp
111
+ - ext/torch/templates.cpp
112
+ - ext/torch/templates.hpp
113
+ - ext/torch/tensor_functions.cpp
114
+ - ext/torch/tensor_functions.hpp
115
+ - ext/torch/torch_functions.cpp
116
+ - ext/torch/torch_functions.hpp
109
117
  - lib/torch-rb.rb
110
118
  - lib/torch.rb
111
119
  - lib/torch/ext.bundle
112
120
  - lib/torch/inspector.rb
121
+ - lib/torch/native/dispatcher.rb
122
+ - lib/torch/native/function.rb
123
+ - lib/torch/native/generator.rb
124
+ - lib/torch/native/native_functions.yaml
125
+ - lib/torch/native/parser.rb
126
+ - lib/torch/nn/alpha_dropout.rb
127
+ - lib/torch/nn/avg_pool1d.rb
128
+ - lib/torch/nn/avg_pool2d.rb
129
+ - lib/torch/nn/avg_pool3d.rb
130
+ - lib/torch/nn/avg_poolnd.rb
131
+ - lib/torch/nn/batch_norm.rb
132
+ - lib/torch/nn/batch_norm1d.rb
133
+ - lib/torch/nn/batch_norm2d.rb
134
+ - lib/torch/nn/batch_norm3d.rb
135
+ - lib/torch/nn/bce_loss.rb
136
+ - lib/torch/nn/bce_with_logits_loss.rb
137
+ - lib/torch/nn/bilinear.rb
138
+ - lib/torch/nn/constant_pad1d.rb
139
+ - lib/torch/nn/constant_pad2d.rb
140
+ - lib/torch/nn/constant_pad3d.rb
141
+ - lib/torch/nn/constant_padnd.rb
142
+ - lib/torch/nn/conv1d.rb
113
143
  - lib/torch/nn/conv2d.rb
144
+ - lib/torch/nn/conv3d.rb
145
+ - lib/torch/nn/convnd.rb
146
+ - lib/torch/nn/cosine_embedding_loss.rb
147
+ - lib/torch/nn/cosine_similarity.rb
148
+ - lib/torch/nn/cross_entropy_loss.rb
149
+ - lib/torch/nn/ctc_loss.rb
150
+ - lib/torch/nn/dropout.rb
151
+ - lib/torch/nn/dropout2d.rb
152
+ - lib/torch/nn/dropout3d.rb
153
+ - lib/torch/nn/dropoutnd.rb
154
+ - lib/torch/nn/embedding.rb
155
+ - lib/torch/nn/embedding_bag.rb
156
+ - lib/torch/nn/feature_alpha_dropout.rb
157
+ - lib/torch/nn/fold.rb
114
158
  - lib/torch/nn/functional.rb
159
+ - lib/torch/nn/group_norm.rb
160
+ - lib/torch/nn/gru.rb
161
+ - lib/torch/nn/hardshrink.rb
162
+ - lib/torch/nn/hinge_embedding_loss.rb
163
+ - lib/torch/nn/identity.rb
115
164
  - lib/torch/nn/init.rb
165
+ - lib/torch/nn/instance_norm.rb
166
+ - lib/torch/nn/instance_norm1d.rb
167
+ - lib/torch/nn/instance_norm2d.rb
168
+ - lib/torch/nn/instance_norm3d.rb
169
+ - lib/torch/nn/kl_div_loss.rb
170
+ - lib/torch/nn/l1_loss.rb
171
+ - lib/torch/nn/layer_norm.rb
172
+ - lib/torch/nn/leaky_relu.rb
116
173
  - lib/torch/nn/linear.rb
174
+ - lib/torch/nn/local_response_norm.rb
175
+ - lib/torch/nn/log_sigmoid.rb
176
+ - lib/torch/nn/log_softmax.rb
177
+ - lib/torch/nn/loss.rb
178
+ - lib/torch/nn/lp_pool1d.rb
179
+ - lib/torch/nn/lp_pool2d.rb
180
+ - lib/torch/nn/lp_poolnd.rb
181
+ - lib/torch/nn/lstm.rb
182
+ - lib/torch/nn/margin_ranking_loss.rb
183
+ - lib/torch/nn/max_pool1d.rb
184
+ - lib/torch/nn/max_pool2d.rb
185
+ - lib/torch/nn/max_pool3d.rb
186
+ - lib/torch/nn/max_poolnd.rb
187
+ - lib/torch/nn/max_unpool1d.rb
188
+ - lib/torch/nn/max_unpool2d.rb
189
+ - lib/torch/nn/max_unpool3d.rb
190
+ - lib/torch/nn/max_unpoolnd.rb
117
191
  - lib/torch/nn/module.rb
118
192
  - lib/torch/nn/mse_loss.rb
193
+ - lib/torch/nn/multi_label_margin_loss.rb
194
+ - lib/torch/nn/multi_label_soft_margin_loss.rb
195
+ - lib/torch/nn/multi_margin_loss.rb
196
+ - lib/torch/nn/nll_loss.rb
197
+ - lib/torch/nn/pairwise_distance.rb
119
198
  - lib/torch/nn/parameter.rb
199
+ - lib/torch/nn/poisson_nll_loss.rb
200
+ - lib/torch/nn/prelu.rb
201
+ - lib/torch/nn/reflection_pad1d.rb
202
+ - lib/torch/nn/reflection_pad2d.rb
203
+ - lib/torch/nn/reflection_padnd.rb
120
204
  - lib/torch/nn/relu.rb
205
+ - lib/torch/nn/replication_pad1d.rb
206
+ - lib/torch/nn/replication_pad2d.rb
207
+ - lib/torch/nn/replication_pad3d.rb
208
+ - lib/torch/nn/replication_padnd.rb
209
+ - lib/torch/nn/rnn.rb
210
+ - lib/torch/nn/rnn_base.rb
121
211
  - lib/torch/nn/sequential.rb
212
+ - lib/torch/nn/sigmoid.rb
213
+ - lib/torch/nn/smooth_l1_loss.rb
214
+ - lib/torch/nn/soft_margin_loss.rb
215
+ - lib/torch/nn/softmax.rb
216
+ - lib/torch/nn/softmax2d.rb
217
+ - lib/torch/nn/softmin.rb
218
+ - lib/torch/nn/softplus.rb
219
+ - lib/torch/nn/softshrink.rb
220
+ - lib/torch/nn/softsign.rb
221
+ - lib/torch/nn/tanh.rb
222
+ - lib/torch/nn/tanhshrink.rb
223
+ - lib/torch/nn/triplet_margin_loss.rb
224
+ - lib/torch/nn/unfold.rb
225
+ - lib/torch/nn/utils.rb
226
+ - lib/torch/nn/weighted_loss.rb
227
+ - lib/torch/nn/zero_pad2d.rb
228
+ - lib/torch/optim/adadelta.rb
229
+ - lib/torch/optim/adagrad.rb
230
+ - lib/torch/optim/adam.rb
231
+ - lib/torch/optim/adamax.rb
232
+ - lib/torch/optim/adamw.rb
233
+ - lib/torch/optim/asgd.rb
234
+ - lib/torch/optim/lr_scheduler/lr_scheduler.rb
235
+ - lib/torch/optim/lr_scheduler/step_lr.rb
236
+ - lib/torch/optim/optimizer.rb
237
+ - lib/torch/optim/rmsprop.rb
238
+ - lib/torch/optim/rprop.rb
239
+ - lib/torch/optim/sgd.rb
240
+ - lib/torch/random.rb
122
241
  - lib/torch/tensor.rb
123
242
  - lib/torch/utils/data/data_loader.rb
124
243
  - lib/torch/utils/data/tensor_dataset.rb
125
244
  - lib/torch/version.rb
126
245
  homepage: https://github.com/ankane/torch-rb
127
246
  licenses:
128
- - MIT
247
+ - BSD-3-Clause
129
248
  metadata: {}
130
249
  post_install_message:
131
250
  rdoc_options: []