rubynetic 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/console +6 -0
- data/bin/setup_libtorch +6 -0
- data/ext/torch/rubynetic/Makefile +270 -0
- data/ext/torch/rubynetic/extconf.rb +32 -0
- data/ext/torch/rubynetic/rubynetic.bundle +0 -0
- data/ext/torch/rubynetic/rubynetic.cpp +526 -0
- data/ext/torch/rubynetic/rubynetic.o +0 -0
- data/lib/rubynetic/libtorch_downloader.rb +82 -0
- data/lib/rubynetic/libtorch_installer.rb +14 -0
- data/lib/rubynetic/tensor.rb +97 -0
- data/lib/rubynetic/torch.rb +18 -0
- data/lib/rubynetic.rb +7 -0
- metadata +58 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 927177e3fbecd3934e7a5acc174fd851b0b53f31df2ab03311bb188b90c47f4b
+  data.tar.gz: 78497d7135bf6a3baa19455395b0eef89f745c1417efa506c004a9bca487d462
+SHA512:
+  metadata.gz: 9084a711875862b7770d91f06564c683c67c689999bb1477abb4e44a911edcb612c397808764dc2033117b61a75de836edc00603415cd5a9792de97004100c6e
+  data.tar.gz: 86770080fbb7c8df640d3dd16ffc048049c9092ff372ed8229ef094eff749a2b9b068516928f3ea463bab4c913268f33586439059baebd4b1fe40ad4dd374562
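These two digests cover the archives inside the .gem file (metadata.gz and data.tar.gz), which is how RubyGems detects tampering or corruption. A minimal verification sketch in Ruby, assuming checksums.yaml and the two archives sit uncompressed in the current directory (for example after `tar -xf rubynetic-0.3.2.gem`):

    require "digest"
    require "yaml"

    # checksums.yaml maps archive name -> expected hex digest, keyed by algorithm.
    expected = YAML.safe_load(File.read("checksums.yaml"))

    %w[metadata.gz data.tar.gz].each do |name|
      actual = Digest::SHA256.file(name).hexdigest
      puts "#{name}: #{actual == expected["SHA256"][name] ? "OK" : "MISMATCH"}"
    end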
data/bin/console
ADDED
data/bin/setup_libtorch
ADDED
data/ext/torch/rubynetic/Makefile
ADDED
@@ -0,0 +1,270 @@
+
+SHELL = /bin/sh
+
+# V=0 quiet, V=1 verbose. other values don't work.
+V = 0
+V0 = $(V:0=)
+Q1 = $(V:1=)
+Q = $(Q1:0=@)
+ECHO1 = $(V:1=@ :)
+ECHO = $(ECHO1:0=@ echo)
+NULLCMD = :
+
+#### Start of system configuration section. ####
+
+srcdir = .
+topdir = /Users/alex/.rvm/rubies/ruby-3.3.1/include/ruby-3.3.0
+hdrdir = $(topdir)
+arch_hdrdir = /Users/alex/.rvm/rubies/ruby-3.3.1/include/ruby-3.3.0/x86_64-darwin23
+PATH_SEPARATOR = :
+VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby
+prefix = $(DESTDIR)/Users/alex/.rvm/rubies/ruby-3.3.1
+rubysitearchprefix = $(rubylibprefix)/$(sitearch)
+rubyarchprefix = $(rubylibprefix)/$(arch)
+rubylibprefix = $(libdir)/$(RUBY_BASE_NAME)
+exec_prefix = $(prefix)
+vendorarchhdrdir = $(vendorhdrdir)/$(sitearch)
+sitearchhdrdir = $(sitehdrdir)/$(sitearch)
+rubyarchhdrdir = $(rubyhdrdir)/$(arch)
+vendorhdrdir = $(rubyhdrdir)/vendor_ruby
+sitehdrdir = $(rubyhdrdir)/site_ruby
+rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME)
+vendorarchdir = $(vendorlibdir)/$(sitearch)
+vendorlibdir = $(vendordir)/$(ruby_version)
+vendordir = $(rubylibprefix)/vendor_ruby
+sitearchdir = $(sitelibdir)/$(sitearch)
+sitelibdir = $(sitedir)/$(ruby_version)
+sitedir = $(rubylibprefix)/site_ruby
+rubyarchdir = $(rubylibdir)/$(arch)
+rubylibdir = $(rubylibprefix)/$(ruby_version)
+sitearchincludedir = $(includedir)/$(sitearch)
+archincludedir = $(includedir)/$(arch)
+sitearchlibdir = $(libdir)/$(sitearch)
+archlibdir = $(libdir)/$(arch)
+ridir = $(datarootdir)/$(RI_BASE_NAME)
+mandir = $(datarootdir)/man
+localedir = $(datarootdir)/locale
+libdir = $(exec_prefix)/lib
+psdir = $(docdir)
+pdfdir = $(docdir)
+dvidir = $(docdir)
+htmldir = $(docdir)
+infodir = $(datarootdir)/info
+docdir = $(datarootdir)/doc/$(PACKAGE)
+oldincludedir = $(DESTDIR)/usr/include
+includedir = $(SDKROOT)$(prefix)/include
+runstatedir = $(localstatedir)/run
+localstatedir = $(prefix)/var
+sharedstatedir = $(prefix)/com
+sysconfdir = $(prefix)/etc
+datadir = $(datarootdir)
+datarootdir = $(prefix)/share
+libexecdir = $(exec_prefix)/libexec
+sbindir = $(exec_prefix)/sbin
+bindir = $(exec_prefix)/bin
+archdir = $(rubyarchdir)
+
+
+CC_WRAPPER =
+CC = gcc
+CXX = g++ -std=gnu++11
+LIBRUBY = $(LIBRUBY_SO)
+LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a
+LIBRUBYARG_SHARED = -l$(RUBY_SO_NAME)
+LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static -framework CoreFoundation $(MAINLIBS)
+empty =
+OUTFLAG = -o $(empty)
+COUTFLAG = -o $(empty)
+CSRCFLAG = $(empty)
+
+RUBY_EXTCONF_H =
+cflags = -fdeclspec $(optflags) $(debugflags) $(warnflags)
+cxxflags =
+optflags = -O3 -fno-fast-math
+debugflags = -ggdb3
+warnflags = -Wall -Wextra -Wextra-tokens -Wdeprecated-declarations -Wdivision-by-zero -Wdiv-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wmisleading-indentation -Wundef
+cppflags =
+CCDLFLAGS = -fno-common
+CFLAGS = $(CCDLFLAGS) -O3 -I/usr/local/opt/libyaml/include -I/usr/local/opt/libksba/include -I/usr/local/opt/readline/include -I/usr/local/opt/zlib/include -I/usr/local/opt/openssl@1.1/include $(cflags) -fno-common -pipe $(ARCH_FLAG)
+INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir)
+DEFS =
+CPPFLAGS = -I/Users/alex/libtorch/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT $(DEFS) $(cppflags)
+CXXFLAGS = $(CCDLFLAGS) -fdeclspec -std=c++17 -I/Users/alex/libtorch/include -I/Users/alex/libtorch/include/torch/csrc/api/include -I/Users/alex/libtorch/include/ATen/core $(ARCH_FLAG)
+ldflags = -L. -L/usr/local/opt/libyaml/lib -L/usr/local/opt/libksba/lib -L/usr/local/opt/readline/lib -L/usr/local/opt/zlib/lib -L/usr/local/opt/openssl@1.1/lib -fstack-protector-strong -L/Users/alex/libtorch/lib -ltorch -ltorch_cpu -lc10
+dldflags = -L/usr/local/opt/libyaml/lib -L/usr/local/opt/libksba/lib -L/usr/local/opt/readline/lib -L/usr/local/opt/zlib/lib -L/usr/local/opt/openssl@1.1/lib -Wl,-undefined,dynamic_lookup
+ARCH_FLAG =
+DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG)
+LDSHARED = $(CC) -dynamic -bundle
+LDSHAREDXX = $(CXX) -dynamic -bundle
+AR = /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ar
+EXEEXT =
+
+RUBY_INSTALL_NAME = $(RUBY_BASE_NAME)
+RUBY_SO_NAME = ruby.3.3
+RUBYW_INSTALL_NAME =
+RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version)
+RUBYW_BASE_NAME = rubyw
+RUBY_BASE_NAME = ruby
+
+arch = x86_64-darwin23
+sitearch = $(arch)
+ruby_version = 3.3.0
+ruby = $(bindir)/$(RUBY_BASE_NAME)
+RUBY = $(ruby)
+BUILTRUBY = $(bindir)/$(RUBY_BASE_NAME)
+ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h
+
+RM = rm -f
+RM_RF = rm -fr
+RMDIRS = rmdir -p
+MAKEDIRS = /usr/local/opt/coreutils/bin/gmkdir -p
+INSTALL = /usr/local/opt/coreutils/bin/ginstall -c
+INSTALL_PROG = $(INSTALL) -m 0755
+INSTALL_DATA = $(INSTALL) -m 644
+COPY = cp
+TOUCH = exit >
+
+#### End of system configuration section. ####
+
+preload =
+libpath = . $(libdir) /Users/alex/libtorch/lib
+LIBPATH = -L. -L$(libdir) -L/Users/alex/libtorch/lib
+DEFFILE =
+
+CLEANFILES = mkmf.log
+DISTCLEANFILES =
+DISTCLEANDIRS =
+
+extout =
+extout_prefix =
+target_prefix = /rubynetic
+LOCAL_LIBS =
+LIBS = $(LIBRUBYARG_SHARED) -lpthread
+ORIG_SRCS = rubynetic.cpp
+SRCS = $(ORIG_SRCS)
+OBJS = rubynetic.o
+HDRS =
+LOCAL_HDRS =
+TARGET = rubynetic
+TARGET_NAME = rubynetic
+TARGET_ENTRY = Init_$(TARGET_NAME)
+DLLIB = $(TARGET).bundle
+EXTSTATIC =
+STATIC_LIB =
+
+TIMESTAMP_DIR = .
+BINDIR = $(bindir)
+RUBYCOMMONDIR = $(sitedir)$(target_prefix)
+RUBYLIBDIR = $(sitelibdir)$(target_prefix)
+RUBYARCHDIR = $(sitearchdir)$(target_prefix)
+HDRDIR = $(sitehdrdir)$(target_prefix)
+ARCHHDRDIR = $(sitearchhdrdir)$(target_prefix)
+TARGET_SO_DIR =
+TARGET_SO = $(TARGET_SO_DIR)$(DLLIB)
+CLEANLIBS = $(TARGET_SO) $(TARGET_SO).dSYM
+CLEANOBJS = $(OBJS) *.bak
+TARGET_SO_DIR_TIMESTAMP = $(TIMESTAMP_DIR)/.sitearchdir.-.rubynetic.time
+
+all: $(DLLIB)
+static: $(STATIC_LIB)
+.PHONY: all install static install-so install-rb
+.PHONY: clean clean-so clean-static clean-rb
+
+clean-static::
+clean-rb-default::
+clean-rb::
+clean-so::
+clean: clean-so clean-static clean-rb-default clean-rb
+	-$(Q)$(RM_RF) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time
+
+distclean-rb-default::
+distclean-rb::
+distclean-so::
+distclean-static::
+distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb
+	-$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log
+	-$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES)
+	-$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true
+
+realclean: distclean
+install: install-so install-rb
+
+install-so: $(DLLIB) $(TARGET_SO_DIR_TIMESTAMP)
+	$(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR)
+clean-static::
+	-$(Q)$(RM) $(STATIC_LIB)
+install-rb: pre-install-rb do-install-rb install-rb-default
+install-rb-default: pre-install-rb-default do-install-rb-default
+pre-install-rb: Makefile
+pre-install-rb-default: Makefile
+do-install-rb:
+do-install-rb-default:
+pre-install-rb-default:
+	@$(NULLCMD)
+$(TARGET_SO_DIR_TIMESTAMP):
+	$(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR)
+	$(Q) $(TOUCH) $@
+
+site-install: site-install-so site-install-rb
+site-install-so: install-so
+site-install-rb: install-rb
+
+.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S
+
+.cc.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.cc.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+.mm.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.mm.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+.cxx.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.cxx.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+.cpp.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.cpp.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+.c.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.c.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+.m.o:
+	$(ECHO) compiling $(<)
+	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
+
+.m.S:
+	$(ECHO) translating $(<)
+	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
+
+$(TARGET_SO): $(OBJS) Makefile
+	$(ECHO) linking shared-object rubynetic/$(DLLIB)
+	-$(Q)$(RM) $(@)
+	$(Q) $(LDSHAREDXX) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS)
+	$(Q) $(POSTLINK)
+
+
+$(OBJS): $(HDRS) $(ruby_headers)
data/ext/torch/rubynetic/extconf.rb
ADDED
@@ -0,0 +1,32 @@
+require 'mkmf'
+require_relative '../../../lib/rubynetic/libtorch_installer'
+
+# ✅ Determine the LibTorch installation path
+libtorch_path = ENV['LIBTORCH_INSTALL_PATH'] || "/opt/homebrew"
+
+# ✅ Check whether LibTorch is installed
+unless File.directory?("#{libtorch_path}/include/torch")
+  puts "🔄 LibTorch not found, starting installation..."
+  LibTorchInstaller.install(version: "2.1.0")
+end
+
+# ✅ Check again after the installation
+unless File.directory?("#{libtorch_path}/include/torch")
+  raise "❌ Error: LibTorch not found at #{libtorch_path}/include/torch. Check the installation!"
+end
+
+# ✅ Compiler include paths
+$CXXFLAGS += " -std=c++17"
+$CXXFLAGS += " -I#{libtorch_path}/include"
+$CXXFLAGS += " -I#{libtorch_path}/include/torch/csrc/api/include"
+$CXXFLAGS += " -I#{libtorch_path}/include/ATen/core"
+
+# ✅ Linker paths
+$LDFLAGS += " -L#{libtorch_path}/lib"
+$LDFLAGS += " -ltorch -ltorch_cpu -lc10"
+
+# ✅ Paths for `mkmf`
+dir_config("rubynetic", "#{libtorch_path}/include", "#{libtorch_path}/lib")
+
+# ✅ Generate the Makefile for the extension
+create_makefile("rubynetic/rubynetic")
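Note the interplay between extconf.rb and the Makefile above: extconf.rb resolves the LibTorch root from LIBTORCH_INSTALL_PATH (falling back to /opt/homebrew), while the committed Makefile was generated against /Users/alex/libtorch on the author's machine. A sketch of rebuilding the extension by hand against your own LibTorch tree (the path below is illustrative):

    # Illustrative manual build of the native extension.
    ENV["LIBTORCH_INSTALL_PATH"] = File.expand_path("~/libtorch")

    Dir.chdir("ext/torch/rubynetic") do
      system("ruby", "extconf.rb") or abort("extconf.rb failed")  # regenerates the Makefile
      system("make")               or abort("make failed")        # produces rubynetic.bundle
    end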
data/ext/torch/rubynetic/rubynetic.bundle
ADDED
Binary file
data/ext/torch/rubynetic/rubynetic.cpp
ADDED
@@ -0,0 +1,526 @@
+#include <ruby.h>
+#include <torch/torch.h>
+#include <vector>
+#include <iostream>
+#include <sstream>
+#include <iomanip>
+
+// Converts a Ruby array (possibly nested) into a flat std::vector<float>
+std::vector<float> flatten_array(VALUE arr)
+{
+    std::vector<float> data;
+
+    for (long i = 0; i < RARRAY_LEN(arr); i++)
+    {
+        VALUE item = rb_ary_entry(arr, i);
+
+        if (RB_TYPE_P(item, T_ARRAY))
+        {
+            std::vector<float> nested = flatten_array(item);
+            data.insert(data.end(), nested.begin(), nested.end());
+        }
+        else if (RB_TYPE_P(item, T_FLOAT) || RB_TYPE_P(item, T_FIXNUM))
+        {
+            data.push_back(NUM2DBL(item));
+        }
+        else
+        {
+            rb_raise(rb_eTypeError, "Array must contain only numbers or nested arrays");
+        }
+    }
+
+    return data;
+}
+
+extern "C" VALUE tensor_to_array(VALUE self, VALUE tensor_object)
+{
+    torch::Tensor* tensor;
+    Data_Get_Struct(tensor_object, torch::Tensor, tensor);
+
+    if (!tensor) {
+        rb_raise(rb_eRuntimeError, "tensor_to_array: Tensor is NULL!");
+        return Qnil;
+    }
+
+    try {
+        // Move to the CPU and copy into a std::vector
+        auto tensor_data = tensor->contiguous().to(torch::kCPU).data_ptr<float>();
+        std::vector<float> data(tensor_data, tensor_data + tensor->numel());
+
+        // Read the dimensions
+        auto sizes = tensor->sizes();
+        if (sizes.size() == 1) {
+            // One-dimensional array
+            VALUE rb_array = rb_ary_new();
+            for (float v : data) {
+                rb_ary_push(rb_array, rb_float_new(v));
+            }
+            return rb_array;
+        } else if (sizes.size() == 2) {
+            // Two-dimensional array (matrix)
+            VALUE rb_matrix = rb_ary_new();
+            int rows = sizes[0], cols = sizes[1];
+
+            for (int i = 0; i < rows; ++i) {
+                VALUE rb_row = rb_ary_new();
+                for (int j = 0; j < cols; ++j) {
+                    rb_ary_push(rb_row, rb_float_new(data[i * cols + j]));
+                }
+                rb_ary_push(rb_matrix, rb_row);
+            }
+            return rb_matrix;
+        } else {
+            rb_raise(rb_eNotImpError, "tensor_to_array: Only supports 1D and 2D tensors.");
+            return Qnil;
+        }
+    }
+    catch (const c10::Error& e) {
+        rb_raise(rb_eRuntimeError, "tensor_to_array failed: %s", e.what());
+        return Qnil;
+    }
+}
+
+
+// Infers the tensor shape from a nested Ruby array
+std::vector<int64_t> get_tensor_shape(VALUE arr)
+{
+    std::vector<int64_t> shape;
+
+    while (RB_TYPE_P(arr, T_ARRAY))
+    {
+        shape.push_back(RARRAY_LEN(arr));
+        arr = rb_ary_entry(arr, 0);
+    }
+
+    return shape;
+}
+
+// Formats a tensor size as a string
+std::string tensor_size_to_string(const c10::ArrayRef<int64_t>& sizes) {
+    std::ostringstream oss;
+    oss << "[";
+    for (size_t i = 0; i < sizes.size(); ++i) {
+        oss << sizes[i];
+        if (i != sizes.size() - 1) oss << ", ";
+    }
+    oss << "]";
+    return oss.str();
+}
+
+
+extern "C" VALUE tensor_shape(VALUE self, VALUE tensor_object)
+{
+    torch::Tensor *tensor;
+    Data_Get_Struct(tensor_object, torch::Tensor, tensor);
+
+    if (!tensor)
+    {
+        rb_raise(rb_eRuntimeError, "tensor_shape: Tensor is NULL!");
+        return Qnil;
+    }
+
+    std::vector<int64_t> shape_vec = tensor->sizes().vec();
+    VALUE shape_array = rb_ary_new();
+    for (int64_t dim : shape_vec)
+    {
+        rb_ary_push(shape_array, INT2NUM(dim));
+    }
+
+    return shape_array;
+}
+
+// Creates a tensor from a (possibly multi-dimensional) array
+extern "C" VALUE tensor_from_data(VALUE self, VALUE arr)
+{
+    if (!RB_TYPE_P(arr, T_ARRAY))
+    {
+        rb_raise(rb_eTypeError, "Expected an array of numbers or nested arrays");
+        return Qnil;
+    }
+
+    std::vector<float> data = flatten_array(arr);
+    std::vector<int64_t> shape = get_tensor_shape(arr);
+
+    // Build a tensor of the required shape
+    torch::Tensor *tensor = new torch::Tensor(torch::tensor(data, torch::dtype(torch::kFloat32)).reshape(shape));
+
+    return Data_Wrap_Struct(rb_cObject, NULL, free, tensor);
+}
+
+// Prints a tensor
+extern "C" VALUE tensor_print(VALUE self, VALUE tensor_obj)
+{
+    torch::Tensor *tensor;
+    Data_Get_Struct(tensor_obj, torch::Tensor, tensor);
+
+    if (tensor == nullptr)
+    {
+        rb_raise(rb_eRuntimeError, "tensor_print: received a NULL tensor!");
+        return Qnil;
+    }
+
+    return Qnil;
+}
+
+// PyTorch-style tensor rendering
+extern "C" VALUE tensor_to_string(VALUE self, VALUE tensor_obj)
+{
+    torch::Tensor *tensor;
+    Data_Get_Struct(tensor_obj, torch::Tensor, tensor);
+
+    if (tensor == nullptr)
+    {
+        rb_raise(rb_eRuntimeError, "tensor_to_string: received a NULL tensor!");
+        return Qnil;
+    }
+
+    std::ostringstream oss;
+    oss << "tensor([";
+
+    // Fix the precision to 4 decimal places
+    oss << std::fixed << std::setprecision(4);
+
+    // Read the tensor shape
+    std::vector<int64_t> shape = tensor->sizes().vec();
+
+    if (shape.size() == 1)
+    {
+        // ✅ 1D tensor: tensor([0.1234, 0.5678])
+        oss << "[";
+        for (int i = 0; i < shape[0]; i++)
+        {
+            oss << tensor->index({i}).item<float>();
+            if (i < shape[0] - 1)
+                oss << ", ";
+        }
+        oss << "]";
+    }
+    else
+    {
+        // ✅ 2D and higher: tensor([[0.1234, 0.5678], [0.9876, 0.4321]])
+        oss << "\n";
+        for (int i = 0; i < shape[0]; i++)
+        {
+            oss << " [";
+            for (int j = 0; j < shape[1]; j++)
+            {
+                oss << tensor->index({i, j}).item<float>();
+                if (j < shape[1] - 1)
+                    oss << ", ";
+            }
+            oss << "]";
+            if (i < shape[0] - 1)
+                oss << ",\n";
+        }
+    }
+
+    oss << "])";
+    return rb_str_new_cstr(oss.str().c_str());
+}
+
+// Creates a tensor filled with zeros
+extern "C" VALUE tensor_zeros(VALUE self, VALUE shape_array)
+{
+    if (!RB_TYPE_P(shape_array, T_ARRAY))
+    {
+        rb_raise(rb_eTypeError, "Expected an array of shape dimensions");
+        return Qnil;
+    }
+
+    std::vector<int64_t> shape;
+    for (long i = 0; i < RARRAY_LEN(shape_array); i++)
+    {
+        if (!RB_TYPE_P(rb_ary_entry(shape_array, i), T_FIXNUM))
+        {
+            rb_raise(rb_eTypeError, "All dimensions must be integers");
+            return Qnil;
+        }
+        shape.push_back(NUM2INT(rb_ary_entry(shape_array, i)));
+    }
+
+    if (shape.empty())
+    {
+        rb_raise(rb_eArgError, "Shape cannot be empty");
+        return Qnil;
+    }
+
+    torch::Tensor *tensor = new torch::Tensor(torch::zeros(shape));
+
+    return Data_Wrap_Struct(rb_cObject, NULL, free, tensor);
+}
+
+// Creates a tensor filled with ones
+extern "C" VALUE tensor_ones(VALUE self, VALUE shape_array)
+{
+    if (!RB_TYPE_P(shape_array, T_ARRAY))
+    {
+        rb_raise(rb_eTypeError, "Expected an array of shape dimensions");
+        return Qnil;
+    }
+
+    std::vector<int64_t> shape;
+    for (long i = 0; i < RARRAY_LEN(shape_array); i++)
+    {
+        shape.push_back(NUM2INT(rb_ary_entry(shape_array, i)));
+    }
+
+    torch::Tensor *tensor = new torch::Tensor(torch::ones(shape));
+
+    return Data_Wrap_Struct(rb_cObject, NULL, free, tensor);
+}
+
+// Creates a tensor with random values
+extern "C" VALUE tensor_rand(int argc, VALUE *argv, VALUE self)
+{
+    if (argc < 1)
+    {
+        rb_raise(rb_eArgError, "Expected at least one argument (shape)");
+        return Qnil;
+    }
+
+    std::vector<int64_t> shape;
+    double min = 0.0, max = 1.0; // Defaults
+
+    // If the last two arguments are numbers, treat them as `min` and `max`
+    if (argc >= 3 && (RB_TYPE_P(argv[argc - 2], T_FLOAT) || RB_TYPE_P(argv[argc - 2], T_FIXNUM)) &&
+        (RB_TYPE_P(argv[argc - 1], T_FLOAT) || RB_TYPE_P(argv[argc - 1], T_FIXNUM)))
+    {
+        min = NUM2DBL(argv[argc - 2]);
+        max = NUM2DBL(argv[argc - 1]);
+        argc -= 2; // Drop the two trailing arguments
+    }
+
+    for (int i = 0; i < argc; i++)
+    {
+        if (!RB_TYPE_P(argv[i], T_FIXNUM))
+        {
+            rb_raise(rb_eTypeError, "Shape dimensions must be integers");
+            return Qnil;
+        }
+        shape.push_back(NUM2INT(argv[i]));
+    }
+
+    if (shape.empty())
+    {
+        rb_raise(rb_eArgError, "Shape cannot be empty");
+        return Qnil;
+    }
+
+    if (min >= max)
+    {
+        rb_raise(rb_eArgError, "min must be less than max");
+        return Qnil;
+    }
+
+    torch::Tensor *tensor = new torch::Tensor((max - min) * torch::rand(shape) + min);
+
+    return Data_Wrap_Struct(rb_cObject, NULL, free, tensor);
+}
+
+// Tensor addition (a + b)
+extern "C" VALUE tensor_add(VALUE self, VALUE tensor1_object, VALUE tensor2_object)
+{
+    torch::Tensor* tensor1;
+    torch::Tensor* tensor2;
+
+    Data_Get_Struct(tensor1_object, torch::Tensor, tensor1);
+    Data_Get_Struct(tensor2_object, torch::Tensor, tensor2);
+
+    if (!tensor1 || !tensor2) {
+        rb_raise(rb_eRuntimeError, "tensor_add: One of the tensors is NULL!");
+        return Qnil;
+    }
+
+    try {
+        // ❌ No explicit expand() or reshape() here
+        // Just add the tensors; LibTorch broadcasts when the sizes are compatible
+        torch::Tensor* result = new torch::Tensor(*tensor1 + *tensor2);
+        return Data_Wrap_Struct(rb_cObject, NULL, free, result);
+    }
+    catch (const c10::Error& e) {
+        std::ostringstream oss;
+        oss << "tensor_add: Broadcasting failed. Tensor sizes are incompatible. Got [";
+        for (auto s : tensor1->sizes()) oss << s << " ";
+        oss << "] and [";
+        for (auto s : tensor2->sizes()) oss << s << " ";
+        oss << "]";
+
+        rb_raise(rb_eArgError, "%s", oss.str().c_str());
+        return Qnil;
+    }
+}
+
+// Tensor subtraction (a - b)
+extern "C" VALUE tensor_sub(VALUE self, VALUE tensor1_object, VALUE tensor2_object)
+{
+    torch::Tensor* tensor1;
+    torch::Tensor* tensor2;
+
+    Data_Get_Struct(tensor1_object, torch::Tensor, tensor1);
+    Data_Get_Struct(tensor2_object, torch::Tensor, tensor2);
+
+    if (!tensor1 || !tensor2) {
+        rb_raise(rb_eRuntimeError, "tensor_sub: One of the tensors is NULL!");
+        return Qnil;
+    }
+
+    if (tensor1->sizes() != tensor2->sizes()) {
+        std::ostringstream size1, size2;
+        size1 << "[";
+        for (auto s : tensor1->sizes()) size1 << s << " ";
+        size1 << "]";
+
+        size2 << "[";
+        for (auto s : tensor2->sizes()) size2 << s << " ";
+        size2 << "]";
+
+        std::string error_message = "tensor_sub: Tensor sizes must match exactly. Got " + size1.str() + " and " + size2.str();
+        rb_raise(rb_eArgError, "%s", error_message.c_str());
+        return Qnil;
+    }
+
+    try {
+        torch::Tensor* result = new torch::Tensor(*tensor1 - *tensor2);
+        return Data_Wrap_Struct(rb_cObject, NULL, free, result);
+    }
+    catch (const c10::Error& e) {
+        rb_raise(rb_eArgError, "tensor_sub: PyTorch error during subtraction");
+        return Qnil;
+    }
+}
+
+
+// Element-wise tensor multiplication (a * b)
+extern "C" VALUE tensor_mul(VALUE self, VALUE tensor1_object, VALUE tensor2_object)
+{
+    torch::Tensor* tensor1;
+    torch::Tensor* tensor2;
+
+    Data_Get_Struct(tensor1_object, torch::Tensor, tensor1);
+    Data_Get_Struct(tensor2_object, torch::Tensor, tensor2);
+
+    if (!tensor1 || !tensor2) {
+        rb_raise(rb_eRuntimeError, "tensor_mul: One of the tensors is NULL!");
+        return Qnil;
+    }
+
+    try {
+        // Apply PyTorch broadcasting via `broadcast_tensors`
+        auto tensors = torch::broadcast_tensors({*tensor1, *tensor2});
+        torch::Tensor expanded_t1 = tensors[0];
+        torch::Tensor expanded_t2 = tensors[1];
+
+        torch::Tensor* result = new torch::Tensor(expanded_t1 * expanded_t2);
+        return Data_Wrap_Struct(rb_cObject, NULL, free, result);
+    }
+    catch (const c10::Error& e) {
+        std::ostringstream oss;
+        oss << "tensor_mul: Broadcasting failed. Tensor sizes are incompatible. Got [";
+        for (auto s : tensor1->sizes()) oss << s << " ";
+        oss << "] and [";
+        for (auto s : tensor2->sizes()) oss << s << " ";
+        oss << "]";
+
+        rb_raise(rb_eArgError, "%s", oss.str().c_str());
+        return Qnil;
+    }
+}
+
+
+// Element-wise tensor division (a / b)
+extern "C" VALUE tensor_div(VALUE self, VALUE tensor1_object, VALUE tensor2_object)
+{
+    torch::Tensor* tensor1;
+    torch::Tensor* tensor2;
+
+    Data_Get_Struct(tensor1_object, torch::Tensor, tensor1);
+    Data_Get_Struct(tensor2_object, torch::Tensor, tensor2);
+
+    if (!tensor1 || !tensor2) {
+        rb_raise(rb_eRuntimeError, "tensor_div: One of the tensors is NULL!");
+        return Qnil;
+    }
+
+    try {
+        // Use PyTorch broadcasting via `broadcast_tensors`
+        auto tensors = torch::broadcast_tensors({*tensor1, *tensor2});
+        torch::Tensor expanded_t1 = tensors[0];
+        torch::Tensor expanded_t2 = tensors[1];
+
+        // Element-wise division
+        torch::Tensor* result = new torch::Tensor(expanded_t1 / expanded_t2);
+        return Data_Wrap_Struct(rb_cObject, NULL, free, result);
+    }
+    catch (const c10::Error& e) {
+        std::ostringstream oss;
+        oss << "tensor_div: Broadcasting failed. Tensor sizes are incompatible. Got [";
+        for (auto s : tensor1->sizes()) oss << s << " ";
+        oss << "] and [";
+        for (auto s : tensor2->sizes()) oss << s << " ";
+        oss << "]";
+
+        rb_raise(rb_eArgError, "%s", oss.str().c_str());
+        return Qnil;
+    }
+}
+
+// Dot / matrix product (a @ b)
+extern "C" VALUE tensor_dot(VALUE self, VALUE tensor1_object, VALUE tensor2_object)
+{
+    torch::Tensor* tensor1;
+    torch::Tensor* tensor2;
+
+    Data_Get_Struct(tensor1_object, torch::Tensor, tensor1);
+    Data_Get_Struct(tensor2_object, torch::Tensor, tensor2);
+
+    if (!tensor1 || !tensor2) {
+        rb_raise(rb_eRuntimeError, "tensor_dot: One of the tensors is NULL!");
+        return Qnil;
+    }
+
+    try {
+        torch::Tensor* result;
+
+        // If both tensors are 1D (vectors), compute a dot product
+        if (tensor1->dim() == 1 && tensor2->dim() == 1) {
+            double scalar = torch::dot(*tensor1, *tensor2).item<double>();
+            return DBL2NUM(scalar); // ✅ Return a number, not a wrapped object!
+        }
+        // Otherwise, matrix multiplication
+        else {
+            result = new torch::Tensor(torch::matmul(*tensor1, *tensor2));
+            return Data_Wrap_Struct(rb_cObject, NULL, free, result);
+        }
+    }
+    catch (const c10::Error& e) {
+        std::ostringstream oss;
+        oss << "tensor_dot: Tensors have incompatible shapes for multiplication. Got [";
+        for (auto s : tensor1->sizes()) oss << s << " ";
+        oss << "] and [";
+        for (auto s : tensor2->sizes()) oss << s << " ";
+        oss << "]";
+
+        rb_raise(rb_eArgError, "%s", oss.str().c_str());
+        return Qnil;
+    }
+}
+
+// Initializes the Rubynetic module
+extern "C" void Init_rubynetic()
+{
+    VALUE mRubynetic = rb_define_module("Rubynetic");
+
+    rb_define_singleton_method(mRubynetic, "tensor_from_data", RUBY_METHOD_FUNC(tensor_from_data), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_print", RUBY_METHOD_FUNC(tensor_print), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_to_string", RUBY_METHOD_FUNC(tensor_to_string), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_zeros", RUBY_METHOD_FUNC(tensor_zeros), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_ones", RUBY_METHOD_FUNC(tensor_ones), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_rand", RUBY_METHOD_FUNC(tensor_rand), -1);
+    rb_define_singleton_method(mRubynetic, "tensor_to_array", RUBY_METHOD_FUNC(tensor_to_array), 1);
+    rb_define_singleton_method(mRubynetic, "tensor_add", RUBY_METHOD_FUNC(tensor_add), 2);
+    rb_define_singleton_method(mRubynetic, "tensor_sub", RUBY_METHOD_FUNC(tensor_sub), 2);
+    rb_define_singleton_method(mRubynetic, "tensor_mul", RUBY_METHOD_FUNC(tensor_mul), 2);
+    rb_define_singleton_method(mRubynetic, "tensor_div", RUBY_METHOD_FUNC(tensor_div), 2);
+    rb_define_singleton_method(mRubynetic, "tensor_dot", RUBY_METHOD_FUNC(tensor_dot), 2);
+    rb_define_singleton_method(mRubynetic, "tensor_shape", RUBY_METHOD_FUNC(tensor_shape), 1);
+}
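Init_rubynetic registers every function above as a singleton method on the Rubynetic module, so once the compiled bundle is loaded they can be called directly. A minimal sketch, assuming the bundle has already been built and is on the load path; the printed format follows tensor_to_string above:

    require "rubynetic/rubynetic"   # load the compiled bundle (path may vary)

    a = Rubynetic.tensor_from_data([[1.0, 2.0], [3.0, 4.0]])
    b = Rubynetic.tensor_from_data([[5.0, 6.0], [7.0, 8.0]])

    p Rubynetic.tensor_shape(a)            # => [2, 2]
    puts Rubynetic.tensor_to_string(Rubynetic.tensor_add(a, b))

    # tensor_mul/tensor_div broadcast via torch::broadcast_tensors, while
    # tensor_sub requires identical sizes. For 1D inputs tensor_dot returns
    # a plain Float rather than a wrapped tensor:
    v = Rubynetic.tensor_from_data([1.0, 2.0, 3.0])
    w = Rubynetic.tensor_from_data([4.0, 5.0, 6.0])
    p Rubynetic.tensor_dot(v, w)           # => 32.0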
data/ext/torch/rubynetic/rubynetic.o
ADDED
Binary file
data/lib/rubynetic/libtorch_downloader.rb
ADDED
@@ -0,0 +1,82 @@
+require "open-uri"
+require "fileutils"
+require "tmpdir"
+
+module LibTorchDownloader
+  BASE_URL = "https://download.pytorch.org/libtorch"
+
+  def self.download_and_install(os, version = "2.1.0", variant = "cpu")
+    url = case os
+          when "macos"
+            "#{BASE_URL}/#{variant}/libtorch-macos-#{version}.zip"
+          when "linux"
+            "#{BASE_URL}/#{variant}/libtorch-cxx11-abi-shared-with-deps-#{version}+#{variant}.zip"
+          when "windows"
+            "#{BASE_URL}/#{variant}/libtorch-win-shared-with-deps-#{version}.zip"
+          else
+            raise "❌ OS not supported: #{os}"
+          end
+
+    install_path = File.expand_path("~/libtorch") # 📌 Main installation directory
+    temp_path = Dir.mktmpdir("libtorch_") # 🟢 Temporary directory for extraction
+    zip_file = File.join(Dir.tmpdir, "libtorch.zip")
+
+    puts "📥 Downloading LibTorch #{version} (#{variant}) for #{os}..."
+    URI.open(url) do |remote_file|
+      File.open(zip_file, "wb") { |file| file.write(remote_file.read) }
+    end
+
+    puts "📦 Extracting to temp folder: #{temp_path}..."
+    FileUtils.mkdir_p(temp_path)
+
+    unless system("which unzip > /dev/null")
+      raise "❌ 'unzip' command not found. Please install unzip."
+    end
+
+    system("unzip -o #{zip_file} -d #{temp_path}")
+
+    # 📌 Determine where the files ended up after extraction
+    extracted_dir = "#{temp_path}/libtorch"
+    puts "🔍 Extracted files are in: #{extracted_dir}"
+
+    # 📌 Fix the directory structure if the files landed in `libtorch/libtorch`
+    if Dir.exist?("#{extracted_dir}/libtorch")
+      puts "🔄 Fixing extraction structure: Moving inner libtorch/* to root"
+      extracted_dir = "#{extracted_dir}/libtorch"
+    end
+
+    # 📌 Make sure `install_path` exists
+    FileUtils.mkdir_p(install_path)
+
+    # 📌 Move the files, but only if they exist
+    Dir.glob("#{extracted_dir}/*").each do |file|
+      dest = File.join(install_path, File.basename(file))
+      if File.exist?(file) || File.directory?(file)
+        puts "📂 Moving: #{file} -> #{dest}"
+        FileUtils.mv(file, dest)
+      else
+        puts "⚠️ Warning: File or directory does not exist - #{file}"
+      end
+    end
+
+    puts "🛠 Setting up environment..."
+    ENV["LIBTORCH_INSTALL_PATH"] = install_path
+
+    case os
+    when "macos"
+      system("echo 'export LIBTORCH_INSTALL_PATH=#{install_path}' >> ~/.zshrc")
+      system("echo 'export DYLD_LIBRARY_PATH=#{install_path}/lib:$DYLD_LIBRARY_PATH' >> ~/.zshrc")
+    when "linux"
+      system("echo 'export LIBTORCH_INSTALL_PATH=#{install_path}' >> ~/.bashrc")
+      system("echo 'export LD_LIBRARY_PATH=#{install_path}/lib:$LD_LIBRARY_PATH' >> ~/.bashrc")
+    when "windows"
+      system("setx PATH \"#{install_path}\\lib;%PATH%\"")
+    end
+
+    puts "✅ Installation complete! LibTorch accessible at: #{install_path}"
+
+    # Clean up temporary files
+    FileUtils.rm_rf(temp_path)
+    File.delete(zip_file) if File.exist?(zip_file)
+  end
+end
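A direct-call sketch; note that download_and_install has side effects beyond the download itself, moving the archive contents into ~/libtorch and appending export lines to the shell rc file of the given platform:

    require "rubynetic/libtorch_downloader"

    # Fetch the CPU build of LibTorch 2.1.0 for macOS and install it into ~/libtorch;
    # on macOS this also appends LIBTORCH_INSTALL_PATH and DYLD_LIBRARY_PATH
    # exports to ~/.zshrc.
    LibTorchDownloader.download_and_install("macos", "2.1.0", "cpu")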
data/lib/rubynetic/libtorch_installer.rb
ADDED
@@ -0,0 +1,14 @@
+require_relative "libtorch_downloader"
+
+module LibTorchInstaller
+  def self.install(version: "2.1.0", variant: "cpu")
+    os = case RbConfig::CONFIG["host_os"]
+         when /darwin/ then "macos"
+         when /mswin|mingw|cygwin/ then "windows"
+         when /linux/ then "linux"
+         else raise "❌ OS not supported!"
+         end
+
+    LibTorchDownloader.download_and_install(os, version, variant)
+  end
+end
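In practice this wrapper, not the downloader, is the entry point: it maps RbConfig's host_os onto the downloader's "macos"/"windows"/"linux" labels and raises on anything else. Typical use:

    require "rubynetic/libtorch_installer"

    # OS detection happens inside; only version and variant are chosen by the caller.
    LibTorchInstaller.install(version: "2.1.0", variant: "cpu")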
data/lib/rubynetic/tensor.rb
ADDED
@@ -0,0 +1,97 @@
+# lib/rubynetic/tensor.rb
+module Rubynetic
+  class Tensor
+    attr_reader :tensor
+
+    def initialize(data)
+      raise TypeError, "Expected an array, got #{data.class}" unless data.is_a?(Array)
+      @tensor = Rubynetic.tensor_from_data(data)
+    end
+
+    def self.valid_data?(data)
+      return false unless data.is_a?(Array)
+
+      # Check whether the array contains only numbers, or only arrays of numbers
+      if data.all? { |item| item.is_a?(Numeric) }
+        return true
+      elsif data.all? { |item| item.is_a?(Array) && item.all? { |num| num.is_a?(Numeric) } }
+        return true
+      end
+
+      false
+    end
+
+    def inspect(precision = 2)
+      array = Rubynetic.tensor_to_array(@tensor) rescue @tensor
+
+      case array
+      when Float, Integer
+        return "tensor(#{format("%.#{precision}f", array)})"
+      when Array
+        if array.all? { |e| e.is_a?(Array) }
+          # ✅ Multi-dimensional tensor (matrix)
+          formatted_values = array.map do |row|
+            " [" + row.map { |v| format("%.#{precision}f", v) }.join(', ') + "]"
+          end
+          return "tensor([\n" + formatted_values.join("\n") + "\n])"
+        else
+          # ✅ One-dimensional tensor (vector)
+          formatted_values = array.map { |v| format("%.#{precision}f", v) }
+          return "tensor([" + formatted_values.join(', ') + "])"
+        end
+      else
+        return "tensor(#{array})"
+      end
+    end
+
+
+    def to_s
+      inspect
+    end
+
+    def print
+      puts inspect
+    end
+
+    def shape
+      Rubynetic.tensor_shape(@tensor) # ✅ Fetch the tensor shape via the C++ API
+    end
+
+    def self.zeros(*shape)
+      zeros_tensor = Rubynetic.tensor_zeros(shape)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, zeros_tensor) }
+    end
+
+    def self.ones(*shape)
+      ones_tensor = Rubynetic.tensor_ones(shape)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, ones_tensor) }
+    end
+
+
+    def self.rand(*shape, min: 0.0, max: 1.0)
+      rand_tensor = Rubynetic.tensor_rand(*shape.map(&:to_i), min.to_f, max.to_f)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, rand_tensor) }
+    end
+
+    def +(other)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, Rubynetic.tensor_add(@tensor, other.tensor)) }
+    end
+
+    def -(other)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, Rubynetic.tensor_sub(@tensor, other.tensor)) }
+    end
+
+    def *(other)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, Rubynetic.tensor_mul(@tensor, other.tensor)) }
+    end
+
+    def /(other)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, Rubynetic.tensor_div(@tensor, other.tensor)) }
+    end
+
+    def dot(other)
+      Tensor.allocate.tap { |t| t.instance_variable_set(:@tensor, Rubynetic.tensor_dot(@tensor, other.tensor)) }
+    end
+
+  end
+end
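A usage sketch of the high-level wrapper, assuming the native extension has been loaded (for example via require "rubynetic/torch"); inspect rounds to two decimal places by default, per the format calls above:

    a = Rubynetic::Tensor.new([[1, 2], [3, 4]])
    b = Rubynetic::Tensor.ones(2, 2)

    p a.shape                 # => [2, 2]
    puts (a + b).inspect      # matrix printed row by row, e.g.  [2.00, 3.00]
    puts a.dot(b).inspect     # matrix product via tensor_dot

    r = Rubynetic::Tensor.rand(2, 3, min: -1.0, max: 1.0)
    puts r.inspect(4)         # explicit precision: four decimal places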
data/lib/rubynetic/torch.rb
ADDED
@@ -0,0 +1,18 @@
+require_relative "libtorch_installer"
+
+module Rubynetic
+  module Torch
+    RUBYNETIC_LIB_PATH = File.expand_path("../../lib/rubynetic/rubynetic.bundle", __dir__)
+
+    unless File.exist?(RUBYNETIC_LIB_PATH)
+      puts "🔧 `rubynetic.bundle` not found, attempting to install `LibTorch`..."
+      LibTorchInstaller.install
+    end
+
+    require RUBYNETIC_LIB_PATH
+
+    # Check that the C++ methods have been loaded
+    raise NameError, "Method tensor_from_data not found" unless Rubynetic.respond_to?(:tensor_from_data)
+    raise NameError, "Method tensor_print not found" unless Rubynetic.respond_to?(:tensor_print)
+  end
+end
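Requiring this file is enough to bootstrap the whole stack: it installs LibTorch when the compiled bundle is missing, loads the bundle, and fails fast with a NameError if the C++ methods did not register. A minimal sketch:

    require "rubynetic/torch"

    # Past this point the guard clauses above guarantee the native methods exist.
    t = Rubynetic.tensor_from_data([1.0, 2.0, 3.0])
    puts Rubynetic.tensor_to_string(t)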
data/lib/rubynetic.rb
ADDED
metadata
ADDED
@@ -0,0 +1,58 @@
+--- !ruby/object:Gem::Specification
+name: rubynetic
+version: !ruby/object:Gem::Version
+  version: 0.3.2
+platform: ruby
+authors:
+- Alexey Cherebayev
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2025-03-12 00:00:00.000000000 Z
+dependencies: []
+description: This gem downloads and installs the LibTorch library for macOS, Windows,
+  and Linux, and also includes the C++ extension.
+email:
+- devalex360@email.com
+executables:
+- setup_libtorch
+extensions:
+- ext/torch/rubynetic/extconf.rb
+extra_rdoc_files: []
+files:
+- bin/console
+- bin/setup_libtorch
+- ext/torch/rubynetic/Makefile
+- ext/torch/rubynetic/extconf.rb
+- ext/torch/rubynetic/rubynetic.bundle
+- ext/torch/rubynetic/rubynetic.cpp
+- ext/torch/rubynetic/rubynetic.o
+- lib/rubynetic.rb
+- lib/rubynetic/libtorch_downloader.rb
+- lib/rubynetic/libtorch_installer.rb
+- lib/rubynetic/tensor.rb
+- lib/rubynetic/torch.rb
+homepage: https://github.com/AlexChe360/Rubynetic
+licenses:
+- MIT
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '3.0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.5.9
+signing_key:
+specification_version: 4
+summary: Automatic installation of LibTorch for Ruby
+test_files: []