TensorArray-0.0.1a0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
+++ TensorArray-0.0.1a0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 TensorArray-Creators
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+++ TensorArray-0.0.1a0.dist-info/METADATA
@@ -0,0 +1,31 @@
+ Metadata-Version: 2.1
+ Name: TensorArray
+ Version: 0.0.1a0
+ Summary: A machine learning package
+ Author-email: TensorArray-Creators <noob_taken@outlook.com>
+ Project-URL: Homepage, https://github.com/BigNoobWasTaken/TensorArray-Python
+ Project-URL: Issues, https://github.com/BigNoobWasTaken/TensorArray-Python/issues
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Environment :: GPU :: NVIDIA CUDA :: 12
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+
+ # Tensor-Array-Python
+ This machine learning library is built on the [Tensor-Array](https://github.com/Tensor-Array/Tensor-Array) library.
+
+ This project is still in alpha; we are trying to make it look like the main framework while being easier to code.
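+
+ A minimal usage sketch, assuming the alpha API that ships in this wheel
+ (`tensor_array.core.zeros` and the lazily initialized `Linear` layer):
+
+ ```python
+ from tensor_array.core import zeros, DataType
+ from tensor_array.layers.util import Linear
+
+ t = zeros(shape=(2, 4), dtype=DataType.FLOAT)   # 2 x 4 tensor of zeros
+ layer = Linear(8)                               # weights are created on the first call
+ print(layer(t).shape())
+ ```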
+++ TensorArray-0.0.1a0.dist-info/RECORD
@@ -0,0 +1,20 @@
+ tensor_array/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ tensor_array/activation.py,sha256=zqscUcoAqoZsiRN8mOLjOAyaKnroFgTQdxuxYFXBN7s,322
+ tensor_array/core/__init__.py,sha256=Wz1zYL75GTyBCbFKFvjNjDVdh6E4npstHClPXrEeCas,136
+ tensor_array/layers/__init__.py,sha256=WqKCNIfpWt0vKgPibGs_hLOsog2BXsqkZCKpiEcJz14,57
+ tensor_array/layers/layer.py,sha256=NBuZPdaIaui4ZBYxy_f2L5mPcsdiO8KqFQcS-BWTFE8,6422
+ tensor_array/layers/parameter.py,sha256=13pYvLl0IMVIoVolYiPDpN-KB37fzy_U7FibRyvrryA,72
+ tensor_array/layers/attention/__init__.py,sha256=GhIlCdr64wafd1hTNAVAwcirXNTFiin0yyC_0Muh6M4,70
+ tensor_array/layers/attention/attention.py,sha256=d8u1tTrCHEffjnwxGgm9gq16JTIaond40SR_Pa6v5v0,1598
+ tensor_array/layers/attention/transformer.py,sha256=uzuPk4uOWL-kwDHMjIanuA3oopfXyNmMnDRQohQAQjg,838
+ tensor_array/layers/normalization/__init__.py,sha256=WygUO1GTeW1UOeZ0vNLNaFZrtcEVlHVADDOnr4a0wRo,59
+ tensor_array/layers/normalization/normalization.py,sha256=t4cwoPEXEgqXfQOp5ZkKjb-jPBJ59dwVYBapdcYX4X8,58
+ tensor_array/layers/util/__init__.py,sha256=yTbs7IqBcvTT6DwB7FqAixCzOgWGqmkvk4l64Qgs5-c,169
+ tensor_array/layers/util/activation.py,sha256=7P2ydio6aiXeh5ygBNowERH_pzfONQQuHfpG92xb3d4,337
+ tensor_array/layers/util/linear.py,sha256=zCktqHCRz7gr8EIHA5qWg--moMCloZvVWkDC5zZUNnY,573
+ tensor_array/layers/util/sequential.py,sha256=ZR7DieysFh-ot12Oxi2S4GePscO7o75uRSHQ6ZzALhc,490
+ TensorArray-0.0.1a0.dist-info/LICENSE,sha256=dwa4jJce6OUzSE8wx_T1u7gJHRszgJnb5pBLrz__xkk,1077
+ TensorArray-0.0.1a0.dist-info/METADATA,sha256=UOQRCykxNcF7vCMvm6wHpyOTJtrSzMtJO0xbGFaRJ2Y,843
+ TensorArray-0.0.1a0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ TensorArray-0.0.1a0.dist-info/top_level.txt,sha256=o65g4Z8Rpjh8WDCwgtBA-2oGlqc-gC6J_I2jL5bBLdE,13
+ TensorArray-0.0.1a0.dist-info/RECORD,,
+++ TensorArray-0.0.1a0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.42.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
+++ TensorArray-0.0.1a0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ tensor_array
+++ tensor_array/__init__.py
File without changes (empty file)
+++ tensor_array/activation.py
@@ -0,0 +1,15 @@
+ from tensor_array.core import Tensor
+ from tensor_array.core import zeros
+
+ def relu(input):
+     # relu(x) = max(0, x): keep positive entries, zero out the rest.
+     tensor_zeros = zeros(shape=input.shape(), dtype=input.dtype())
+     return (input > tensor_zeros).condition(input, tensor_zeros)
+
+ def sigmoid(input):
+     return input.sigmoid()
+
+ def softmax(input, dim=0):
+     # NOTE: softmax is not implemented in this alpha release; it returns
+     # its input unchanged and ignores `dim`.
+     return input
+++ tensor_array/core/__init__.py
@@ -0,0 +1,3 @@
+ from tensor_array.core.tensor2 import Tensor
+ from tensor_array.core.tensor2 import zeros
+ from tensor_array.core.tensor2 import DataType
+++ tensor_array/layers/__init__.py
@@ -0,0 +1,2 @@
+ from .layer import Layer
+ from .parameter import Parameter
+++ tensor_array/layers/attention/__init__.py
@@ -0,0 +1 @@
+ from tensor_array.layers.attention.attention import MultiheadAttention
+++ tensor_array/layers/attention/attention.py
@@ -0,0 +1,48 @@
+ from typing import Any
+ from .. import Layer
+ from ..util import Linear
+ from tensor_array.core import Tensor
+ from tensor_array.activation import softmax
+
+ def scaled_dot_product_attention(q, k, v, mask=None):
+     # Attention(Q, K, V) = softmax(Q @ K^T) @ V.
+     # NOTE: the 1/sqrt(d_k) scaling and the mask are not applied in this
+     # alpha; `mask` is accepted but ignored.
+     attn_scores = q @ k.transpose(len(k.shape()) - 2, len(k.shape()) - 1)
+     attn_probs = softmax(attn_scores, len(attn_scores.shape()) - 1)
+     return attn_probs @ v
+
+ class MultiheadAttention(Layer):
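+     """
+     Multi-head attention in the style of "Attention Is All You Need".
+
+     A hypothetical call sketch (shapes assumed (batch, seq_len, d_model)):
+
+         attn = MultiheadAttention(d_model=512, n_head=8)
+         output = attn(q, k, v)
+     """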
+     def __init__(self, d_model, n_head) -> None:
+         super().__init__()
+         self.linear_q = Linear(d_model)
+         self.linear_k = Linear(d_model)
+         self.linear_v = Linear(d_model)
+         self.linear_o = Linear(d_model)
+         self.n_head = n_head
+
+     def calculate(self, input_q, input_k, input_v, mask=None) -> Any:
+         temp_q = self.linear_q(input_q)
+         temp_k = self.linear_k(input_k)
+         temp_v = self.linear_v(input_v)
+
+         # Split the last dimension into (n_head, d_k) heads and move the
+         # head axis ahead of the sequence axis; `//` keeps the sizes integral.
+         temp_q = temp_q.reshape((temp_q.shape()[0], temp_q.shape()[1], self.n_head, temp_q.shape()[-1] // self.n_head)).transpose(1, 2)
+         temp_k = temp_k.reshape((temp_k.shape()[0], temp_k.shape()[1], self.n_head, temp_k.shape()[-1] // self.n_head)).transpose(1, 2)
+         temp_v = temp_v.reshape((temp_v.shape()[0], temp_v.shape()[1], self.n_head, temp_v.shape()[-1] // self.n_head)).transpose(1, 2)
+
+         attention_output = scaled_dot_product_attention(temp_q, temp_k, temp_v, mask)
+
+         # Merge the heads back into (batch, seq, n_head * d_k).
+         attention_output = attention_output.transpose(1, 2)
+         attention_output = attention_output.reshape((attention_output.shape()[0], attention_output.shape()[1], attention_output.shape()[2] * attention_output.shape()[3]))
+         return self.linear_o(attention_output)
+++ tensor_array/layers/attention/transformer.py
@@ -0,0 +1,29 @@
+ from collections import OrderedDict
+ from typing import Any
+ from .. import Layer
+ from .attention import MultiheadAttention
+ from tensor_array.activation import relu
+ from ..util import Sequential
+ from ..util import Linear
+ from ..util import Activation
+ from ..normalization import Normalization
+
+ class TransformerEncoderImpl(Layer):
+     def __init__(self, d_model, n_head, ff_size) -> None:
+         super().__init__()
+         self.feed_forward = Sequential(OrderedDict([
+             ('linear_1', Linear(ff_size)),
+             ('activation', Activation(relu)),
+             ('linear_2', Linear(d_model))
+         ]))
+         self.multihead_attn = MultiheadAttention(d_model, n_head)
+         # The original alpha left these unassigned; Normalization (a stub in
+         # this release) is assumed to be the intended layer norm.
+         self.layer_norm_1 = Normalization()
+         self.layer_norm_2 = Normalization()
+
+     def calculate(self, input) -> Any:
+         attn_output = self.multihead_attn(input, input, input)
+         attn_output = self.layer_norm_1(input + attn_output)
+         ff_output = self.feed_forward(attn_output)
+         return self.layer_norm_2(attn_output + ff_output)
+++ tensor_array/layers/layer.py
@@ -0,0 +1,161 @@
+ from collections import OrderedDict
+ from typing import Any, Dict, Optional, Tuple
+ from tensor_array.core import Tensor
+ from .parameter import Parameter
+
+ class Layer:
+     """
+     Base class for all layers.
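+
+     Subclasses override layer_init() (called once, with the shapes of the
+     tensor arguments) and calculate(). A minimal subclass sketch, with
+     hypothetical names, assuming the tensor_array.core API in this wheel:
+
+         from tensor_array.core import zeros, DataType
+         from tensor_array.layers import Layer, Parameter
+
+         class AddBias(Layer):
+             def __init__(self, features) -> None:
+                 super().__init__()
+                 self.b = Parameter(zeros(shape=(features,), dtype=DataType.FLOAT))
+
+             def calculate(self, t):
+                 return t + self.b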
+     """
+     is_running: bool
+     _layers: Dict[str, Optional['Layer']]
+     _parameters: Dict[str, Optional[Parameter]]
+     _tensors: Dict[str, Optional[Tensor]]
+
+     def __init__(self) -> None:
+         super().__setattr__('is_running', False)
+         super().__setattr__('_layers', OrderedDict())
+         super().__setattr__('_parameters', OrderedDict())
+         super().__setattr__('_tensors', OrderedDict())
+
+     def __call__(self, *args: Any, **kwds: Any) -> Any:
+         # On the first call, forward the tensor shapes to layer_init() so
+         # the layer can create its parameters lazily.
+         if not self.__dict__['is_running']:
+             list_arg = (t.shape() for t in args if isinstance(t, Tensor))
+             dict_kwargs = {
+                 key: val.shape()
+                 for key, val in kwds.items()
+                 if isinstance(val, Tensor)
+             }
+             self.layer_init(*list_arg, **dict_kwargs)
+             super().__setattr__('is_running', True)
+         return self.calculate(*args, **kwds)
+
+     def layer_init(self, *args: Tuple, **kwds: Tuple) -> None:
+         pass
+
+     def calculate(self, *args: Any, **kwds: Any) -> Any:
+         pass
+
+     def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
+         if '_parameters' not in self.__dict__:
+             raise AttributeError("cannot assign parameter before Layer.__init__() call")
+         elif not isinstance(name, str):
+             raise TypeError(f"parameter name should be a string. Got {name}")
+         elif '.' in name:
+             raise KeyError("parameter name can't contain \".\"")
+         elif name == '':
+             raise KeyError("parameter name can't be empty string \"\"")
+         elif hasattr(self, name) and name not in self._parameters:
+             raise KeyError(f"attribute '{name}' already exists")
+         elif not isinstance(param, Parameter) and param is not None:
+             raise TypeError(f"cannot assign '{param}' object to parameter '{name}' "
+                             "(tensor_array.layers.Parameter or None required)")
+         else:
+             self._parameters[name] = param
+
+     def register_tensor(self, name: str, param: Optional[Tensor]) -> None:
+         if '_tensors' not in self.__dict__:
+             raise AttributeError("cannot assign tensor before Layer.__init__() call")
+         elif not isinstance(name, str):
+             raise TypeError(f"tensor name should be a string. Got {name}")
+         elif '.' in name:
+             raise KeyError("tensor name can't contain \".\"")
+         elif name == '':
+             raise KeyError("tensor name can't be empty string \"\"")
+         elif hasattr(self, name) and name not in self._tensors:
+             raise KeyError(f"attribute '{name}' already exists")
+         elif not isinstance(param, Tensor) and param is not None:
+             raise TypeError(f"cannot assign '{param}' object to tensor '{name}' "
+                             "(tensor_array.core.tensor2.Tensor or None required)")
+         else:
+             self._tensors[name] = param
+
+     def register_layer(self, name: str, layer: Optional['Layer']) -> None:
+         if not isinstance(layer, Layer) and layer is not None:
+             raise TypeError(f"{layer} is not a Layer subclass")
+         elif not isinstance(name, str):
+             raise TypeError(f"layer name should be a string. Got {name}")
+         elif hasattr(self, name) and name not in self._layers:
+             raise KeyError(f"attribute '{name}' already exists")
+         elif '.' in name:
+             raise KeyError(f"layer name can't contain \".\", got: {name}")
+         elif name == '':
+             raise KeyError("layer name can't be empty string \"\"")
+         self._layers[name] = layer
+
+     def __setattr__(self, __name: str, __value: Any) -> None:
+         # Route Parameter/Tensor/Layer values into their registries so they
+         # can be looked up again through __getattr__.
+         def remove_from(*dicts_or_sets):
+             for d in dicts_or_sets:
+                 if __name in d:
+                     if isinstance(d, dict):
+                         del d[__name]
+                     else:
+                         d.discard(__name)
+
+         params = self.__dict__.get('_parameters')
+         layers = self.__dict__.get('_layers')
+         tensors = self.__dict__.get('_tensors')
+         if (params is not None and __name in params) or (layers is not None and __name in layers) or (tensors is not None and __name in tensors):
+             raise TypeError(f"cannot assign '{__value}' as parameter '{__name}'")
+         elif isinstance(__value, Parameter):
+             if params is None:
+                 raise AttributeError("cannot assign parameters before Layer.__init__() call")
+             remove_from(self.__dict__, self._layers, self._tensors)
+             self.register_parameter(__name, __value)
+         elif isinstance(__value, Tensor):
+             if tensors is None:
+                 raise AttributeError("cannot assign tensors before Layer.__init__() call")
+             remove_from(self.__dict__, self._parameters, self._layers)
+             self.register_tensor(__name, __value)
+         elif isinstance(__value, Layer):
+             if layers is None:
+                 raise AttributeError("cannot assign layers before Layer.__init__() call")
+             remove_from(self.__dict__, self._parameters, self._tensors)
+             self.register_layer(__name, __value)
+         else:
+             super().__setattr__(__name, __value)
+
+     def __getattr__(self, __name: str) -> Any:
+         if '_parameters' in self.__dict__:
+             _parameters = self.__dict__['_parameters']
+             if __name in _parameters:
+                 return _parameters[__name]
+         if '_tensors' in self.__dict__:
+             _tensors = self.__dict__['_tensors']
+             if __name in _tensors:
+                 return _tensors[__name]
+         if '_layers' in self.__dict__:
+             _layers = self.__dict__['_layers']
+             if __name in _layers:
+                 return _layers[__name]
+         # object has no __getattr__ to delegate to, so fail explicitly.
+         raise AttributeError(f"'{type(self).__name__}' object has no attribute '{__name}'")
+
+     def __delattr__(self, __name: str) -> None:
+         if __name in self._parameters:
+             del self._parameters[__name]
+         elif __name in self._tensors:
+             del self._tensors[__name]
+         elif __name in self._layers:
+             del self._layers[__name]
+         else:
+             super().__delattr__(__name)
+++ tensor_array/layers/normalization/__init__.py
@@ -0,0 +1 @@
+ from tensor_array.layers.normalization.normalization import Normalization
+++ tensor_array/layers/normalization/normalization.py
@@ -0,0 +1,7 @@
+ from .. import Layer
+
+ class Normalization(Layer):
+     # Stub in this alpha release: behaves as an identity layer until a
+     # real normalization is implemented.
+     def calculate(self, t):
+         return t
+++ tensor_array/layers/parameter.py
@@ -0,0 +1,4 @@
+ from tensor_array.core import Tensor
+
+ class Parameter(Tensor):
+     pass
+++ tensor_array/layers/util/__init__.py
@@ -0,0 +1,3 @@
+ from tensor_array.layers.util.activation import Activation
+ from tensor_array.layers.util.linear import Linear
+ from tensor_array.layers.util.sequential import Sequential
+++ tensor_array/layers/util/activation.py
@@ -0,0 +1,10 @@
+ from .. import Layer
+ from typing import Any, Callable
+
+ class Activation(Layer):
+     def __init__(self, activation_function: Callable) -> None:
+         super().__init__()
+         self.activation_function = activation_function
+
+     def calculate(self, *args: Any, **kwds: Any) -> Any:
+         return self.activation_function(*args, **kwds)
+++ tensor_array/layers/util/linear.py
@@ -0,0 +1,26 @@
+ from .. import Layer
+ from .. import Parameter
+ from tensor_array.core import Tensor
+ from tensor_array.core import zeros
+ from tensor_array.core import DataType
+ from typing import Any
+
+
+ class Linear(Layer):
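+     """
+     Fully connected layer computing t @ w + b. The `bias` argument is the
+     output feature count; the weight w is created lazily from the input's
+     last dimension on the first call (see Layer.__call__).
+     """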
+     def __init__(self, bias) -> None:
+         super().__init__()
+         self.bias_shape = bias
+         self.b = Parameter(zeros(shape=(bias,), dtype=DataType.FLOAT))
+
+     def layer_init(self, t):
+         # t is the shape of the first tensor argument; t[-1] is the input
+         # feature count.
+         self.w = Parameter(zeros(shape=(t[-1], self.bias_shape), dtype=DataType.FLOAT))
+
+     def calculate(self, t):
+         return t @ self.w + self.b
+++ tensor_array/layers/util/sequential.py
@@ -0,0 +1,28 @@
+ from .. import Layer
+ from typing import OrderedDict
+
+
+ class Sequential(Layer):
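+     """
+     Runs the given layers in insertion order.
+
+     A hypothetical sketch (names assumed):
+
+         seq = Sequential(OrderedDict([
+             ('linear', Linear(8)),
+             ('act', Activation(relu)),
+         ]))
+         output = seq(t)
+     """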
+     def __init__(self, _layers: OrderedDict[str, Layer]) -> None:
+         super().__init__()
+         # Register each sub-layer through register_layer() so it is tracked
+         # in self._layers.
+         for name, layer in _layers.items():
+             self.register_layer(name, layer)
+
+     def calculate(self, t):
+         tensorloop = t
+         for content in self._layers.values():
+             tensorloop = content(tensorloop)
+         return tensorloop