openarchx-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openarchx/__init__.py +11 -0
- openarchx/core/tensor.py +179 -0
- openarchx/cuda/__init__.py +27 -0
- openarchx/cuda/cuda_ops.py +296 -0
- openarchx/layers/activations.py +63 -0
- openarchx/layers/base.py +40 -0
- openarchx/layers/cnn.py +145 -0
- openarchx/layers/transformer.py +131 -0
- openarchx/nn/__init__.py +26 -0
- openarchx/nn/activations.py +127 -0
- openarchx/nn/containers.py +174 -0
- openarchx/nn/dropout.py +121 -0
- openarchx/nn/layers.py +338 -0
- openarchx/nn/losses.py +156 -0
- openarchx/nn/module.py +18 -0
- openarchx/nn/padding.py +120 -0
- openarchx/nn/pooling.py +318 -0
- openarchx/nn/rnn.py +226 -0
- openarchx/nn/transformers.py +187 -0
- openarchx/optimizers/adam.py +49 -0
- openarchx/optimizers/adaptive.py +63 -0
- openarchx/optimizers/base.py +24 -0
- openarchx/optimizers/modern.py +98 -0
- openarchx/optimizers/optx.py +91 -0
- openarchx/optimizers/sgd.py +63 -0
- openarchx/quantum/circuit.py +92 -0
- openarchx/quantum/gates.py +126 -0
- openarchx/utils/__init__.py +50 -0
- openarchx/utils/data.py +229 -0
- openarchx/utils/huggingface.py +288 -0
- openarchx/utils/losses.py +21 -0
- openarchx/utils/model_io.py +553 -0
- openarchx/utils/pytorch.py +420 -0
- openarchx/utils/tensorflow.py +467 -0
- openarchx/utils/transforms.py +259 -0
- openarchx-0.1.0.dist-info/METADATA +180 -0
- openarchx-0.1.0.dist-info/RECORD +43 -0
- openarchx-0.1.0.dist-info/WHEEL +5 -0
- openarchx-0.1.0.dist-info/licenses/LICENSE +21 -0
- openarchx-0.1.0.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_cuda_ops.py +205 -0
- tests/test_integrations.py +236 -0
openarchx/utils/transforms.py
ADDED
@@ -0,0 +1,259 @@
+import numpy as np
+from ..core.tensor import Tensor
+
+
+class Transform:
+    """Base class for all transforms."""
+
+    def __call__(self, x):
+        """Apply the transform to input x."""
+        raise NotImplementedError("Transform must implement __call__ method")
+
+
+class Compose:
+    """Composes several transforms together."""
+
+    def __init__(self, transforms):
+        """
+        Args:
+            transforms (list): List of transforms to compose.
+        """
+        self.transforms = transforms
+
+    def __call__(self, x):
+        """Apply all transforms sequentially."""
+        for t in self.transforms:
+            x = t(x)
+        return x
+
+
+class ToTensor:
+    """Convert a numpy.ndarray to OpenArchX Tensor."""
+
+    def __call__(self, x):
+        """
+        Args:
+            x: NumPy array or list to be converted to tensor.
+        Returns:
+            OpenArchX Tensor.
+        """
+        if isinstance(x, Tensor):
+            return x
+        if not isinstance(x, np.ndarray):
+            x = np.array(x, dtype=np.float32)
+        return Tensor(x)
+
+
+class Normalize:
+    """Normalize a tensor with mean and standard deviation."""
+
+    def __init__(self, mean, std):
+        """
+        Args:
+            mean: Mean value for each channel/feature.
+            std: Standard deviation for each channel/feature.
+        """
+        self.mean = np.array(mean, dtype=np.float32)
+        self.std = np.array(std, dtype=np.float32)
+
+    def __call__(self, x):
+        """
+        Args:
+            x: Tensor to be normalized.
+        Returns:
+            Normalized Tensor.
+        """
+        if isinstance(x, Tensor):
+            return Tensor((x.data - self.mean) / self.std)
+        return (x - self.mean) / self.std
+
+
+class RandomCrop:
+    """Crop randomly the image in a sample."""
+
+    def __init__(self, output_size):
+        """
+        Args:
+            output_size (tuple or int): Desired output size. If int, square crop is made.
+        """
+        if isinstance(output_size, int):
+            self.output_size = (output_size, output_size)
+        else:
+            self.output_size = output_size
+
+    def __call__(self, x):
+        """
+        Args:
+            x: Image to be cropped.
+        Returns:
+            Cropped image.
+        """
+        h, w = x.shape[-2:]
+        new_h, new_w = self.output_size
+
+        top = np.random.randint(0, h - new_h + 1)
+        left = np.random.randint(0, w - new_w + 1)
+
+        if isinstance(x, Tensor):
+            x_data = x.data
+            if len(x_data.shape) == 2:
+                x_data = x_data[top:top + new_h, left:left + new_w]
+            elif len(x_data.shape) == 3:
+                x_data = x_data[:, top:top + new_h, left:left + new_w]
+            else:
+                x_data = x_data[..., top:top + new_h, left:left + new_w]
+            return Tensor(x_data)
+        else:
+            if len(x.shape) == 2:
+                return x[top:top + new_h, left:left + new_w]
+            elif len(x.shape) == 3:
+                return x[:, top:top + new_h, left:left + new_w]
+            else:
+                return x[..., top:top + new_h, left:left + new_w]
+
+
+class RandomHorizontalFlip:
+    """Randomly flip the image horizontally."""
+
+    def __init__(self, p=0.5):
+        """
+        Args:
+            p (float): Probability of flipping.
+        """
+        self.p = p
+
+    def __call__(self, x):
+        """
+        Args:
+            x: Image to be flipped.
+        Returns:
+            Flipped image with probability p.
+        """
+        if np.random.random() < self.p:
+            if isinstance(x, Tensor):
+                # Handle the last dimension for channels first or last
+                if len(x.data.shape) == 3:  # Assume channels-first (C, H, W)
+                    return Tensor(x.data[:, :, ::-1])
+                elif len(x.data.shape) == 2:  # No channels
+                    return Tensor(x.data[:, ::-1])
+                else:  # Batch images or other dimensions
+                    return Tensor(np.flip(x.data, axis=-1))
+            else:
+                # Handle the last dimension for channels first or last
+                if len(x.shape) == 3:  # Assume channels-first (C, H, W)
+                    return x[:, :, ::-1]
+                elif len(x.shape) == 2:  # No channels
+                    return x[:, ::-1]
+                else:  # Batch images or other dimensions
+                    return np.flip(x, axis=-1)
+        return x
+
+
+class Resize:
+    """Resize the image to a given size."""
+
+    def __init__(self, output_size):
+        """
+        Args:
+            output_size (tuple or int): Desired output size. If int, square resize is made.
+        """
+        if isinstance(output_size, int):
+            self.output_size = (output_size, output_size)
+        else:
+            self.output_size = output_size
+
+    def __call__(self, x):
+        """
+        Args:
+            x: Image to be resized.
+        Returns:
+            Resized image.
+        """
+        # Check if scipy is available for resize
+        try:
+            from scipy.ndimage import zoom
+        except ImportError:
+            raise ImportError("SciPy is required for resize. Please install it with 'pip install scipy'")
+
+        new_h, new_w = self.output_size
+
+        if isinstance(x, Tensor):
+            x_data = x.data
+            if len(x_data.shape) == 2:  # (H, W)
+                h, w = x_data.shape
+                zoom_h, zoom_w = new_h / h, new_w / w
+                return Tensor(zoom(x_data, (zoom_h, zoom_w)))
+            elif len(x_data.shape) == 3:  # (C, H, W)
+                c, h, w = x_data.shape
+                zoom_h, zoom_w = new_h / h, new_w / w
+                return Tensor(zoom(x_data, (1, zoom_h, zoom_w)))
+            else:  # Batch or other dimensions
+                raise ValueError("Unsupported tensor shape for resize")
+        else:
+            if len(x.shape) == 2:  # (H, W)
+                h, w = x.shape
+                zoom_h, zoom_w = new_h / h, new_w / w
+                return zoom(x, (zoom_h, zoom_w))
+            elif len(x.shape) == 3:  # (C, H, W)
+                c, h, w = x.shape
+                zoom_h, zoom_w = new_h / h, new_w / w
+                return zoom(x, (1, zoom_h, zoom_w))
+            else:  # Batch or other dimensions
+                raise ValueError("Unsupported array shape for resize")
+
+
+class TorchTransformAdapter:
+    """Adapter for using PyTorch transforms with OpenArchX."""
+
+    def __init__(self, torch_transform):
+        """
+        Args:
+            torch_transform: A PyTorch transform or composition of transforms.
+        """
+        import importlib.util
+        if importlib.util.find_spec("torch") is None:
+            raise ImportError("PyTorch is required for this adapter. Please install it with 'pip install torch'")
+
+        import torch
+        self.torch_transform = torch_transform
+
+    def __call__(self, x):
+        """
+        Args:
+            x: Input data to transform.
+        Returns:
+            Transformed data as OpenArchX Tensor.
+        """
+        import torch
+        import numpy as np
+
+        # Convert to torch tensor if needed
+        if isinstance(x, np.ndarray):
+            x_torch = torch.from_numpy(x)
+        elif isinstance(x, Tensor):
+            x_torch = torch.from_numpy(x.data)
+        else:
+            x_torch = x
+
+        # Apply torch transform
+        result = self.torch_transform(x_torch)
+
+        # Convert back to numpy/Tensor
+        if isinstance(result, torch.Tensor):
+            result = result.numpy()
+
+        return Tensor(result) if not isinstance(x, Tensor) else result
+
+
+class TransformFactory:
+    """Factory for creating transforms from various sources."""
+
+    @staticmethod
+    def from_torch(torch_transform):
+        """Create a transform from a PyTorch transform."""
+        return TorchTransformAdapter(torch_transform)
+
+    @staticmethod
+    def compose(transforms):
+        """Create a composition of transforms."""
+        return Compose(transforms)
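The transforms module above defines the pipeline pieces but ships no usage example. The following is a minimal sketch of how they compose, assuming openarchx 0.1.0 is installed, that the import paths match the wheel layout shown in this diff, and that `Tensor` exposes `.shape` and `.data` attributes as `RandomCrop` and `Normalize` already assume.

```python
import numpy as np

# Usage sketch only; import paths are assumed from the wheel layout above.
from openarchx.core.tensor import Tensor
from openarchx.utils.transforms import (
    Compose, Normalize, RandomCrop, RandomHorizontalFlip, ToTensor,
)

# Chain transforms exactly as Compose.__call__ applies them, in order.
pipeline = Compose([
    ToTensor(),                        # ndarray/list -> Tensor
    Normalize(mean=[0.5], std=[0.5]),  # (x - mean) / std, broadcast over (C, H, W)
    RandomCrop(24),                    # random 24x24 crop; assumes Tensor has .shape
    RandomHorizontalFlip(p=0.5),       # flip along the last axis half the time
])

image = np.random.rand(1, 28, 28).astype(np.float32)  # channels-first (C, H, W)
out = pipeline(image)
print(type(out).__name__, out.data.shape)  # expected: Tensor (1, 24, 24)
```

`TransformFactory.from_torch` wraps an existing PyTorch transform via `TorchTransformAdapter` in the same way, provided torch is installed.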
openarchx-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,180 @@
+Metadata-Version: 2.4
+Name: openarchx
+Version: 0.1.0
+Summary: A lightweight and extensible deep learning framework with native model serialization
+Home-page: https://github.com/openarchx/openarchx
+Author: OpenArchX Team
+Author-email: OpenArchX Team <info@openarchx.org>
+License-Expression: MIT
+Project-URL: Homepage, https://github.com/openarchx/openarchx
+Project-URL: Bug Tracker, https://github.com/openarchx/openarchx/issues
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy>=1.19.0
+Provides-Extra: pytorch
+Requires-Dist: torch>=1.7.0; extra == "pytorch"
+Provides-Extra: tensorflow
+Requires-Dist: tensorflow>=2.4.0; extra == "tensorflow"
+Provides-Extra: huggingface
+Requires-Dist: transformers>=4.0.0; extra == "huggingface"
+Requires-Dist: datasets>=1.0.0; extra == "huggingface"
+Provides-Extra: all
+Requires-Dist: torch>=1.7.0; extra == "all"
+Requires-Dist: tensorflow>=2.4.0; extra == "all"
+Requires-Dist: transformers>=4.0.0; extra == "all"
+Requires-Dist: datasets>=1.0.0; extra == "all"
+Dynamic: author
+Dynamic: home-page
+Dynamic: license-file
+Dynamic: requires-python
+
+# OpenArchX
+
+[](https://badge.fury.io/py/openarchx)
+[](https://github.com/openarchx/openarchx/blob/main/LICENSE)
+[](https://pypi.org/project/openarchx/)
+
+A lightweight and extensible deep learning framework in pure Python with native model serialization support.
+
+## Features
+
+- Simple and clean API inspired by modern deep learning frameworks
+- Native `.oaxm` model serialization format
+- Seamless integration with PyTorch, TensorFlow, and Hugging Face
+- Framework-agnostic design for maximum flexibility
+- Pure Python implementation with minimal dependencies
+
+## Installation
+
+### Basic Installation
+
+```bash
+pip install openarchx
+```
+
+### With Framework Integration Support
+
+```bash
+# For PyTorch integration
+pip install openarchx[pytorch]
+
+# For TensorFlow integration
+pip install openarchx[tensorflow]
+
+# For Hugging Face integration
+pip install openarchx[huggingface]
+
+# For all integrations
+pip install openarchx[all]
+```
+
+## Quick Start
+
+```python
+import numpy as np
+import openarchx as ox
+from openarchx.nn import Sequential, Dense, ReLU
+from openarchx.core import Tensor
+from openarchx.utils import save_model, load_model
+
+# Create a model
+model = Sequential([
+    Dense(10, input_dim=5),
+    ReLU(),
+    Dense(1)
+])
+
+# Generate dummy data
+X = np.random.randn(100, 5).astype(np.float32)
+y = np.sum(X * np.array([0.2, 0.5, -0.3, 0.7, -0.1]), axis=1, keepdims=True)
+X, y = Tensor(X), Tensor(y)
+
+# Train the model
+optimizer = ox.optim.SGD(model.parameters(), learning_rate=0.01)
+loss_fn = ox.losses.MSELoss()
+
+for epoch in range(10):
+    # Forward pass
+    y_pred = model(X)
+    loss = loss_fn(y_pred, y)
+
+    # Backward pass
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
+
+    print(f"Epoch {epoch}: Loss = {loss.data}")
+
+# Save the model to .oaxm format
+save_model(model, "my_model.oaxm")
+
+# Load the model
+loaded_model = load_model("my_model.oaxm", model_class=Sequential)
+```
+
+## Model Serialization with .oaxm
+
+OpenArchX provides a native model serialization format called `.oaxm` (OpenArchX Model):
+
+```python
+# Save a model with metadata
+metadata = {
+    "description": "My trained model",
+    "version": "1.0.0",
+    "author": "Your Name"
+}
+save_model(model, "model.oaxm", metadata=metadata)
+
+# Convert from PyTorch
+from openarchx.utils import convert_from_pytorch
+convert_from_pytorch(torch_model, "converted_model.oaxm")
+
+# Convert from TensorFlow
+from openarchx.utils import convert_from_tensorflow
+convert_from_tensorflow(tf_model, "converted_model.oaxm")
+```
+
+## Framework Integration
+
+### PyTorch Integration
+
+```python
+import torch
+import torch.nn as nn
+from openarchx.utils import get_pytorch_model_adapter
+
+# Convert PyTorch model to OpenArchX
+pt_model = nn.Sequential(
+    nn.Linear(10, 5),
+    nn.ReLU(),
+    nn.Linear(5, 1)
+)
+
+# Create an adapter to use the PyTorch model in OpenArchX
+adapted_model = get_pytorch_model_adapter(pt_model)
+output = adapted_model(Tensor(np.random.randn(1, 10)))
+```
+
+### TensorFlow Integration
+
+```python
+import tensorflow as tf
+from openarchx.utils import get_tensorflow_model_adapter
+
+# Use TensorFlow model in OpenArchX
+tf_model = tf.keras.Sequential([
+    tf.keras.layers.Dense(5, activation='relu', input_shape=(10,)),
+    tf.keras.layers.Dense(1)
+])
+
+# Create an adapter to use the TensorFlow model in OpenArchX
+adapted_model = get_tensorflow_model_adapter(tf_model)
+output = adapted_model(Tensor(np.random.randn(1, 10)))
+```
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
openarchx-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,43 @@
+openarchx/__init__.py,sha256=pG8ssyLIjKFRsA8W0pDJvgCm0HI2kGSi22CaT21g2kI,205
+openarchx/core/tensor.py,sha256=NS2-KZQudUPksg0ZKbwrybyt9OedpO7w_FwfTkbpz-c,6894
+openarchx/cuda/__init__.py,sha256=J8aKj9Jxch7yaI0p12GkzkbAeIAWK5pPTmkLQF_1BQI,725
+openarchx/cuda/cuda_ops.py,sha256=N7tzwYH4Va2J-W0ehUeCkcY2BdQC2xlze_fnQqD4NA8,10617
+openarchx/layers/activations.py,sha256=sqdFKSRCsLGgOjtRbB2qpUWB2pIFQPHPqJ1RoWHMrDk,1930
+openarchx/layers/base.py,sha256=TUFn3hgRKvi6aib66i2rwOf_SHozE-ycvFSBIPe2X94,1292
+openarchx/layers/cnn.py,sha256=qWwUiSMaQdFgyTOCwVKqN_IrGxBSex4_Py3IWChj8PY,6060
+openarchx/layers/transformer.py,sha256=l_MonyVYRmJIKfcdYaBMmh5PfmfsbPx8-pacgE7AswQ,5617
+openarchx/nn/__init__.py,sha256=6nm-YuCydqUInkDV8ub22wBhy1iB85AMEKeIwIFRWGY,572
+openarchx/nn/activations.py,sha256=68FLmEQRb6ue7Ij24aLa-WMxHjvdOxsJm5-QtYcwR64,4758
+openarchx/nn/containers.py,sha256=PdAPUbXN8U7ynDJQx6Rrjdykpn0RUZlMUsP9O5IO_9c,4756
+openarchx/nn/dropout.py,sha256=HAqQxiv3ETc0mY1cp1pwTKj6_G4AK8caJj7kGetnn1o,4121
+openarchx/nn/layers.py,sha256=mBGJwDFEQQ4QX2xzPLqT2MaZ66g0CtxAiPGJ1hloFVg,13870
+openarchx/nn/losses.py,sha256=_ijSBLiJwsx9hW3PMTkxE2AayvnIwISJKSAjEy0qnhI,5877
+openarchx/nn/module.py,sha256=B4vj8DxwyevL7xdGgoUQIbUxuPMxom_TfHWVLJ1avZk,583
+openarchx/nn/padding.py,sha256=AlfNE3KaYvy_AhU2C5Ri_iFFp23Mvh-CKY-ZbdxoMHI,4845
+openarchx/nn/pooling.py,sha256=FOyub46ZH31Rf6fPRvYzK_8U-EBgGksN_As0ZD9xhBw,14068
+openarchx/nn/rnn.py,sha256=_vMxzB4Dj03bXmYn84cms8Oq3h4ZHP2r2AWcGVk-JDg,9040
+openarchx/nn/transformers.py,sha256=0AyNqsTIWy56f7-QuE5rEmBEa4bTJ9PHUfoChOefKJQ,7914
+openarchx/optimizers/adam.py,sha256=DBtTUtHFMxkWm2eM2mcwGgEGqKtvV84PeCmtobIxBao,1861
+openarchx/optimizers/adaptive.py,sha256=MxMJ1WXk6tDk9PFpiuy28qfIpSZYtHcSQ2p6re80FVw,2780
+openarchx/optimizers/base.py,sha256=5O9hDPs5U2P9-69VDMeHw5WgMvsiVjj1TBRII8itX0o,750
+openarchx/optimizers/modern.py,sha256=pJnQnUsGdN6nyyeyglEqkkyO06XKSU89LhK4KTx_oks,4151
+openarchx/optimizers/optx.py,sha256=969dFa642fCYKtBFg-kuYwS_D_RudDomWwr0Agv5Jwc,3923
+openarchx/optimizers/sgd.py,sha256=sYdlx-nmPY8Alsj55GGd5Xz44JBtYrEg2iWbp1z3v-0,2585
+openarchx/quantum/circuit.py,sha256=gPrk-tSxeff6oHnv6k6mFycWK2daNpndAvejzG2l-H8,3203
+openarchx/quantum/gates.py,sha256=zR788dMYGdZKNKSqSENzlga_--K7MPzNFjHfY7h__IM,4626
+openarchx/utils/__init__.py,sha256=oSli5jb-kVAmrmLfxgj_hYv2GVeex9Bj5hAVDhh3l_c,1912
+openarchx/utils/data.py,sha256=YFok4Nappy9faAAyDNy-gTfIjtyG3_7dStQAiadF3Nc,8396
+openarchx/utils/huggingface.py,sha256=udNu7Sosixb3ld5TIVIINKPsyS1jf6Gm88k6lzz_OnY,11308
+openarchx/utils/losses.py,sha256=fgD4AFVauPC2HrvZjKoU6rx7tBDGawDw-8Ry1xK0mvI,723
+openarchx/utils/model_io.py,sha256=otQcOy39X6lWCEYw1cX-1iGlAiNmrGm0dMpxoMOTISU,18571
+openarchx/utils/pytorch.py,sha256=VCEDRGM3zEfB994plVfAhnaeGZHvNegISDs9qzq4kTA,17331
+openarchx/utils/tensorflow.py,sha256=T-h1Ttou2O1GnJJgt3ZvZ1MK-7o-ybT_35aus0MSXd8,18299
+openarchx/utils/transforms.py,sha256=6tS6tmvQI5e6G771tI3ttInB7lT1j9suKssfnyXxJCM,8445
+openarchx-0.1.0.dist-info/licenses/LICENSE,sha256=Ak-g7e8LDd8BKNcKnt6YQAWPtE9btoDNGzsT-qYMNCY,1085
+tests/__init__.py,sha256=bHuq05M3ZLXz8wZz33PzuPHIQPdse4SzXrgeajei3Hc,26
+tests/test_cuda_ops.py,sha256=aPzFnVbUp7uXUc7N1f_DG0-W9mrewORxEHnGMwKXLfo,6747
+tests/test_integrations.py,sha256=C0p5RyGG5DtjtsW1wba-4j431k2PyKex7zE4g-TLwZE,8538
+openarchx-0.1.0.dist-info/METADATA,sha256=hVE2lx18fL_6TXZneTA_9N7CN6oG8cw0JdTookK6hQA,5124
+openarchx-0.1.0.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
+openarchx-0.1.0.dist-info/top_level.txt,sha256=ZqkjpbZbSLXTIzjIzJH3vjMMUC92lw8573qJ6cdzEiE,16
+openarchx-0.1.0.dist-info/RECORD,,
openarchx-0.1.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 OpenArchX
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
tests/__init__.py
ADDED
@@ -0,0 +1 @@
+# OpenArchX Tests Package