easyvae 1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- easyvae/__init__.py +0 -0
- easyvae/activations.py +35 -0
- easyvae/autoencoder.py +158 -0
- easyvae/layers.py +106 -0
- easyvae/utils.py +46 -0
- easyvae-1.0.dist-info/METADATA +76 -0
- easyvae-1.0.dist-info/RECORD +10 -0
- easyvae-1.0.dist-info/WHEEL +5 -0
- easyvae-1.0.dist-info/licenses/LICENSE +21 -0
- easyvae-1.0.dist-info/top_level.txt +1 -0
easyvae/__init__.py
ADDED
|
File without changes
|
easyvae/activations.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class ActivationFunc(ABC):
    """Interface for activation functions used by the NN layers.

    Implementations are callables (``__call__`` applies the activation
    element-wise) and provide ``d``, the element-wise derivative.
    """

    @abstractmethod
    def d(self, v: np.ndarray) -> np.ndarray:
        """Return the derivative of the activation evaluated at ``v``.

        BUG FIX: the original declared ``def d(v)`` without ``self``,
        inconsistent with every concrete subclass (``def d(self, x)``).
        """
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ReLU(ActivationFunc):
    """Rectified linear unit: f(x) = x for positive x, 0 otherwise."""

    def __call__(self, x):
        # Keep positive entries, zero out the rest.
        return np.where(x > 0, x, 0)

    def d(self, x):
        # Boolean mask: 1 where the unit was active, 0 elsewhere.
        return np.greater(x, 0)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class LeakyReLU(ActivationFunc):
    """Leaky ReLU: identity for positive inputs, slope ``k`` otherwise."""

    def __init__(self, k=0.01):
        # Slope applied to non-positive inputs.
        self.k = k

    def __call__(self, x):
        return np.where(x > 0, x, self.k * x)

    def d(self, x):
        # 1 on the active side, k on the leaky side.
        return np.where(x > 0, 1.0, self.k)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class Identity(ActivationFunc):
    """Identity activation: passes values through unchanged."""

    def __call__(self, x):
        return x

    def d(self, x):
        # Constant derivative; the scalar 1 broadcasts against any
        # error array during backpropagation.
        return 1
|
easyvae/autoencoder.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from .utils import (
|
|
3
|
+
dynamic_loss_plot_init,
|
|
4
|
+
dynamic_loss_plot_update,
|
|
5
|
+
dynamic_loss_plot_finish
|
|
6
|
+
)
|
|
7
|
+
from tqdm import tqdm
|
|
8
|
+
from .layers import DeepNNLayer, SampleLayer
|
|
9
|
+
from .activations import ActivationFunc, Identity
|
|
10
|
+
from abc import ABC, abstractmethod
|
|
11
|
+
|
|
12
|
+
# Braille spinner frames cycled in the progress-bar description during training.
LOADER = ['⡿', '⣟', '⣯', '⣷', '⣾', '⣽', '⣻', '⢿']
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class AAutoencoder(ABC):
    """Abstract base class for autoencoders.

    Subclasses implement ``loss``, ``train`` (one sample) and
    ``forward``; this base provides dataset-level training with early
    stopping, plus save/load persistence via numpy object pickling.
    """

    def train_dataset(self,
                      data_set: list[np.ndarray],
                      max_epoch: int,
                      patience: int,
                      display_loss: bool = False) -> list[float]:
        """Train on ``data_set`` until ``max_epoch`` epochs elapse or the
        loss fails to improve for more than ``patience`` epochs.

        Returns the list of per-epoch mean losses; index 0 is the loss
        before any training. When ``display_loss`` is true, a live
        matplotlib plot of the curve is shown.
        """
        losses = [self.loss(data_set)]
        if display_loss:
            ax, line = dynamic_loss_plot_init(losses)
        epoch = 0
        no_improv = 0
        prev_error = losses[0]
        with tqdm(bar_format="{desc} {elapsed} {rate_fmt}") as lbar:
            while True:
                lbar.set_description(
                    f"{LOADER[epoch % len(LOADER)]} Training ({epoch=} error={float(prev_error):.6f})",  # noqa
                )
                lbar.update()
                error = 0
                for x in tqdm(data_set, leave=False):
                    error += self.train(x)
                error /= len(data_set)
                derror = prev_error - error
                # An epoch counts as "no improvement" when the loss got
                # worse or barely moved (delta below 1e-4).
                if derror <= 0 or abs(derror) < 1e-4:
                    no_improv += 1
                else:
                    no_improv = 0
                prev_error = float(error)
                losses.append(error)
                if display_loss:
                    dynamic_loss_plot_update(ax, line, losses)
                if no_improv > patience:
                    break
                if epoch > max_epoch:
                    break
                epoch += 1
        if display_loss:
            dynamic_loss_plot_finish(ax, line)
        return losses

    def save(self, path: str):
        """Pickle the whole model to ``path`` (numpy appends ``.npy``)."""
        path = path.removesuffix('.npy')
        np.save(path, self)

    @staticmethod
    def load(path: str) -> 'AAutoencoder':
        """Load a model previously written by :meth:`save`.

        BUG FIX: declared ``@staticmethod`` so that the documented call
        ``autoencoder.load("mymodel.npy")`` works — previously the
        instance itself was bound to ``path``.
        """
        path = path.removesuffix('.npy') + '.npy'
        data = np.load(path, allow_pickle=True)
        return data.item()

    @abstractmethod
    def loss(self, data_set: list[np.ndarray]) -> float:
        """Return the mean loss over ``data_set``."""

    @abstractmethod
    def train(self, v: np.ndarray) -> float:
        """Run one training step on sample ``v`` and return its loss."""

    @abstractmethod
    def forward(self, v: np.ndarray) -> np.ndarray:
        """Encode and decode ``v``."""
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class ClassicalAutoencoder(AAutoencoder):
    """Plain (deterministic) autoencoder: encoder -> code -> decoder."""

    def __init__(self,
                 encoder_layers: list[int],
                 decoder_layers: list[int],
                 lr: float,
                 activation_func: ActivationFunc):
        """Build the encoder and decoder MLPs.

        Raises:
            ValueError: if the encoder output width differs from the
                decoder input width.
        """
        if encoder_layers[-1] != decoder_layers[0]:
            # BUG FIX: the original message printed encoder_layers[0]
            # instead of the decoder's input size.
            raise ValueError(
                f"Encoder output and decoder input don't match {encoder_layers[-1]} != {decoder_layers[0]}"  # noqa
            )
        self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
        self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)

    def __str__(self):
        return f'Encoder:\n{self.encoder}\n\nDecoder:\n{self.decoder}'

    def loss(self, data_set: list[np.ndarray]) -> float:
        """Mean absolute reconstruction error averaged over the dataset."""
        loss = 0
        for x in data_set:
            loss += np.sum(np.abs(x - self.forward(x)[0])) / len(x)
        return loss / len(data_set)

    def train(self, v: np.ndarray):
        """One gradient step on sample ``v``; returns its mean abs error."""
        out = self.decoder.forward(
            self.encoder.forward(v)
        )
        error = out - v
        # Backpropagate through the decoder, then the encoder.
        self.encoder.backprop(
            self.decoder.backprop(error)
        )
        return np.sum(np.abs(error)) / len(v)

    def encode(self, v: np.ndarray) -> np.ndarray:
        """Map input ``v`` to its latent code."""
        return self.encoder.forward(v)

    def decode(self, v: np.ndarray) -> np.ndarray:
        """Map latent code ``v`` back to input space."""
        return self.decoder.forward(v)

    def forward(self, v: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        """Return ``(reconstruction, code)`` for input ``v``."""
        code = self.encode(v)
        out = self.decode(code)
        return out, code
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class VariationalAutoencoder(AAutoencoder):
    """VAE: encoder -> Gaussian sampling layer -> decoder."""

    def __init__(self,
                 encoder_layers: list[int],
                 decoder_layers: list[int],
                 lr: float,
                 activation_func: ActivationFunc):
        """Build the encoder/decoder MLPs and the sampling layer.

        Raises:
            ValueError: if the encoder output width differs from the
                decoder input width.
        """
        if encoder_layers[-1] != decoder_layers[0]:
            # BUG FIX: the original message printed encoder_layers[0]
            # instead of the decoder's input size.
            raise ValueError(
                f"Encoder output and decoder input don't match {encoder_layers[-1]} != {decoder_layers[0]}"  # noqa
            )
        self.encoder = DeepNNLayer(encoder_layers, lr, activation_func)
        self.decoder = DeepNNLayer(decoder_layers, lr, activation_func)
        # The sampler's heads are linear (Identity activation).
        self.sampler = SampleLayer(self.encoder.out_size, lr, Identity())

    def __str__(self):
        # Added for consistency with ClassicalAutoencoder.__str__.
        return f'Encoder:\n{self.encoder}\n\nDecoder:\n{self.decoder}'

    def loss(self, data_set: list[np.ndarray]) -> float:
        """Mean (MSE + KL-divergence) loss over ``data_set``."""
        loss = 0
        for x in data_set:
            out = self.forward(x)[0]
            # DKL uses the sampler state cached by the forward call above.
            kl = self.sampler.DKL()
            loss += np.mean((out - x) ** 2)
            loss += kl
        return loss / len(data_set)

    def train(self, v: np.ndarray) -> float:
        """One gradient step on sample ``v``; returns its MSE + KL loss."""
        out, _ = self.forward(v)
        error = out - v
        # Backpropagate decoder -> sampler -> encoder.
        self.encoder.backprop(
            self.sampler.backprop(
                self.decoder.backprop(error)
            )
        )
        return np.mean(error ** 2) + self.sampler.DKL()

    def forward(self, v: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
        """Return ``(reconstruction, code)``.

        ``code`` is the encoder output *before* the sampling layer.
        """
        code = self.encoder.forward(v)
        sample = self.sampler.forward(code)
        out = self.decoder.forward(sample)
        return out, code
|
easyvae/layers.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from .activations import ActivationFunc, Identity
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class NNLayer:
    """Single fully connected layer with Glorot-uniform initialisation.

    Caches the last forward pass (input, pre-activation, output) so
    that ``backprop`` can compute gradients.
    """

    def __init__(self,
                 in_size: int,
                 out_size: int,
                 lr: float,
                 activation_func: 'ActivationFunc'):
        # Glorot/Xavier uniform initialisation bound.
        limit = np.sqrt(6 / (in_size + out_size))
        self.W = np.random.uniform(-limit, limit, (in_size, out_size))
        self.B = np.zeros(out_size)
        self.lr = lr
        self.input = None          # last forward input
        self.output = None         # last activated output
        self.output_linear = None  # last pre-activation (v @ W + B)
        self.activation_func = activation_func

    def __str__(self):
        return f'[ {self.W.shape[0]} => {self.W.shape[1]}\tlr:{self.lr}\tactivation:{self.activation_func.__class__.__name__} ]'  # noqa

    def forward(self, v: np.ndarray) -> np.ndarray:
        """Compute ``activation(v @ W + B)``, caching intermediates."""
        self.input = v
        self.output_linear = self.input @ self.W + self.B
        self.output = self.activation_func(
            self.output_linear
        )
        return self.output

    def backprop(self, error: np.ndarray) -> np.ndarray:
        """Apply one SGD step from the output-side ``error`` and return
        the error propagated back to this layer's input.

        BUG FIX: the original used ``error *= ...`` which mutated the
        caller's array in place (callers also read that array after
        backprop); a fresh array is now computed instead.
        """
        error = error * self.activation_func.d(self.output_linear)
        ret = self.W @ error
        dW = np.outer(self.input, error) * self.lr
        dB = error * self.lr
        self.W -= dW
        self.B -= dB
        return ret
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class SampleLayer:
    # Reparameterised Gaussian sampling layer for the VAE: two parallel
    # linear heads produce the latent mean and log-variance; forward()
    # draws a sample using eps ~ N(0, 1).
    def __init__(self,
                 in_size: int,
                 lr: float,
                 activation_func: ActivationFunc):
        self.input = None
        # Head producing the latent mean (in_size -> in_size).
        self.mean_nn = NNLayer(
            in_size,
            in_size,
            lr,
            activation_func)
        # Head producing the latent log-variance (in_size -> in_size).
        self.std_nn = NNLayer(
            in_size,
            in_size,
            lr,
            activation_func)

    def DKL(self):
        # KL divergence between N(mean, exp(logvar)) and the standard
        # normal, averaged over latent dimensions. Uses the state cached
        # by the most recent forward() call.
        return -0.5 * np.mean(1 + self.logvar - self.mean ** 2 - np.exp(self.logvar))  # noqa

    def forward(self, v: np.ndarray) -> np.ndarray:
        # Sample a latent vector via the reparameterisation trick.
        # logvar is clipped to [-10, 10] to keep exp() numerically stable.
        self.input = v
        self.mean = self.mean_nn.forward(v)
        self.logvar = np.clip(self.std_nn.forward(v), -10, 10)
        self.std = np.exp(0.5 * self.logvar)
        self.eps = np.random.normal(0, 1, self.mean.shape)
        # NOTE(review): the 0.5 factor is non-standard — the usual trick
        # is mean + eps * std; confirm it is intentional.
        return 0.5 * self.eps * self.std + self.mean

    def backprop(self, error: np.ndarray) -> np.ndarray:
        # Folds the KL gradient into the reconstruction gradient:
        # the `+ self.mean` and `0.5 * (exp(logvar) - 1)` terms are the
        # KL derivatives w.r.t. mean and logvar respectively.
        dmean = error + self.mean
        dstd = error * self.eps + 0.5 * (np.exp(self.logvar) - 1)
        mean_error = self.mean_nn.backprop(dmean)
        # `dstd * self.std` — presumably the chain-rule factor from std
        # to logvar; verify against the 0.5 scaling used in forward().
        logvar_error = self.std_nn.backprop(dstd * self.std)
        return mean_error + logvar_error
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class DeepNNLayer:
    """A stack of fully connected ``NNLayer``s applied in sequence.

    The final layer always uses the ``Identity`` activation; every
    other layer uses ``activation_func``.
    """

    def __init__(self,
                 layers: list[int],
                 lr: float,
                 activation_func: ActivationFunc):
        last = len(layers) - 2
        # Pair consecutive widths to build each layer; the last pair
        # gets a linear (Identity) activation.
        self.layers: list[NNLayer] = [
            NNLayer(
                fan_in,
                fan_out,
                lr,
                Identity() if idx == last else activation_func,
            )
            for idx, (fan_in, fan_out) in enumerate(zip(layers, layers[1:]))
        ]
        self.in_size = layers[0]
        self.out_size = layers[-1]

    def __str__(self):
        return '\n'.join(str(layer) for layer in self.layers)

    def forward(self, v: np.ndarray) -> np.ndarray:
        """Feed ``v`` through every layer, front to back."""
        out = v
        for layer in self.layers:
            out = layer.forward(out)
        return out

    def backprop(self, error: np.ndarray) -> np.ndarray:
        """Propagate ``error`` back to front, updating every layer."""
        grad = error
        for layer in reversed(self.layers):
            grad = layer.backprop(grad)
        return grad
|
easyvae/utils.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
|
|
2
|
+
import numpy as np
|
|
3
|
+
import matplotlib.pyplot as plt
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def softmax(v: np.ndarray) -> np.ndarray:
    """Numerically stable softmax over the whole array.

    Shifting by the maximum leaves the result unchanged but prevents
    overflow in ``exp``.
    """
    shifted = np.exp(v - np.max(v))
    return shifted / shifted.sum()
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def normalize(v: np.ndarray) -> np.ndarray:
    """Scale ``v`` to unit Euclidean norm.

    The small epsilon keeps the division defined for zero vectors.
    """
    magnitude = np.linalg.norm(v)
    return v / (magnitude + 1e-8)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def regularize(v: np.ndarray) -> np.ndarray:
    """Min-max scale ``v`` to [0, 1] along axis 0.

    BUG FIX: the original guard ``if v_min - v_max == 0`` raises
    "truth value of an array is ambiguous" for 2-D input, even though
    the min/max are already taken per column. The span is now guarded
    element-wise: constant columns map to 0, and a fully constant
    input is returned unchanged (matching the original 1-D behaviour).
    """
    v_min = v.min(axis=0)
    v_max = v.max(axis=0)
    span = v_max - v_min
    if np.all(span == 0):
        # Entirely constant input: nothing to scale.
        return v
    # Substitute 1 for zero spans to avoid division by zero.
    return (v - v_min) / np.where(span == 0, 1, span)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def dynamic_loss_plot_init(losses: list):
    """Open an interactive matplotlib figure showing the loss curve.

    Returns the axes and the line artist so callers can update the
    curve in place via ``dynamic_loss_plot_update``.
    """
    plt.ion()
    _, axes = plt.subplots()
    (curve,) = axes.plot([0], losses, label="Loss")
    axes.set_xlabel("Epoch")
    axes.set_ylabel("Loss")
    axes.set_title("Training Loss")
    axes.legend()
    return axes, curve
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def dynamic_loss_plot_update(ax, line, loss):
    """Redraw the loss curve with the latest values and rescale axes."""
    epochs = range(len(loss))
    line.set_xdata(epochs)
    line.set_ydata(loss)
    ax.relim()
    ax.autoscale_view()
    plt.draw()
    # Brief pause lets the GUI event loop process the redraw.
    plt.pause(0.1)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def dynamic_loss_plot_finish(ax, line):
    """Leave interactive mode and block on the final plot window.

    ``ax`` and ``line`` are accepted for symmetry with the other
    dynamic-plot helpers but are not used here.
    """
    plt.ioff()
    plt.show()
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: easyvae
|
|
3
|
+
Version: 1.0
|
|
4
|
+
Summary: Python implementation of a Classical and Variational Autoencoders
|
|
5
|
+
Author-email: Ravaka RALAMBOARIVONY <ravaka.rlb.pro@gmail.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/lenoctambule/autoencoder
|
|
8
|
+
Project-URL: Issues, https://github.com/lenoctambule/autoencoder/issues
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Operating System :: OS Independent
|
|
11
|
+
Requires-Python: >=3.10
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
License-File: LICENSE
|
|
14
|
+
Dynamic: license-file
|
|
15
|
+
|
|
16
|
+
# Python AutoEncoder from scratch using Numpy
|
|
17
|
+
|
|
18
|
+
## Usage
|
|
19
|
+
|
|
20
|
+
1. To install from source :
|
|
21
|
+
```sh
|
|
22
|
+
$ git clone git@github.com:lenoctambule/autoencoder.git
|
|
23
|
+
$ pip install -e autoencoder/
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
2. Optionally, run mnist_test.py to see it in action on the MNIST dataset.
|
|
27
|
+
```sh
|
|
28
|
+
$ cd examples
|
|
29
|
+
$ py mnist_test.py
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Training
|
|
33
|
+
|
|
34
|
+
Instantiate a `ClassicalAutoencoder` or `VariationalAutoencoder` object:
|
|
35
|
+
```py
|
|
36
|
+
from easyvae.autoencoder import ClassicalAutoencoder, VariationalAutoencoder
|
|
37
|
+
from easyvae.activations import LeakyReLU
|
|
38
|
+
|
|
39
|
+
autoencoder = ClassicalAutoencoder(
|
|
40
|
+
[768, 64, 16],
|
|
41
|
+
[16, 64, 768],
|
|
42
|
+
0.01,
|
|
43
|
+
LeakyReLU()
|
|
44
|
+
)
|
|
45
|
+
# or
|
|
46
|
+
autoencoder = VariationalAutoencoder(
|
|
47
|
+
[768, 64, 16],
|
|
48
|
+
[16, 64, 768],
|
|
49
|
+
0.01,
|
|
50
|
+
LeakyReLU()
|
|
51
|
+
)
|
|
52
|
+
```
|
|
53
|
+
Then train over a dataset via the `train_dataset` method:
|
|
54
|
+
```py
|
|
55
|
+
autoencoder.train_dataset(data, max_epoch=100, patience=5)
|
|
56
|
+
```
|
|
57
|
+
Or feed data points one at a time via the `train` method:
|
|
58
|
+
```py
|
|
59
|
+
autoencoder.train(v)
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
After training, you can save your model via the `save` method and load it back using the `load` method:
|
|
63
|
+
```py
|
|
64
|
+
autoencoder.save("mymodel.npy")
|
|
65
|
+
autoencoder.load("mymodel.npy")
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
## Inference
|
|
69
|
+
|
|
70
|
+
Use your `Autoencoder` object with the `encode`, `decode`, `forward` methods like so :
|
|
71
|
+
```py
|
|
72
|
+
example = ...
|
|
73
|
+
code = autoencoder.encode(example)
|
|
74
|
+
output = autoencoder.decode(code)
|
|
75
|
+
output, code = autoencoder.forward(example)
|
|
76
|
+
```
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
easyvae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
easyvae/activations.py,sha256=_m6HfYf0Ql47nnE_1ja7myjUFsa5cRDqiA4YEIsccqM,628
|
|
3
|
+
easyvae/autoencoder.py,sha256=nlDTDOg9zsifuVsL_-A9-lckAdzFie_5vaKjzS6Rddw,5386
|
|
4
|
+
easyvae/layers.py,sha256=eEvD1RyS25mBaVARAloFBNxw16RqQvW0Hg6dfQGw4Ck,3422
|
|
5
|
+
easyvae/utils.py,sha256=izWeHZynrJiyXHNnoBpjCZulG4WnyFFNKxMra5Gb2S0,957
|
|
6
|
+
easyvae-1.0.dist-info/licenses/LICENSE,sha256=ns8NzocsWCnsNVwzaRKvSwR2De-afT-6rfHB9B7i1h4,1078
|
|
7
|
+
easyvae-1.0.dist-info/METADATA,sha256=Jb0FK8RsLJ74d9bwc6dnaIEE-BctIv-vLx2ko7KPBH0,1906
|
|
8
|
+
easyvae-1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
9
|
+
easyvae-1.0.dist-info/top_level.txt,sha256=IvHSUpiBQtoeBle9XwznzBfSxnihu9Ee2AtQ2IaCKLs,8
|
|
10
|
+
easyvae-1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 RALAMBOARIVONY Ravaka
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
easyvae
|