deepdefend 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
1
+ MIT License (Modified)
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to make derivative works based on the Software, provided that any substantial changes to the Software are clearly distinguished from the original work and are distributed under a different name.
4
+
5
+ The original copyright notice and disclaimer must be retained in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,22 @@
1
+ Metadata-Version: 2.1
2
+ Name: deepdefend
3
+ Version: 0.1.0
4
+ Summary: An open-source Python library for adversarial attacks and defenses in deep learning models.
5
+ Home-page: https://github.com/infinitode/deepdefend
6
+ Author: Infinitode Pty Ltd
7
+ Author-email: infinitode.ltd@gmail.com
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.6
13
+ Classifier: Programming Language :: Python :: 3.7
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Requires-Python: >=3.6
19
+ Description-Content-Type: text/markdown
20
+ License-File: LICENSE
21
+
22
+ An open-source Python library for adversarial attacks and defenses in deep learning models, enhancing the security and robustness of AI systems.
@@ -0,0 +1,3 @@
"""deepdefend: adversarial attacks and defenses for deep learning models.

Re-exports the public API at package level so callers can write
``from deepdefend import fgsm`` etc.
"""

# NOTE: the original file did `import deepdefend` here — a redundant
# self-import of the package from inside its own __init__; removed.
from .attacks import fgsm, pgd, bim
from .defenses import adversarial_training, feature_squeezing

__all__ = ["fgsm", "pgd", "bim", "adversarial_training", "feature_squeezing"]
@@ -0,0 +1,103 @@
1
+ """
2
+ Functions to run adversarial attacks on deep learning models.
3
+
4
+ Available functions:
5
+ - `fgsm(model, x, y, epsilon=0.01)`: Fast Gradient Sign Method (FGSM) attack.
6
+ - `pgd(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10)`: Projected Gradient Descent (PGD) attack.
7
+ - `bim(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10)`: Basic Iterative Method (BIM) attack.
8
+
9
+ """
10
+
11
+ import numpy as np
12
+ import tensorflow as tf
13
+
14
def fgsm(model, x, y, epsilon=0.01):
    """
    Fast Gradient Sign Method (FGSM) attack.

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray or tf.Tensor): The input example(s) to attack.
        y (numpy.ndarray or tf.Tensor): The true labels of the input example(s).
        epsilon (float): The magnitude of the perturbation (default: 0.01).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example(s).
    """
    # Bug fix: the documented input is a numpy array, but tape.gradient
    # returns None for non-tensor inputs, which crashes tf.sign(gradient).
    # Convert up front so both numpy arrays and tensors work.
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    y = tf.convert_to_tensor(y)

    # Pick the loss to match the label shape: 1-D or single-column labels
    # are treated as binary, anything else as one-hot multi-class.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    with tf.GradientTape() as tape:
        tape.watch(x)
        prediction = model(x)
        loss = loss_object(y, prediction)

    gradient = tape.gradient(loss, x)

    # Single signed-gradient step of size epsilon.
    adversarial_example = x + epsilon * tf.sign(gradient)
    return adversarial_example.numpy()
44
+
45
def pgd(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10):
    """
    Projected Gradient Descent (PGD) attack.

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray or tf.Tensor): The input example(s) to attack.
        y (numpy.ndarray or tf.Tensor): The true labels of the input example(s).
        epsilon (float): The maximum magnitude of the perturbation (default: 0.01).
        alpha (float): The step size for each iteration (default: 0.01).
        num_steps (int): The number of PGD iterations (default: 10).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example(s).
    """
    # Bug fix: tape.gradient returns None for raw numpy inputs; convert first.
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    y = tf.convert_to_tensor(y)

    # Consistency fix: select the loss by label shape like fgsm does,
    # instead of hard-coding CategoricalCrossentropy.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    adversarial_example = tf.identity(x)

    for _ in range(num_steps):
        with tf.GradientTape() as tape:
            tape.watch(adversarial_example)
            prediction = model(adversarial_example)
            loss = loss_object(y, prediction)

        gradient = tape.gradient(loss, adversarial_example)
        adversarial_example = adversarial_example + alpha * tf.sign(gradient)
        # Bug fix: project onto the epsilon-ball FIRST, then clamp to the
        # valid [0, 1] range. The original order could return pixel values
        # outside [0, 1] whenever x was near the boundary.
        adversarial_example = tf.clip_by_value(adversarial_example, x - epsilon, x + epsilon)
        adversarial_example = tf.clip_by_value(adversarial_example, 0, 1)

    return adversarial_example.numpy()
74
+
75
def bim(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10):
    """
    Basic Iterative Method (BIM) attack.

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray or tf.Tensor): The input example(s) to attack.
        y (numpy.ndarray or tf.Tensor): The true labels of the input example(s).
        epsilon (float): The maximum magnitude of the perturbation (default: 0.01).
        alpha (float): The step size for each iteration (default: 0.01).
        num_steps (int): The number of BIM iterations (default: 10).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example(s).
    """
    # Bug fix: tape.gradient returns None for raw numpy inputs; convert first.
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    y = tf.convert_to_tensor(y)

    # Consistency fix: select the loss by label shape like fgsm does,
    # instead of hard-coding CategoricalCrossentropy.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    adversarial_example = tf.identity(x)

    for _ in range(num_steps):
        with tf.GradientTape() as tape:
            tape.watch(adversarial_example)
            prediction = model(adversarial_example)
            loss = loss_object(y, prediction)

        gradient = tape.gradient(loss, adversarial_example)
        adversarial_example = adversarial_example + alpha * tf.sign(gradient)
        # Bug fix: project onto the epsilon-ball FIRST, then clamp to the
        # valid [0, 1] range, so the result never leaves [0, 1].
        adversarial_example = tf.clip_by_value(adversarial_example, x - epsilon, x + epsilon)
        adversarial_example = tf.clip_by_value(adversarial_example, 0, 1)

    return adversarial_example.numpy()
@@ -0,0 +1,71 @@
1
+ """
2
+ Functions to apply adversarial defense mechanisms to deep learning models.
3
+
4
+ Available functions:
5
+ - `adversarial_training(model, x, y, epsilon=0.01)`: Adversarial Training defense.
6
+ - `feature_squeezing(model, bit_depth=4)`: Feature Squeezing defense.
7
+
8
+ """
9
+
10
+ import numpy as np
11
+ import tensorflow as tf
12
+ from deepdefend import attacks
13
+
14
def adversarial_training(model, x, y, epsilon=0.01, epochs=10, batch_size=32):
    """
    Adversarial Training defense.

    Trains a copy of the model on both the original and FGSM-perturbed
    examples, aiming to make the model more robust to adversarial attacks.

    Parameters:
        model (tensorflow.keras.Model): The model to defend.
        x (numpy.ndarray): The input training examples.
        y (numpy.ndarray): The true labels of the training examples.
        epsilon (float): The magnitude of the FGSM perturbation (default: 0.01).
        epochs (int): Number of training epochs (default: 10, the previous
            hard-coded value).
        batch_size (int): Training batch size (default: 32, the previous
            hard-coded value).

    Returns:
        defended_model (tensorflow.keras.Model): The adversarially trained model.
    """
    # Work on a copy so the caller's model is left untouched.
    defended_model = tf.keras.models.clone_model(model)
    defended_model.set_weights(model.get_weights())

    # Performance fix: one batched fgsm call instead of a per-example Python
    # loop (N model evaluations). Mean-reduced batch loss only rescales each
    # per-example gradient by 1/N, which does not change its sign, so the
    # resulting perturbations are identical.
    x_adversarial = attacks.fgsm(model, x, y, epsilon)

    # Train on the union of clean and adversarial examples (labels repeat).
    x_train = np.concatenate([x, x_adversarial], axis=0)
    y_train = np.concatenate([y, np.copy(y)], axis=0)

    # NOTE(review): loss is hard-coded to categorical_crossentropy; binary
    # tasks supported by attacks.fgsm would need a different loss here.
    defended_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    defended_model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)

    return defended_model
47
+
48
def feature_squeezing(model, bit_depth=4):
    """
    Feature Squeezing defense (applied to layer weights).

    Quantizes the weights of Conv2D and Dense layers to the given bit depth,
    which can remove certain adversarial perturbations.

    Parameters:
        model (tensorflow.keras.Model): The model to defend.
        bit_depth (int): The number of bits per weight (default: 4).

    Returns:
        defended_model (tensorflow.keras.Model): The model with squeezed weights.
    """
    # Work on a copy so the caller's model is left untouched.
    defended_model = tf.keras.models.clone_model(model)
    defended_model.set_weights(model.get_weights())

    levels = 2 ** bit_depth
    q_min = -(2 ** (bit_depth - 1))       # most negative quantized level
    q_max = 2 ** (bit_depth - 1) - 1      # most positive quantized level

    for layer in defended_model.layers:
        if isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
            squeezed_weights = []
            for w in layer.get_weights():
                w_max = np.max(np.abs(w))
                if w_max == 0:
                    # Bug fix: an all-zero tensor (e.g. a freshly initialized
                    # bias vector) made the original divide by zero, filling
                    # the layer with NaNs. Zero weights quantize to zero.
                    squeezed_weights.append(w)
                    continue
                scale = levels / w_max
                squeezed_weights.append(
                    np.clip(np.round(w * scale), q_min, q_max) / scale
                )
            layer.set_weights(squeezed_weights)

    return defended_model
@@ -0,0 +1,22 @@
1
+ Metadata-Version: 2.1
2
+ Name: deepdefend
3
+ Version: 0.1.0
4
+ Summary: An open-source Python library for adversarial attacks and defenses in deep learning models.
5
+ Home-page: https://github.com/infinitode/deepdefend
6
+ Author: Infinitode Pty Ltd
7
+ Author-email: infinitode.ltd@gmail.com
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.6
13
+ Classifier: Programming Language :: Python :: 3.7
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Requires-Python: >=3.6
19
+ Description-Content-Type: text/markdown
20
+ License-File: LICENSE
21
+
22
+ An open-source Python library for adversarial attacks and defenses in deep learning models, enhancing the security and robustness of AI systems.
@@ -0,0 +1,10 @@
1
+ LICENSE
2
+ setup.py
3
+ deepdefend/__init__.py
4
+ deepdefend/attacks.py
5
+ deepdefend/defenses.py
6
+ deepdefend.egg-info/PKG-INFO
7
+ deepdefend.egg-info/SOURCES.txt
8
+ deepdefend.egg-info/dependency_links.txt
9
+ deepdefend.egg-info/requires.txt
10
+ deepdefend.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ numpy
2
+ tensorflow
@@ -0,0 +1 @@
1
+ deepdefend
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,30 @@
1
"""Packaging configuration for the deepdefend distribution."""

from setuptools import setup, find_packages

# Short and long descriptions shown on the package index page.
_DESCRIPTION = (
    'An open-source Python library for adversarial attacks and defenses '
    'in deep learning models.'
)
_LONG_DESCRIPTION = (
    'An open-source Python library for adversarial attacks and defenses in '
    'deep learning models, enhancing the security and robustness of AI systems.'
)

# Trove classifiers advertising maturity, audience, license and supported
# Python versions.
_CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'Programming Language :: Python :: 3.10',
    'Programming Language :: Python :: 3.11',
]

setup(
    name='deepdefend',
    version='0.1.0',
    author='Infinitode Pty Ltd',
    author_email='infinitode.ltd@gmail.com',
    description=_DESCRIPTION,
    long_description=_LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/infinitode/deepdefend',
    packages=find_packages(),
    install_requires=['numpy', 'tensorflow'],
    classifiers=_CLASSIFIERS,
    python_requires='>=3.6',
)