attackbenchlib 1.0.0a9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. attackbench/__init__.py +180 -0
  2. attackbench/adv_lib_sub.py +239 -0
  3. attackbench/attacks/README.md +65 -0
  4. attackbench/attacks/__init__.py +17 -0
  5. attackbench/attacks/adv_lib/__init__.py +0 -0
  6. attackbench/attacks/adv_lib/configs.py +415 -0
  7. attackbench/attacks/adv_lib/wrapper.py +66 -0
  8. attackbench/attacks/art/__init__.py +0 -0
  9. attackbench/attacks/art/configs.py +276 -0
  10. attackbench/attacks/art/wrapper.py +86 -0
  11. attackbench/attacks/bomn.py +262 -0
  12. attackbench/attacks/cleverhans/__init__.py +0 -0
  13. attackbench/attacks/cleverhans/configs.py +137 -0
  14. attackbench/attacks/cleverhans/wrapper.py +75 -0
  15. attackbench/attacks/deeprobust/__init__.py +0 -0
  16. attackbench/attacks/deeprobust/configs.py +114 -0
  17. attackbench/attacks/deeprobust/wrapper.py +82 -0
  18. attackbench/attacks/foolbox/__init__.py +0 -0
  19. attackbench/attacks/foolbox/bb_adv_init.py +20 -0
  20. attackbench/attacks/foolbox/configs.py +294 -0
  21. attackbench/attacks/foolbox/wrapper.py +83 -0
  22. attackbench/attacks/original/__init__.py +0 -0
  23. attackbench/attacks/original/auto_pgd.py +773 -0
  24. attackbench/attacks/original/configs.py +288 -0
  25. attackbench/attacks/original/deepfool.py +103 -0
  26. attackbench/attacks/original/fast_adaptive_boundary.py +624 -0
  27. attackbench/attacks/original/fast_minimum_norm.py +431 -0
  28. attackbench/attacks/original/pgd_lzero.py +213 -0
  29. attackbench/attacks/original/sigma_zero.py +141 -0
  30. attackbench/attacks/original/superdeepfool.py +167 -0
  31. attackbench/attacks/original/trust_region.py +263 -0
  32. attackbench/attacks/registry.py +321 -0
  33. attackbench/attacks/torchattacks/__init__.py +0 -0
  34. attackbench/attacks/torchattacks/configs.py +209 -0
  35. attackbench/attacks/torchattacks/wrapper.py +69 -0
  36. attackbench/attacks.json +122 -0
  37. attackbench/compat.py +14 -0
  38. attackbench/custom_components.py +310 -0
  39. attackbench/datasets/__init__.py +0 -0
  40. attackbench/datasets/imagenet.py +79 -0
  41. attackbench/datasets/registry.py +75 -0
  42. attackbench/datasets/subsets/__init__.py +0 -0
  43. attackbench/datasets/subsets/imagenet-5000-val.txt +5000 -0
  44. attackbench/metrics/__init__.py +90 -0
  45. attackbench/metrics/analysis.py +245 -0
  46. attackbench/metrics/curves.py +89 -0
  47. attackbench/metrics/distances.py +134 -0
  48. attackbench/metrics/ensemble.py +90 -0
  49. attackbench/metrics/global_optimality.py +320 -0
  50. attackbench/metrics/optimality.py +254 -0
  51. attackbench/metrics/storage.py +140 -0
  52. attackbench/models/__init__.py +0 -0
  53. attackbench/models/benchmodel_wrapper.py +268 -0
  54. attackbench/models/mnist.py +51 -0
  55. attackbench/models/original/__init__.py +0 -0
  56. attackbench/models/original/stutz2020/__init__.py +0 -0
  57. attackbench/models/original/stutz2020/ccat.py +44 -0
  58. attackbench/models/original/stutz2020/classifier.py +169 -0
  59. attackbench/models/original/stutz2020/resnet.py +96 -0
  60. attackbench/models/original/stutz2020/resnet_block.py +79 -0
  61. attackbench/models/original/stutz2020/torch.py +160 -0
  62. attackbench/models/original/utils.py +15 -0
  63. attackbench/models/original/wang2023/__init__.py +0 -0
  64. attackbench/models/original/wang2023/dm_adv_training.py +69 -0
  65. attackbench/models/original/wang2023/wideresnetwithswish.py +199 -0
  66. attackbench/models/original/xiao2020/__init__.py +0 -0
  67. attackbench/models/original/xiao2020/kwta.py +23 -0
  68. attackbench/models/original/xiao2020/models.py +516 -0
  69. attackbench/models/original/zhang2020/__init__.py +0 -0
  70. attackbench/models/original/zhang2020/crown.py +49 -0
  71. attackbench/models/original/zhang2020/model_defs_gowal.py +90 -0
  72. attackbench/models/original/zhang2020/utils.py +23 -0
  73. attackbench/models/registry.py +228 -0
  74. attackbench/preconfigured.py +114 -0
  75. attackbench/run.py +414 -0
  76. attackbench/utils.py +7 -0
  77. attackbench/wandb/__init__.py +31 -0
  78. attackbench/wandb/manager.py +817 -0
  79. attackbench/wandb/utils.py +187 -0
  80. attackbenchlib-1.0.0a9.dist-info/METADATA +256 -0
  81. attackbenchlib-1.0.0a9.dist-info/RECORD +84 -0
  82. attackbenchlib-1.0.0a9.dist-info/WHEEL +5 -0
  83. attackbenchlib-1.0.0a9.dist-info/entry_points.txt +3 -0
  84. attackbenchlib-1.0.0a9.dist-info/top_level.txt +1 -0
@@ -0,0 +1,180 @@
1
+ """
2
+ AttackBench - A Python package for benchmarking adversarial attacks.
3
+
4
+ Usage:
5
+ import attackbench
6
+
7
+ results = attackbench.run_attack(model, dataset, attack, 'linf', device)
8
+ stats = attackbench.get_stats(results, 'linf')
9
+
10
+ Optional subpackages:
11
+ - attacks: pip install attackbench[attacks] (adversarial attack libraries)
12
+ - metrics: pip install attackbench[metrics] (analysis & evaluation tools)
13
+ Both are optional and independent of each other.
14
+ """
15
+
16
+ import importlib
17
+
18
+ # Version info
19
+ __version__ = "1.0.0"
20
+
21
+ # ── Core API (always available) ──────────────────────────────────────────
22
+ from .run import run_attack
23
+ from .custom_components import create_custom_attack
24
+
25
+ # ── Helpers to load objects (always available) ───────────────────────────
26
+ from .datasets.registry import get_loader
27
+
28
+
29
+ # ── RobustBench integration ─────────────────────────────────────────────
30
def load_model(model_name, dataset='cifar10', threat_model='Linf', **kwargs):
    """Load a RobustBench model and attach AttackBench metadata for automatic extraction."""
    # Imported lazily so the core package works without robustbench installed.
    from robustbench import load_model as _rb_load_model

    loaded = _rb_load_model(
        model_name=model_name,
        dataset=dataset,
        threat_model=threat_model,
        **kwargs,
    )
    # Tag the model so downstream tooling can recover its provenance.
    loaded._attackbench_model = model_name
    loaded._attackbench_dataset = dataset
    return loaded
37
+
38
+ # ── W&B integration ─────────────────────────────────────────────────────
39
+ from .wandb import (
40
+ upload_precompiled_distances,
41
+ download_precompiled_distances,
42
+ list_available_distances,
43
+ upload_optimal_distances,
44
+ download_optimal_distances,
45
+ get_precompiled_distances,
46
+ get_optimal_distances,
47
+ )
48
+
49
+ # ── Lazy imports for optional subpackages (attacks/ and metrics/) ────────
50
+ # These symbols are resolved on first access via __getattr__ (PEP 562).
51
+ # If the subpackage is not installed, a clear ImportError is raised.
52
+
53
# Each table maps an exported name to (relative submodule, attribute).
# The tables are split by optional-dependency group so the lazy resolver can
# name the right pip extra when an import fails.

_LAZY_ATTACKS = {
    'get_attack':   ('attacks.registry', 'get_attack'),
    'list_attacks': ('attacks.registry', 'list_attacks'),
    'bomn_attack':  ('attacks.bomn', 'bomn_attack'),
}

_LAZY_MODELS = {
    'get_model': ('models.registry', 'get_model'),
}

_LAZY_METRICS = {
    'get_stats':                  ('metrics.analysis', 'get_stats'),
    'eval_optimality':            ('metrics.distances', 'eval_optimality'),
    'ensemble_gain':              ('metrics.ensemble', 'ensemble_gain'),
    'ensemble_distances':         ('metrics.ensemble', 'ensemble_distances'),
    'compare_attacks':            ('metrics.analysis', 'compare_attacks'),
    'compute_curves':             ('metrics.analysis', 'compute_curves'),
    'compute_optimality':         ('metrics.analysis', 'compute_optimality'),
    'compute_efficiency':         ('metrics.analysis', 'compute_efficiency'),
    'compute_local_optimality':   ('metrics.optimality', 'compute_local_optimality'),
    'compare_attacks_optimality': ('metrics.optimality', 'compare_attacks_optimality'),
    'compute_global_optimality':  ('metrics.global_optimality', 'compute_global_optimality'),
    'create_attack_leaderboard':  ('metrics.global_optimality', 'create_attack_leaderboard'),
    'compare_attacks_global':     ('metrics.global_optimality', 'compare_attacks_global'),
    'format_leaderboard':         ('metrics.global_optimality', 'format_leaderboard'),
}
79
+
80
+
81
def __getattr__(name: str):
    """Resolve optional lazy attributes on first access (PEP 562).

    Looks *name* up in the lazy tables, imports the owning submodule, caches
    the resolved object in module globals (so this hook only runs once per
    name), and returns it.

    Raises:
        ImportError: the attribute exists but its optional dependencies are
            not installed; the message names the pip extra to install.
        AttributeError: the attribute is unknown.
    """
    # (table, requirement description, pip extra) per optional group.
    # Previously each group had its own copy-pasted try/import/raise branch;
    # a single table-driven loop keeps the three paths in sync.
    lazy_groups = (
        (_LAZY_MODELS, 'robustbench', 'models'),
        (_LAZY_ATTACKS, "the 'attacks' subpackage", 'attacks'),
        (_LAZY_METRICS, "the 'metrics' subpackage", 'metrics'),
    )
    for table, requirement, extra in lazy_groups:
        if name not in table:
            continue
        submodule, attr = table[name]
        try:
            mod = importlib.import_module(f'.{submodule}', __name__)
            value = getattr(mod, attr)
        except ImportError as e:  # ModuleNotFoundError is a subclass
            raise ImportError(
                f"attackbench.{name} requires {requirement}. "
                f"Install it with: pip install attackbench[{extra}]"
            ) from e
        globals()[name] = value  # cache for subsequent accesses
        return value

    raise AttributeError(f"module 'attackbench' has no attribute '{name}'")
125
+
126
+
127
def __dir__():
    """Support tab-completion by advertising the lazy attributes as well."""
    names = list(globals())
    for table in (_LAZY_MODELS, _LAZY_ATTACKS, _LAZY_METRICS):
        names.extend(table)
    return names
134
+
135
+
136
# Public API.  Names not imported eagerly above are resolved lazily via
# __getattr__, so listing them here keeps star-imports and dir() complete.
__all__ = [
    # Core
    'run_attack', 'get_stats',
    # Helpers to load objects
    'load_model', 'get_model', 'get_loader', 'get_attack', 'list_attacks',
    # Custom components
    'create_custom_attack',
    # BoMN composite attack
    'bomn_attack',
    # Analysis functions
    'eval_optimality', 'ensemble_gain', 'ensemble_distances',
    'compare_attacks', 'compute_curves', 'compute_optimality',
    'compute_efficiency',
    # Stage 3: local optimality
    'compute_local_optimality', 'compare_attacks_optimality',
    # Stage 4-5: global optimality & ranking
    'compute_global_optimality', 'create_attack_leaderboard',
    'compare_attacks_global', 'format_leaderboard',
    # W&B functions
    'upload_precompiled_distances', 'download_precompiled_distances',
    'list_available_distances', 'get_precompiled_distances',
    'upload_optimal_distances', 'download_optimal_distances',
    'get_optimal_distances',
]
@@ -0,0 +1,239 @@
1
+ """
2
+ adv_lib_sub.py - Substitute for adv_lib external dependency.
3
+
4
+ This module provides internal implementations of functions that were
5
+ previously imported from adv_lib, allowing AttackBench to work without
6
+ the external adv_lib dependency.
7
+
8
+ Contains:
9
+ - Distance metrics (l0, l1, l2, linf)
10
+ - Default metrics dictionary
11
+ - Loss functions (difference_of_logits)
12
+ - Model utilities (normalize_model, NormalizeLayer)
13
+ """
14
+
15
+ from collections import OrderedDict
16
+ from typing import Optional, Tuple, Union
17
+
18
+ import torch
19
+ from torch import nn, Tensor
20
+
21
+
22
+ # =============================================================================
23
+ # DISTANCE METRICS
24
+ # =============================================================================
25
+
26
def l0_distances(x: Tensor, x_adv: Tensor, dim: Optional[int] = None) -> Tensor:
    """Per-sample L0 distance: the number of elements differing by more than 1e-10.

    Args:
        x: Original inputs, shape (batch_size, ...).
        x_adv: Adversarial inputs, same shape as ``x``.
        dim: First dimension to flatten over; defaults to 1 (per sample).

    Returns:
        L0 distances per sample, as floats.
    """
    start = 1 if dim is None else dim
    # Elements are counted as perturbed only beyond a small tolerance.
    perturbed = (x - x_adv).abs().flatten(start_dim=start) > 1e-10
    return perturbed.sum(dim=-1).float()
44
+
45
+
46
def l1_distances(x: Tensor, x_adv: Tensor, dim: Optional[int] = None) -> Tensor:
    """Per-sample L1 distance (sum of absolute differences).

    Args:
        x: Original inputs, shape (batch_size, ...).
        x_adv: Adversarial inputs, same shape as ``x``.
        dim: First dimension to flatten over; defaults to 1 (per sample).

    Returns:
        L1 distances per sample.
    """
    start = 1 if dim is None else dim
    return (x - x_adv).abs().flatten(start_dim=start).sum(dim=-1)
64
+
65
+
66
def l2_distances(x: Tensor, x_adv: Tensor, dim: Optional[int] = None) -> Tensor:
    """Per-sample L2 (Euclidean) distance.

    Args:
        x: Original inputs, shape (batch_size, ...).
        x_adv: Adversarial inputs, same shape as ``x``.
        dim: First dimension to flatten over; defaults to 1 (per sample).

    Returns:
        L2 distances per sample.
    """
    start = 1 if dim is None else dim
    return (x - x_adv).flatten(start_dim=start).norm(p=2, dim=-1)
84
+
85
+
86
def linf_distances(x: Tensor, x_adv: Tensor, dim: Optional[int] = None) -> Tensor:
    """Per-sample L∞ distance (maximum absolute difference).

    Args:
        x: Original inputs, shape (batch_size, ...).
        x_adv: Adversarial inputs, same shape as ``x``.
        dim: First dimension to flatten over; defaults to 1 (per sample).

    Returns:
        L∞ distances per sample.
    """
    start = 1 if dim is None else dim
    return (x - x_adv).abs().flatten(start_dim=start).max(dim=-1).values
104
+
105
+
106
+ # =============================================================================
107
+ # DEFAULT METRICS
108
+ # =============================================================================
109
+
110
+ # Default metrics dictionary for tracking distance metrics
111
+ # This matches the adv_lib._default_metrics structure
112
# Ordered name → distance-function table, mirroring adv_lib's
# _default_metrics structure (same keys, same order).
_default_metrics = OrderedDict(
    linf=linf_distances,
    l2=l2_distances,
    l1=l1_distances,
    l0=l0_distances,
)
118
+
119
+
120
+ # =============================================================================
121
+ # LOSS FUNCTIONS
122
+ # =============================================================================
123
+
124
def difference_of_logits(logits: Tensor, labels: Tensor, targeted: bool = False) -> Tensor:
    """Compute the Difference of Logits (DL) loss.

    DL loss is defined as:
      - untargeted: logit[true_class] - max(logit[other_classes])
      - targeted:   max(logit[other_classes]) - logit[target_class]

    Negative values indicate a successful attack: untargeted success means
    some other class out-scores the true class; targeted success means the
    target class out-scores every other class.

    Args:
        logits: Model output logits, shape (batch_size, num_classes)
        labels: True labels (untargeted) or target labels (targeted), shape (batch_size,)
        targeted: Whether this is a targeted attack

    Returns:
        DL loss per sample, shape (batch_size,)
    """
    labels_col = labels.unsqueeze(1)

    # Logit of the true/target class for each sample.
    target_logits = logits.gather(1, labels_col).squeeze(1)

    # Max logit over the remaining classes, masking the labelled class with
    # -inf.  scatter avoids the 0 * inf -> nan hazard of the multiplicative
    # masking (logits * mask) used previously when a logit is infinite.
    other_logits = logits.scatter(1, labels_col, float('-inf')).amax(dim=1)

    if targeted:
        return other_logits - target_logits
    return target_logits - other_logits
159
+
160
+
161
+ # =============================================================================
162
+ # MODEL UTILITIES
163
+ # =============================================================================
164
+
165
class NormalizeLayer(nn.Module):
    """Channel-wise input normalization layer, meant to be prepended to a model."""

    def __init__(self, mean: Union[Tuple[float, ...], Tensor],
                 std: Union[Tuple[float, ...], Tensor]):
        """
        Args:
            mean: Per-channel mean values.
            std: Per-channel standard deviation values.
        """
        super().__init__()

        mean_t = torch.tensor(mean) if isinstance(mean, (tuple, list)) else mean
        std_t = torch.tensor(std) if isinstance(std, (tuple, list)) else std

        # Stored as buffers (move with .to()/.cuda(), excluded from gradients)
        # and reshaped to (1, C, 1, 1) to broadcast over NCHW image batches.
        self.register_buffer('mean', mean_t.view(1, -1, 1, 1))
        self.register_buffer('std', std_t.view(1, -1, 1, 1))

    def forward(self, x: Tensor) -> Tensor:
        """Return ``(x - mean) / std``."""
        return (x - self.mean) / self.std
190
+
191
+
192
def normalize_model(model: nn.Module,
                    mean: Union[Tuple[float, ...], Tensor],
                    std: Union[Tuple[float, ...], Tensor]) -> nn.Module:
    """Prepend a normalization layer to *model*.

    The returned sequential model accepts inputs in [0, 1] and normalizes
    them before forwarding to the wrapped model.

    Args:
        model: PyTorch model to wrap.
        mean: Per-channel normalization means.
        std: Per-channel normalization standard deviations.

    Returns:
        ``nn.Sequential(NormalizeLayer(mean, std), model)``.

    Example:
        >>> model = resnet18()
        >>> # Normalize with ImageNet stats
        >>> normalized_model = normalize_model(
        ...     model,
        ...     mean=(0.485, 0.456, 0.406),
        ...     std=(0.229, 0.224, 0.225)
        ... )
    """
    return nn.Sequential(NormalizeLayer(mean, std), model)
220
+
221
+
222
+ # =============================================================================
223
+ # EXPORTS
224
+ # =============================================================================
225
+
226
# Public API of this adv_lib substitute (the private _default_metrics table is
# exported deliberately, matching the adv_lib name it replaces).
__all__ = [
    # distance metrics
    'l0_distances', 'l1_distances', 'l2_distances', 'linf_distances',
    # default metrics table
    '_default_metrics',
    # loss functions
    'difference_of_logits',
    # model utilities
    'NormalizeLayer', 'normalize_model',
]
@@ -0,0 +1,65 @@
1
+ # Adding an attack
2
+
3
+ To add an attack that can be called through the sacred CLI, you need to implement two functions.
4
+
5
+ ### Config function
6
+
7
+ The first function will be the named config and should follow the template:
8
+
9
+ ```python
10
+ def <library prefix>_<config name>():
11
+ name = '<name of the attack>'
12
+ source = '<name of the library>'
13
+ threat_model = '<name of the threat model>'
14
+ option1 = 0.01
15
+ ```
16
+
17
+ The library prefix corresponds to a shorter version of the library name; for instance, Foolbox's prefix is `fb`.
18
+ All other variables of the config function will correspond to the attack's options.
19
+ This function should be placed in the `<library>/configs.py` file.
20
+
21
+ ### Getter function
22
+
23
+ The second function to be implemented is the getter function, which will return the attack as a callable. This getter
24
+ function should follow the template:
25
+
26
+ ```python
27
+ def get_<library prefix>_<attack name>(option1: float) -> Callable:
28
+ return ...
29
+ ```
30
+
31
+ The `<attack name>` in the name of the getter function should match exactly the name of the attack in the config
32
+ function: `name = <attack name>`. This is necessary to determine which getter function to call when calling a named
33
+ config.
34
+
35
+ ### Example
36
+
37
+ In `foolbox/configs.py`, the DDN attack is added with the two functions:
38
+
39
+ ```python
40
+ def fb_ddn():
41
+ name = 'ddn'
42
+ source = 'foolbox'
43
+ threat_model = 'l2'
44
+ init_epsilon = 1
45
+ num_steps = 100
46
+ gamma = 0.05
47
+
48
+
49
+ def get_fb_ddn(init_epsilon: float, num_steps: int, gamma: float) -> Callable:
50
+ return partial(DDNAttack, init_epsilon=init_epsilon, steps=num_steps, gamma=gamma)
51
+ ```
52
+
53
+ Additionally, one could add a second named config for DDN by simply implementing a second config function:
54
+
55
+ ```python
56
+ def fb_ddn_large_gamma():
57
+ name = 'ddn'
58
+ source = 'foolbox'
59
+ threat_model = 'l2'
60
+ init_epsilon = 1
61
+ num_steps = 100
62
+ gamma = 0.5
63
+ ```
64
+
65
+ This second named config would point to the same getter function.
@@ -0,0 +1,17 @@
1
# NOTE(review): names suggest these configure a minimal-perturbation search
# (step count and per-threat-model starting epsilon) — confirm against the
# attack wrappers that consume them.
minimal_search_steps = 20

minimal_init_eps = {
    'l0': 100,
    'l1': 10,
    'l2': 1,
    'linf': 1 / 255,
}
8
+
9
+ # Pre-configured attack instances — importable as `from attackbench.attacks import pgd`
10
+ from ..preconfigured import (
11
+ pgd, fgsm, apgd, fab, fmn, deepfool, superdeepfool, trust_region
12
+ )
13
+
14
__all__ = [
    'minimal_search_steps',
    'minimal_init_eps',
    'pgd',
    'fgsm',
    'apgd',
    'fab',
    'fmn',
    'deepfool',
    'superdeepfool',
    'trust_region',
]
File without changes