glitchlings-0.1.1-py3-none-any.whl → glitchlings-0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glitchlings/__init__.py +42 -0
- glitchlings/__main__.py +9 -0
- {dlc → glitchlings/dlc}/prime.py +52 -50
- glitchlings/main.py +238 -0
- glitchlings/util/__init__.py +151 -0
- {zoo → glitchlings/zoo}/__init__.py +57 -50
- {zoo → glitchlings/zoo}/core.py +190 -136
- glitchlings/zoo/jargoyle.py +225 -0
- {zoo → glitchlings/zoo}/mim1c.py +79 -62
- {zoo → glitchlings/zoo}/redactyl.py +91 -73
- {zoo → glitchlings/zoo}/reduple.py +73 -54
- {zoo → glitchlings/zoo}/rushmore.py +74 -53
- {zoo → glitchlings/zoo}/scannequin.py +140 -124
- {zoo → glitchlings/zoo}/typogre.py +231 -224
- {glitchlings-0.1.1.dist-info → glitchlings-0.1.2.dist-info}/METADATA +49 -23
- glitchlings-0.1.2.dist-info/RECORD +20 -0
- {glitchlings-0.1.1.dist-info → glitchlings-0.1.2.dist-info}/licenses/LICENSE +201 -201
- .github/workflows/publish.yml +0 -42
- .gitignore +0 -14
- LICENSE +0 -201
- MONSTER_MANUAL.md +0 -272
- PKG-INFO +0 -429
- README.md +0 -196
- __init__.py +0 -73
- glitchlings-0.1.1.dist-info/RECORD +0 -26
- main.py +0 -6
- pyproject.toml +0 -79
- util/__init__.py +0 -73
- zoo/jargoyle.py +0 -89
- {dlc → glitchlings/dlc}/__init__.py +0 -0
- {glitchlings-0.1.1.dist-info → glitchlings-0.1.2.dist-info}/WHEEL +0 -0
- {glitchlings-0.1.1.dist-info → glitchlings-0.1.2.dist-info}/entry_points.txt +0 -0
{zoo → glitchlings/zoo}/core.py
RENAMED
@@ -1,136 +1,190 @@
[136 lines removed: the pre-rename zoo/core.py. The deleted source did not survive extraction and is not reproduced in this view.]
[190 lines added: the new glitchlings/zoo/core.py]

```python
"""Core data structures used to model glitchlings and their interactions."""

from enum import IntEnum, auto
from datasets import Dataset
import random
from typing import Any, Protocol


class CorruptionCallable(Protocol):
    """Protocol describing a callable capable of corrupting text."""

    def __call__(self, text: str, *args: Any, **kwargs: Any) -> str: ...


# Text levels for glitchlings, to enforce a sort order
# Work from highest level down, because e.g.
# duplicating a word then adding a typo is potentially different than
# adding a typo then duplicating a word
class AttackWave(IntEnum):
    """Granularity of text that a glitchling corrupts."""

    DOCUMENT = auto()
    PARAGRAPH = auto()
    SENTENCE = auto()
    WORD = auto()
    CHARACTER = auto()


# Modifier for within the same attack wave
class AttackOrder(IntEnum):
    """Relative execution order for glitchlings within the same wave."""

    FIRST = auto()
    EARLY = auto()
    NORMAL = auto()
    LATE = auto()
    LAST = auto()


class Glitchling:
    """A single text corruption agent with deterministic behaviour."""

    def __init__(
        self,
        name: str,
        corruption_function: CorruptionCallable,
        scope: AttackWave,
        order: AttackOrder = AttackOrder.NORMAL,
        seed: int | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize a glitchling.

        Args:
            name: Human readable glitchling name.
            corruption_function: Callable used to transform text.
            scope: Text granularity on which the glitchling operates.
            order: Relative ordering within the same scope.
            seed: Optional seed for deterministic random behaviour.
            **kwargs: Additional parameters forwarded to the corruption callable.
        """

        # Each Glitchling maintains its own RNG for deterministic yet isolated behavior.
        # If no seed is supplied, we fall back to Python's default entropy.
        self.seed = seed
        self.rng: random.Random = random.Random(seed)
        self.name: str = name
        self.corruption_function: CorruptionCallable = corruption_function
        self.level: AttackWave = scope
        self.order: AttackOrder = order
        self.kwargs: dict[str, Any] = {}
        for kw, val in kwargs.items():
            self.set_param(kw, val)

    def set_param(self, key: str, value: Any) -> None:
        """Persist a parameter for use by the corruption callable."""

        setattr(self, key, value)
        self.kwargs[key] = value

    def __corrupt(self, text: str, *args: Any, **kwargs: Any) -> str:
        """Execute the corruption callable, injecting the RNG when required."""

        # Pass rng to underlying corruption function if it expects it.
        if "rng" in self.corruption_function.__code__.co_varnames:
            corrupted = self.corruption_function(text, *args, rng=self.rng, **kwargs)
        else:
            corrupted = self.corruption_function(text, *args, **kwargs)
        return corrupted

    def corrupt(self, text: str | list[dict[str, Any]]) -> str | list[dict[str, Any]]:
        """Apply the corruption function to text or conversational transcripts."""

        if isinstance(text, list):
            text[-1]["content"] = self.__corrupt(text[-1]["content"], **self.kwargs)
        else:
            text = self.__corrupt(text, **self.kwargs)

        return text

    def corrupt_dataset(self, dataset: Dataset, columns: list[str]) -> Dataset:
        """Apply corruption lazily across dataset columns."""

        def __corrupt_row(row: dict[str, Any]) -> dict[str, Any]:
            row = dict(row)
            for column in columns:
                row[column] = self.corrupt(row[column])
            return row

        return dataset.with_transform(__corrupt_row)

    def __call__(self, text: str, *args: Any, **kwds: Any) -> str | list[dict[str, Any]]:
        """Allow a glitchling to be invoked directly like a callable."""

        return self.corrupt(text, *args, **kwds)

    def reset_rng(self, seed: int | None = None) -> None:
        """Reset the glitchling's RNG to its initial seed."""

        if seed is not None:
            self.seed = seed
        if self.seed is not None:
            self.rng = random.Random(self.seed)

    def clone(self, seed: int | None = None) -> "Glitchling":
        """Create a copy of this glitchling, optionally with a new seed."""

        cls = self.__class__
        filtered_kwargs = {k: v for k, v in self.kwargs.items() if k != "seed"}
        clone_seed = seed if seed is not None else self.seed
        if clone_seed is not None:
            filtered_kwargs["seed"] = clone_seed

        if cls is Glitchling:
            return Glitchling(
                self.name,
                self.corruption_function,
                self.level,
                self.order,
                **filtered_kwargs,
            )

        return cls(**filtered_kwargs)


class Gaggle(Glitchling):
    """A collection of glitchlings executed in a deterministic order."""

    def __init__(self, glitchlings: list[Glitchling], seed: int = 151):
        """Initialize the gaggle and derive per-glitchling RNG seeds.

        Args:
            glitchlings: Glitchlings to orchestrate.
            seed: Master seed used to derive per-glitchling seeds.
        """

        super().__init__("Gaggle", self.corrupt, AttackWave.DOCUMENT, seed=seed)
        self.glitchlings: dict[AttackWave, list[Glitchling]] = {
            level: [] for level in AttackWave
        }
        self.apply_order: list[Glitchling] = []
        # Derive deterministic per-glitchling seeds from master seed if provided
        for idx, g in enumerate(glitchlings):
            _g = g.clone()
            derived_seed = Gaggle.derive_seed(seed, _g.name, idx)
            _g.reset_rng(derived_seed)
            self.glitchlings[g.level].append(_g)
        self.sort_glitchlings()

    @staticmethod
    def derive_seed(master_seed: int, glitchling_name: str, index: int) -> int:
        """Derive a deterministic seed for a glitchling based on the master seed."""
        return hash((master_seed, glitchling_name, index)) & 0xFFFFFFFF

    def sort_glitchlings(self) -> None:
        """Sort glitchlings by wave then order to produce application order."""

        self.apply_order = [
            g
            for _, glitchlings in sorted(self.glitchlings.items())
            for g in sorted(glitchlings, key=lambda x: (x.order, x.name))
        ]

    def corrupt(self, text: str) -> str:
        """Apply each glitchling to the provided text sequentially."""

        corrupted = text
        for glitchling in self.apply_order:
            corrupted = glitchling(corrupted)
        return corrupted
```
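For orientation, here is a minimal usage sketch of the classes above. It assumes the installed module path `glitchlings.zoo.core`; the `shuffle_words` corruption function is hypothetical and not part of the package.

```python
import random

from glitchlings.zoo.core import AttackWave, Gaggle, Glitchling


# Hypothetical corruption callable: shuffle the characters of each word.
# Because it declares an `rng` parameter, Glitchling injects its own seeded RNG.
def shuffle_words(text: str, rng: random.Random) -> str:
    def shuffle(word: str) -> str:
        chars = list(word)
        rng.shuffle(chars)
        return "".join(chars)

    return " ".join(shuffle(w) for w in text.split(" "))


scrambler = Glitchling("Scrambler", shuffle_words, AttackWave.WORD, seed=42)
print(scrambler("hello glitchlings"))  # deterministic for a fixed seed

# A Gaggle clones each member, derives a per-member seed from the master seed,
# and applies members wave by wave (DOCUMENT -> ... -> CHARACTER).
gaggle = Gaggle([scrambler], seed=151)
print(gaggle.corrupt("hello glitchlings"))
```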
glitchlings/zoo/jargoyle.py
ADDED
@@ -0,0 +1,225 @@
```python
import random
import re
from collections.abc import Iterable
from dataclasses import dataclass
from typing import Any, Literal, cast

import nltk
from nltk.corpus import wordnet as wn
from .core import Glitchling, AttackWave

_wordnet_ready = False


def _ensure_wordnet() -> None:
    """Ensure the WordNet corpus is available before use."""

    global _wordnet_ready
    if _wordnet_ready:
        return

    try:
        wn.ensure_loaded()
    except LookupError:
        nltk.download("wordnet", quiet=True)
        try:
            wn.ensure_loaded()
        except LookupError as exc:  # pragma: no cover - only triggered when download fails
            raise RuntimeError(
                "Unable to load NLTK WordNet corpus for the jargoyle glitchling."
            ) from exc

    _wordnet_ready = True


PartOfSpeech = Literal["n", "v", "a", "r"]
PartOfSpeechInput = PartOfSpeech | Iterable[PartOfSpeech] | Literal["any"]
NormalizedPartsOfSpeech = tuple[PartOfSpeech, ...]

_VALID_POS: tuple[PartOfSpeech, ...] = ("n", "v", "a", "r")


def _split_token(token: str) -> tuple[str, str, str]:
    """Split a token into leading punctuation, core word, and trailing punctuation."""

    match = re.match(r"^(\W*)(.*?)(\W*)$", token)
    if not match:
        return "", token, ""
    prefix, core, suffix = match.groups()
    return prefix, core, suffix


def _normalize_parts_of_speech(part_of_speech: PartOfSpeechInput) -> NormalizedPartsOfSpeech:
    """Coerce user input into a tuple of valid WordNet POS tags."""

    if isinstance(part_of_speech, str):
        lowered = part_of_speech.lower()
        if lowered == "any":
            return _VALID_POS
        if lowered not in _VALID_POS:
            raise ValueError(
                "part_of_speech must be one of 'n', 'v', 'a', 'r', or 'any'"
            )
        return (cast(PartOfSpeech, lowered),)

    normalized: list[PartOfSpeech] = []
    for pos in part_of_speech:
        if pos not in _VALID_POS:
            raise ValueError(
                "part_of_speech entries must be one of 'n', 'v', 'a', or 'r'"
            )
        if pos not in normalized:
            normalized.append(pos)
    if not normalized:
        raise ValueError("part_of_speech iterable may not be empty")
    return tuple(normalized)


@dataclass(frozen=True)
class CandidateInfo:
    """Metadata for a candidate token that may be replaced."""

    prefix: str
    core_word: str
    suffix: str
    parts_of_speech: NormalizedPartsOfSpeech


def _collect_synonyms(
    word: str, parts_of_speech: NormalizedPartsOfSpeech
) -> list[str]:
    """Gather deterministic synonym candidates for the supplied word."""

    normalized_word = word.lower()
    synonyms: set[str] = set()
    for pos_tag in parts_of_speech:
        synsets = wn.synsets(word, pos=pos_tag)
        if not synsets:
            continue

        for synset in synsets:
            lemmas_list = [lemma.name() for lemma in cast(Any, synset).lemmas()]
            if not lemmas_list:
                continue

            filtered = []
            for lemma_str in lemmas_list:
                cleaned = lemma_str.replace("_", " ")
                if cleaned.lower() != normalized_word:
                    filtered.append(cleaned)

            if filtered:
                synonyms.update(filtered)
                break

        if synonyms:
            break

    return sorted(synonyms)


def substitute_random_synonyms(
    text: str,
    replacement_rate: float = 0.1,
    part_of_speech: PartOfSpeechInput = "n",
    seed: int | None = None,
    rng: random.Random | None = None,
) -> str:
    """Replace words with random WordNet synonyms.

    Parameters
    - text: Input text.
    - replacement_rate: Max proportion of candidate words to replace (default 0.1).
    - part_of_speech: WordNet POS tag(s) to target. Accepts "n", "v", "a", "r",
      any iterable of those tags, or "any" to include all four.
    - rng: Optional RNG instance used for deterministic sampling.
    - seed: Optional seed if `rng` not provided.

    Determinism
    - Candidates collected in left-to-right order; no set() reordering.
    - Replacement positions chosen via rng.sample.
    - Synonyms sorted before rng.choice to fix ordering.
    - For each POS, the first synset containing alternate lemmas is used for stability.
    """
    _ensure_wordnet()

    active_rng: random.Random
    if rng is not None:
        active_rng = rng
    else:
        active_rng = random.Random(seed)

    target_pos = _normalize_parts_of_speech(part_of_speech)

    # Split but keep whitespace separators so we can rebuild easily
    tokens = re.split(r"(\s+)", text)

    # Collect indices of candidate tokens (even positions 0,2,.. are words given our split design)
    candidate_indices: list[int] = []
    candidate_metadata: dict[int, CandidateInfo] = {}
    for idx, tok in enumerate(tokens):
        if idx % 2 == 0 and tok and not tok.isspace():
            prefix, core_word, suffix = _split_token(tok)
            if not core_word:
                continue

            available_pos: NormalizedPartsOfSpeech = tuple(
                pos for pos in target_pos if wn.synsets(core_word, pos=pos)
            )
            if available_pos:
                candidate_indices.append(idx)
                candidate_metadata[idx] = CandidateInfo(
                    prefix=prefix,
                    core_word=core_word,
                    suffix=suffix,
                    parts_of_speech=available_pos,
                )

    if not candidate_indices:
        return text

    max_replacements = int(len(candidate_indices) * replacement_rate)
    if max_replacements <= 0:
        return text

    # Choose which positions to replace deterministically via rng.sample
    replace_positions = active_rng.sample(candidate_indices, k=max_replacements)
    # Process in ascending order to avoid affecting later indices
    replace_positions.sort()

    for pos in replace_positions:
        metadata = candidate_metadata[pos]
        synonyms = _collect_synonyms(metadata.core_word, metadata.parts_of_speech)
        if not synonyms:
            continue

        replacement = active_rng.choice(synonyms)
        tokens[pos] = f"{metadata.prefix}{replacement}{metadata.suffix}"

    return "".join(tokens)


class Jargoyle(Glitchling):
    """Glitchling that swaps words with random WordNet synonyms."""

    def __init__(
        self,
        *,
        replacement_rate: float = 0.1,
        part_of_speech: PartOfSpeechInput = "n",
        seed: int | None = None,
    ) -> None:
        super().__init__(
            name="Jargoyle",
            corruption_function=substitute_random_synonyms,
            scope=AttackWave.WORD,
            seed=seed,
            replacement_rate=replacement_rate,
            part_of_speech=part_of_speech,
        )


jargoyle = Jargoyle()


__all__ = ["Jargoyle", "jargoyle"]
```
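As a closing illustration, a small sketch of how the new jargoyle module might be exercised, assuming the installed module path `glitchlings.zoo.jargoyle`; the sample sentence, rates, and seed are illustrative only, and WordNet is fetched on first use by `_ensure_wordnet`.

```python
from glitchlings.zoo.jargoyle import Jargoyle, substitute_random_synonyms

text = "The quick brown fox jumps over the lazy dog"

# Direct call: replace up to half of the noun candidates, deterministically for a fixed seed.
print(substitute_random_synonyms(text, replacement_rate=0.5, part_of_speech="n", seed=151))

# Same behaviour wrapped as a glitchling, this time targeting nouns and verbs.
jg = Jargoyle(replacement_rate=0.5, part_of_speech=("n", "v"), seed=151)
print(jg(text))
```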