mapFolding 0.3.5__tar.gz → 0.3.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mapfolding-0.3.5 → mapfolding-0.3.7}/PKG-INFO +4 -2
- mapfolding-0.3.7/mapFolding/citations/updateCitation.py +238 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/PKG-INFO +4 -2
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/SOURCES.txt +3 -4
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/requires.txt +2 -1
- mapfolding-0.3.7/mapFolding/someAssemblyRequired/__init__.py +1 -0
- mapfolding-0.3.7/mapFolding/someAssemblyRequired/makeJob.py +34 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/someAssemblyRequired/synthesizeModuleJob.py +30 -42
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/someAssemblyRequired/synthesizeModules.py +15 -43
- mapfolding-0.3.5/mapFolding/syntheticModules/countInitialize.py → mapfolding-0.3.7/mapFolding/syntheticModules/Initialize.py +4 -3
- mapfolding-0.3.5/mapFolding/syntheticModules/countParallel.py → mapfolding-0.3.7/mapFolding/syntheticModules/Parallel.py +5 -4
- mapfolding-0.3.5/mapFolding/syntheticModules/countSequential.py → mapfolding-0.3.7/mapFolding/syntheticModules/Sequential.py +5 -4
- mapfolding-0.3.7/mapFolding/syntheticModules/__init__.py +4 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/pyproject.toml +6 -4
- mapfolding-0.3.5/mapFolding/citations/updateCitation.py +0 -67
- mapfolding-0.3.5/mapFolding/someAssemblyRequired/__init__.py +0 -2
- mapfolding-0.3.5/mapFolding/someAssemblyRequired/generalizeSourceCode.py +0 -122
- mapfolding-0.3.5/mapFolding/someAssemblyRequired/makeJob.py +0 -21
- mapfolding-0.3.5/mapFolding/syntheticModules/__init__.py +0 -3
- {mapfolding-0.3.5 → mapfolding-0.3.7}/README.md +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/benchmarks/benchmarking.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/dependency_links.txt +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/entry_points.txt +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/top_level.txt +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/flattened.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/hunterNumba.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/irvineJavaPort.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/jax.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/lunnan.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/lunnanNumpy.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/lunnanWhile.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/rotatedEntryPoint.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/reference/total_countPlus1vsPlusN.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/someAssemblyRequired/getLLVMforNoReason.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/setup.cfg +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/tests/test_oeis.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/tests/test_other.py +0 -0
- {mapfolding-0.3.5 → mapfolding-0.3.7}/tests/test_tasks.py +0 -0
{mapfolding-0.3.5 → mapfolding-0.3.7}/PKG-INFO

@@ -1,11 +1,12 @@
 Metadata-Version: 2.2
 Name: mapFolding
-Version: 0.3.5
+Version: 0.3.7
 Summary: Count distinct ways to fold a map (or a strip of stamps)
 Author-email: Hunter Hogan <HunterHogan@pm.me>
 License: CC-BY-NC-4.0
 Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
 Project-URL: Donate, https://www.patreon.com/integrated
+Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
 Keywords: A001415,A001416,A001417,A001418,A195646,folding,map folding,OEIS,stamp folding
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Environment :: Console

@@ -29,12 +30,13 @@ Requires-Dist: jupyter; extra == "benchmark"
 Requires-Dist: pandas; extra == "benchmark"
 Requires-Dist: tqdm; extra == "benchmark"
 Provides-Extra: testing
+Requires-Dist: attrs; extra == "testing"
 Requires-Dist: cffconvert; extra == "testing"
 Requires-Dist: more_itertools; extra == "testing"
-Requires-Dist: pytest; extra == "testing"
 Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-env; extra == "testing"
 Requires-Dist: pytest-xdist; extra == "testing"
+Requires-Dist: pytest; extra == "testing"
 Requires-Dist: python_minifier; extra == "testing"
 Requires-Dist: tomli; extra == "testing"
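The `Requires-Dist` rows above are PEP 508 requirement strings; the `extra == "testing"` marker gates each one behind `pip install mapFolding[testing]`. A minimal sketch of how such a row parses, using the `packaging` library (the same library the new updateCitation.py imports below):

# A minimal sketch: parsing one of the Requires-Dist rows above with the
# `packaging` library to see how the "testing" extra gates installation.
from packaging.requirements import Requirement

requirement = Requirement('attrs; extra == "testing"')
print(requirement.name)                                   # attrs
print(requirement.marker.evaluate({"extra": "testing"}))  # True: installed with [testing]
print(requirement.marker.evaluate({"extra": ""}))         # False: skipped otherwise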
mapfolding-0.3.7/mapFolding/citations/updateCitation.py

@@ -0,0 +1,238 @@
+from cffconvert.cli.create_citation import create_citation
+from packaging.metadata import Metadata as PyPAMetadata
+from typing import Any, Dict, List
+import attrs
+import cffconvert
+import tempfile
+import packaging
+import packaging.metadata
+import packaging.utils
+import packaging.version
+import pathlib
+import ruamel.yaml
+import tomli
+
+listProjectURLsTarget: List[str] = ["homepage", "license", "repository"]
+
+"""
+Tentative plan:
+- Commit and push to GitHub
+- GitHub Action gathers information from the sources of truth
+- If the citation needs to be updated, write to both
+    - pathFilenameCitationSSOT
+    - pathFilenameCitationDOTcffRepo
+- Commit and push to GitHub
+    - this complicates things
+    - I want the updated citation to be in the `commit` field of itself
+"""
+
+@attrs.define
+class CitationNexus:
+    """
+    - one-to-one correlation with `cffconvert.lib.cff_1_2_x.citation` class Citation_1_2_x.cffobj
+    """
+    cffDASHversion: str # pathFilenameCitationSSOT
+    message: str # pathFilenameCitationSSOT
+
+    abstract: str | None = None # pathFilenameCitationSSOT
+    authors: list[dict[str,str]] = attrs.field(factory=list) # pathFilenamePackageSSOT; pyproject.toml authors
+    commit: str | None = None # workflows['Make GitHub Release']
+    contact: list[dict[str,str]] = attrs.field(factory=list) # pathFilenamePackageSSOT; pyproject.toml maintainers
+    dateDASHreleased: str | None = None # workflows['Make GitHub Release']
+    doi: str | None = None # pathFilenameCitationSSOT
+    identifiers: list[str] = attrs.field(factory=list) # workflows['Make GitHub Release']
+    keywords: list[str] = attrs.field(factory=list) # pathFilenamePackageSSOT; packaging.metadata.Metadata.keywords
+    license: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.license_expression
+    licenseDASHurl: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: license or pyproject.toml urls license
+    preferredDASHcitation: str | None = None # pathFilenameCitationSSOT
+    references: list[str] = attrs.field(factory=list) # bibtex files in pathCitationSSOT. Conversion method and timing TBD.
+    repositoryDASHartifact: str | None = None # (https://pypi.org/pypi/{package_name}/json').json()['releases']
+    repositoryDASHcode: str | None = None # workflows['Make GitHub Release']
+    repository: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: repository
+    title: str | None = None # pathFilenamePackageSSOT; pyproject.toml name (packaging normalizes the names)
+    type: str | None = None # pathFilenameCitationSSOT
+    url: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.project_urls: homepage
+    version: str | None = None # pathFilenamePackageSSOT; packaging.metadata.Metadata.version
+
+    def setInStone(self, prophet: str) -> "CitationNexus":
+        match prophet:
+            case "Citation":
+                pass
+                # "freeze" these items
+                # setattr(self.cffDASHversion, 'type', Final[str])
+                # setattr(self.doi, 'type', Final[str])
+                # cffDASHversion: str # pathFilenameCitationSSOT
+                # message: str # pathFilenameCitationSSOT
+                # abstract: str | None = None # pathFilenameCitationSSOT
+                # doi: str | None = None # pathFilenameCitationSSOT
+                # preferredDASHcitation: str | None = None # pathFilenameCitationSSOT
+                # type: str | None = None # pathFilenameCitationSSOT
+            case "PyPA":
+                pass
+                # "freeze" these items
+                # setattr(self.keywords, 'type', Final[list[str]])
+                # setattr(self.license, 'type', Final[str])
+                # setattr(self.licenseDASHurl, 'type', Final[str])
+                # setattr(self.repository, 'type', Final[str])
+                # setattr(self.url, 'type', Final[str])
+                # setattr(self.version, 'type', Final[str])
+            case "pyprojectDOTtoml":
+                pass
+                # "freeze" these items
+                # setattr(self.authors, 'type', Final[list[dict[str,str]]])
+                # setattr(self.contact, 'type', Final[list[dict[str,str]]])
+                # setattr(self.title, 'type', Final[str])
+        return self
+
+def getNexusCitation(pathFilenameCitationSSOT: pathlib.Path) -> CitationNexus:
+
+    # `cffconvert.cli.create_citation.create_citation()` is PAINFULLY mundane, but a major problem
+    # in the CFF ecosystem is divergence. Therefore, I will use this function so that my code
+    # converges with the CFF ecosystem.
+    citationObject: cffconvert.Citation = create_citation(infile=pathFilenameCitationSSOT, url=None)
+    # `._parse()` is a yaml loader: use it for convergence
+    cffobj: Dict[Any, Any] = citationObject._parse()
+
+    nexusCitation = CitationNexus(
+        cffDASHversion=cffobj["cff-version"],
+        message=cffobj["message"],
+    )
+
+    Z0Z_list: List[attrs.Attribute] = list(attrs.fields(type(nexusCitation)))
+    for Z0Z_field in Z0Z_list:
+        cffobjKeyName: str = Z0Z_field.name.replace("DASH", "-")
+        cffobjValue = cffobj.get(cffobjKeyName)
+        if cffobjValue: # An empty list will be False
+            setattr(nexusCitation, Z0Z_field.name, cffobjValue)
+
+    nexusCitation = nexusCitation.setInStone("Citation")
+    return nexusCitation
+
+def getPypaMetadata(packageData: Dict[str, Any]) -> PyPAMetadata:
+    """
+    Create a PyPA metadata object (version 2.4) from packageData.
+    https://packaging.python.org/en/latest/specifications/core-metadata/
+    """
+    dictionaryProjectURLs: Dict[str, str] = {}
+    for urlName, url in packageData.get("urls", {}).items():
+        urlName = urlName.lower()
+        if urlName in listProjectURLsTarget:
+            dictionaryProjectURLs[urlName] = url
+
+    metadataRaw = packaging.metadata.RawMetadata(
+        keywords=packageData.get("keywords", []),
+        license_expression=packageData.get("license", {}).get("text", ""),
+        metadata_version="2.4",
+        name=packaging.utils.canonicalize_name(packageData.get("name", None), validate=True), # packaging.metadata.InvalidMetadata: 'name' is a required field
+        project_urls=dictionaryProjectURLs,
+        version=packageData.get("version", None),
+    )
+
+    metadata = PyPAMetadata().from_raw(metadataRaw)
+    return metadata
+
+def addPypaMetadata(nexusCitation: CitationNexus, metadata: PyPAMetadata) -> CitationNexus:
+    if not metadata.name:
+        raise ValueError("Metadata name is required.")
+
+    nexusCitation.title = metadata.name
+    if metadata.version: nexusCitation.version = str(metadata.version)
+    if metadata.keywords: nexusCitation.keywords = metadata.keywords
+    if metadata.license_expression: nexusCitation.license = metadata.license_expression
+
+    Z0Z_lookup: Dict[str, str] = {
+        "homepage": "url",
+        "license": "licenseDASHurl",
+        "repository": "repository",
+    }
+    if metadata.project_urls:
+        for urlTarget in listProjectURLsTarget:
+            url = metadata.project_urls.get(urlTarget, None)
+            if url:
+                setattr(nexusCitation, Z0Z_lookup[urlTarget], url)
+
+    nexusCitation = nexusCitation.setInStone("PyPA")
+    return nexusCitation
+
+def add_pyprojectDOTtoml(nexusCitation: CitationNexus, packageData: Dict[str, Any]) -> CitationNexus:
+    def Z0Z_ImaNotValidatingNoNames(person: Dict[str, str]) -> Dict[str, str]:
+        cffPerson: Dict[str, str] = {}
+        if person.get('name', None):
+            cffPerson['given-names'], cffPerson['family-names'] = person['name'].split(' ', 1)
+        if person.get('email', None):
+            cffPerson['email'] = person['email']
+        return cffPerson
+    listAuthors = packageData.get("authors", None)
+    if not listAuthors:
+        raise ValueError("Authors are required.")
+    else:
+        listPersons = []
+        for person in listAuthors:
+            listPersons.append(Z0Z_ImaNotValidatingNoNames(person))
+        nexusCitation.authors = listPersons
+    if packageData.get("maintainers", None):
+        listPersons = []
+        for person in packageData["maintainers"]:
+            listPersons.append(Z0Z_ImaNotValidatingNoNames(person))
+        nexusCitation.contact = listPersons
+    nexusCitation.title = packageData["name"]
+    nexusCitation = nexusCitation.setInStone("pyprojectDOTtoml")
+    return nexusCitation
+
+def writeCitation(nexusCitation: CitationNexus, pathFilenameCitationSSOT: pathlib.Path, pathFilenameCitationDOTcffRepo: pathlib.Path):
+    # NOTE embarrassingly hacky process to follow
+    parameterIndent= 2
+    parameterLineWidth = 60
+    yamlWorkhorse = ruamel.yaml.YAML()
+
+    def srsly(Z0Z_filed, Z0Z_value):
+        if Z0Z_value: # empty lists
+            return True
+        else:
+            return False
+
+    dictionaryCitation = attrs.asdict(nexusCitation, filter=srsly)
+    for keyName in list(dictionaryCitation.keys()):
+        dictionaryCitation[keyName.replace("DASH", "-")] = dictionaryCitation.pop(keyName)
+
+    pathFilenameForValidation = pathlib.Path(tempfile.mktemp())
+
+    def writeStream(pathFilename):
+        with open(pathFilename, 'w') as pathlibIsAStealthContextManagerThatRuamelCannotDetectAndRefusesToWorkWith:
+            yamlWorkhorse.dump(dictionaryCitation, pathlibIsAStealthContextManagerThatRuamelCannotDetectAndRefusesToWorkWith)
+
+    writeStream(pathFilenameForValidation)
+
+    citationObject: cffconvert.Citation = create_citation(infile=pathFilenameForValidation, url=None)
+    if citationObject.validate(verbose=True) is None:
+        writeStream(pathFilenameCitationSSOT)
+        writeStream(pathFilenameCitationDOTcffRepo)
+
+def logistics():
+    # Prefer reliable, dynamic values over hardcoded ones
+    packageNameHARDCODED: str = 'mapFolding'
+
+    packageName: str = packageNameHARDCODED
+    pathRepoRoot = pathlib.Path(__file__).parent.parent.parent
+    pathFilenamePackageSSOT = pathRepoRoot / 'pyproject.toml'
+    filenameGitHubAction = 'updateCitation.yml'
+    pathFilenameGitHubAction = pathRepoRoot / '.github' / 'workflows' / filenameGitHubAction
+
+    filenameCitationDOTcff = 'CITATION.cff'
+    pathCitations = pathRepoRoot / packageName / 'citations'
+    pathFilenameCitationSSOT = pathCitations / filenameCitationDOTcff
+    pathFilenameCitationDOTcffRepo = pathRepoRoot / filenameCitationDOTcff
+
+    nexusCitation = getNexusCitation(pathFilenameCitationSSOT)
+
+    tomlPackageData: Dict[str, Any] = tomli.loads(pathFilenamePackageSSOT.read_text())['project']
+    # https://packaging.python.org/en/latest/specifications/pyproject-toml/
+    pypaMetadata: PyPAMetadata = getPypaMetadata(tomlPackageData)
+
+    nexusCitation = addPypaMetadata(nexusCitation, pypaMetadata)
+    nexusCitation = add_pyprojectDOTtoml(nexusCitation, tomlPackageData)
+
+    writeCitation(nexusCitation, pathFilenameCitationSSOT, pathFilenameCitationDOTcffRepo)

+if __name__ == '__main__':
+    logistics()
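The `DASH` infix in the `CitationNexus` field names above stands in for `-`, since CFF keys such as `cff-version` are not valid Python identifiers; `getNexusCitation` and `writeCitation` both translate with a plain `.replace("DASH", "-")`. A minimal round-trip sketch of that convention (the two-field class is illustrative, not the full `CitationNexus`):

# Minimal sketch of the DASH <-> "-" key-mangling convention used above.
# TinyNexus is an illustrative two-field class, not the full CitationNexus.
import attrs

@attrs.define
class TinyNexus:
    cffDASHversion: str
    dateDASHreleased: str | None = None

nexus = TinyNexus(cffDASHversion="1.2.0", dateDASHreleased="2025-01-01")

# attrs field name -> CFF key, exactly as getNexusCitation/writeCitation do it:
dictionaryCitation = {key.replace("DASH", "-"): value
                      for key, value in attrs.asdict(nexus).items()}
print(dictionaryCitation)  # {'cff-version': '1.2.0', 'date-released': '2025-01-01'}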
{mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/PKG-INFO

@@ -1,11 +1,12 @@
 Metadata-Version: 2.2
 Name: mapFolding
-Version: 0.3.5
+Version: 0.3.7
 Summary: Count distinct ways to fold a map (or a strip of stamps)
 Author-email: Hunter Hogan <HunterHogan@pm.me>
 License: CC-BY-NC-4.0
 Project-URL: Homepage, https://github.com/hunterhogan/mapFolding
 Project-URL: Donate, https://www.patreon.com/integrated
+Project-URL: Repository, https://github.com/hunterhogan/mapFolding.git
 Keywords: A001415,A001416,A001417,A001418,A195646,folding,map folding,OEIS,stamp folding
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Environment :: Console

@@ -29,12 +30,13 @@ Requires-Dist: jupyter; extra == "benchmark"
 Requires-Dist: pandas; extra == "benchmark"
 Requires-Dist: tqdm; extra == "benchmark"
 Provides-Extra: testing
+Requires-Dist: attrs; extra == "testing"
 Requires-Dist: cffconvert; extra == "testing"
 Requires-Dist: more_itertools; extra == "testing"
-Requires-Dist: pytest; extra == "testing"
 Requires-Dist: pytest-cov; extra == "testing"
 Requires-Dist: pytest-env; extra == "testing"
 Requires-Dist: pytest-xdist; extra == "testing"
+Requires-Dist: pytest; extra == "testing"
 Requires-Dist: python_minifier; extra == "testing"
 Requires-Dist: tomli; extra == "testing"
{mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/mapFolding.egg-info/SOURCES.txt

@@ -18,15 +18,14 @@ mapFolding/reference/lunnanWhile.py
 mapFolding/reference/rotatedEntryPoint.py
 mapFolding/reference/total_countPlus1vsPlusN.py
 mapFolding/someAssemblyRequired/__init__.py
-mapFolding/someAssemblyRequired/generalizeSourceCode.py
 mapFolding/someAssemblyRequired/getLLVMforNoReason.py
 mapFolding/someAssemblyRequired/makeJob.py
 mapFolding/someAssemblyRequired/synthesizeModuleJob.py
 mapFolding/someAssemblyRequired/synthesizeModules.py
+mapFolding/syntheticModules/Initialize.py
+mapFolding/syntheticModules/Parallel.py
+mapFolding/syntheticModules/Sequential.py
 mapFolding/syntheticModules/__init__.py
-mapFolding/syntheticModules/countInitialize.py
-mapFolding/syntheticModules/countParallel.py
-mapFolding/syntheticModules/countSequential.py
 tests/test_oeis.py
 tests/test_other.py
 tests/test_tasks.py
mapfolding-0.3.7/mapFolding/someAssemblyRequired/__init__.py

@@ -0,0 +1 @@
+from .makeJob import makeStateJob
mapfolding-0.3.7/mapFolding/someAssemblyRequired/makeJob.py

@@ -0,0 +1,34 @@
+from mapFolding import getPathFilenameFoldsTotal, computationState
+from mapFolding import outfitCountFolds
+from typing import Any, Literal, Optional, Sequence, Type, overload
+import pathlib
+import pickle
+
+@overload
+def makeStateJob(listDimensions: Sequence[int], writeJob: Literal[True] = True
+    , **keywordArguments: Optional[Type[Any]]) -> pathlib.Path:
+    ...
+
+@overload
+def makeStateJob(listDimensions: Sequence[int], writeJob: Literal[False] = False
+    , **keywordArguments: Optional[Type[Any]]) -> computationState:
+    ...
+
+def makeStateJob(listDimensions: Sequence[int], writeJob: bool = True, **keywordArguments: Optional[Type[Any]]) -> computationState | pathlib.Path:
+
+    stateUniversal: computationState = outfitCountFolds(listDimensions, computationDivisions=None, CPUlimit=None, **keywordArguments)
+
+    from mapFolding.syntheticModules import countInitialize
+    countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
+
+    if not writeJob:
+        return stateUniversal
+
+    pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
+    suffix = pathFilenameChopChop.suffix
+    pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
+    pathJob.mkdir(parents=True, exist_ok=True)
+    pathFilenameJob = pathJob / 'stateJob.pkl'
+
+    pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
+    return pathFilenameJob
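The two `@overload` stubs above let a type checker tie the return type of `makeStateJob` to the literal value of `writeJob`: `writeJob=True` (the default) is seen as returning `pathlib.Path`, `writeJob=False` as returning `computationState`. A minimal self-contained sketch of the same pattern; `loadConfig` is a hypothetical stand-in, not part of mapFolding:

# Minimal sketch of the Literal-overload pattern used by makeStateJob above.
# loadConfig is a hypothetical stand-in, not part of mapFolding.
from typing import Literal, overload
import pathlib

@overload
def loadConfig(asPath: Literal[True] = True) -> pathlib.Path: ...
@overload
def loadConfig(asPath: Literal[False] = False) -> dict: ...
def loadConfig(asPath: bool = True) -> pathlib.Path | dict:
    return pathlib.Path("config.toml") if asPath else {"key": "value"}

pathConfig = loadConfig()              # checkers infer pathlib.Path (first matching overload)
dictConfig = loadConfig(asPath=False)  # checkers infer dict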
{mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/someAssemblyRequired/synthesizeModuleJob.py

@@ -1,15 +1,13 @@
-from mapFolding import getPathFilenameFoldsTotal
+from mapFolding import getPathFilenameFoldsTotal, indexMy, indexTrack
 from mapFolding import make_dtype, datatypeLargeDEFAULT, datatypeMediumDEFAULT, datatypeSmallDEFAULT, datatypeModuleDEFAULT
-from mapFolding import computationState
 from someAssemblyRequired import makeStateJob
 from typing import Optional
-import more_itertools
-import inspect
 import importlib
 import importlib.util
+import inspect
+import more_itertools
 import numpy
 import pathlib
-import pickle
 import python_minifier
 
 identifierCallableLaunch = "goGoGadgetAbsurdity"

@@ -52,33 +50,15 @@ def writeModuleWithNumba(listDimensions, **keywordArguments: Optional[str]) -> p
     dtypeMedium = make_dtype(datatypeMedium, datatypeModule) # type: ignore
     dtypeSmall = make_dtype(datatypeSmall, datatypeModule) # type: ignore
 
-
-
-    pathFilenameFoldsTotal = getPathFilenameFoldsTotal(stateJob['mapShape'], pathFilenameJob.parent)
+    stateJob = makeStateJob(listDimensions, writeJob=False, dtypeLarge = dtypeLarge, dtypeMedium = dtypeMedium, dtypeSmall = dtypeSmall)
+    pathFilenameFoldsTotal = getPathFilenameFoldsTotal(stateJob['mapShape'])
 
     from syntheticModules import countSequential
     algorithmSource = countSequential
     codeSource = inspect.getsource(algorithmSource)
 
-
-
-cache=True, \
-nopython=True, \
-fastmath=True, \
-forceinline=True, \
-inline='always', \
-looplift=False, \
-_nrt=True, \
-error_model='numpy', \
-parallel=False, \
-boundscheck=False, \
-no_cfunc_wrapper=False, \
-no_cpython_wrapper=False, \
-"
-# no_cfunc_wrapper=True, \
-# no_cpython_wrapper=True, \
-
-    lineNumba = f"@numba.jit({parametersNumba})"
+    if datatypeLarge:
+        lineNumba = f"@numba.jit(numba.types.{datatypeLarge}(), cache=True, nopython=True, fastmath=True, forceinline=True, inline='always', looplift=False, _nrt=True, error_model='numpy', parallel=False, boundscheck=False, no_cfunc_wrapper=True, no_cpython_wrapper=False)"
 
     linesImport = "\n".join([
         "import numpy"

@@ -88,8 +68,8 @@ no_cpython_wrapper=False, \
     ImaIndent = '    '
     linesDataDynamic = """"""
     linesDataDynamic = "\n".join([linesDataDynamic
-        , ImaIndent + f"foldsTotal = numba.types.{datatypeLarge}(0)"
-        , ImaIndent + makeStrRLEcompacted(stateJob['foldGroups'], 'foldGroups')
+        # , ImaIndent + f"foldsTotal = numba.types.{datatypeLarge}(0)"
+        # , ImaIndent + makeStrRLEcompacted(stateJob['foldGroups'], 'foldGroups')
         , ImaIndent + makeStrRLEcompacted(stateJob['gapsWhere'], 'gapsWhere')
     ])

@@ -112,14 +92,22 @@ no_cpython_wrapper=False, \
             , linesDataDynamic
             , linesDataStatic
             ])
+        elif 'taskIndex' in lineSource:
+            continue
         elif 'my[indexMy.' in lineSource:
+            if 'dimensionsTotal' in lineSource:
+                continue
             # leaf1ndex = my[indexMy.leaf1ndex.value]
             identifier, statement = lineSource.split('=')
-            lineSource = ImaIndent + identifier.strip() +
+            lineSource = ImaIndent + identifier.strip() + f"=numba.types.{datatypeSmall}({str(eval(statement.strip()))})"
         elif 'track[indexTrack.' in lineSource:
             # leafAbove = track[indexTrack.leafAbove.value]
             identifier, statement = lineSource.split('=')
             lineSource = ImaIndent + makeStrRLEcompacted(eval(statement.strip()), identifier.strip())
+        elif 'foldGroups[-1]' in lineSource:
+            lineSource = lineSource.replace('foldGroups[-1]', str(stateJob['foldGroups'][-1]))
+        elif 'dimensionsTotal' in lineSource:
+            lineSource = lineSource.replace('dimensionsTotal', str(stateJob['my'][indexMy.dimensionsTotal]))
 
         linesAlgorithm = "\n".join([linesAlgorithm
             , lineSource

@@ -128,18 +116,19 @@ no_cpython_wrapper=False, \
     linesLaunch = """"""
     linesLaunch = linesLaunch + f"""
 if __name__ == '__main__':
-    import time
-    timeStart = time.perf_counter()
+    # import time
+    # timeStart = time.perf_counter()
     {identifierCallableLaunch}()
-    print(time.perf_counter() - timeStart)
+    # print(time.perf_counter() - timeStart)
+"""
 
     linesWriteFoldsTotal = """"""
     linesWriteFoldsTotal = "\n".join([linesWriteFoldsTotal
-        , "
-        , "    print(
+        , f"    groupsOfFolds *= {str(stateJob['foldGroups'][-1])}"
+        , "    print(groupsOfFolds)"
         , "    with numba.objmode():"
-        , f"        open('{pathFilenameFoldsTotal.as_posix()}', 'w').write(str(
-        , "    return
+        , f"        open('{pathFilenameFoldsTotal.as_posix()}', 'w').write(str(groupsOfFolds))"
+        , "    return groupsOfFolds"
     ])

@@ -149,7 +138,7 @@ if __name__ == '__main__':
     , linesLaunch
     ])
 
-    pathFilenameDestination =
+    pathFilenameDestination = pathFilenameFoldsTotal.with_suffix(".py")
     pathFilenameDestination.write_text(linesAll)
 
     return pathFilenameDestination

@@ -160,11 +149,10 @@ if __name__ == '__main__':
     datatypeMedium = 'uint8'
     datatypeSmall = datatypeMedium
     pathFilenameModule = writeModuleWithNumba(listDimensions, datatypeLarge=datatypeLarge, datatypeMedium=datatypeMedium, datatypeSmall=datatypeSmall)
+
     # Induce numba.jit compilation
     moduleSpec = importlib.util.spec_from_file_location(pathFilenameModule.stem, pathFilenameModule)
-    if moduleSpec is None:
-        raise ImportError(f"Could not load module specification from {pathFilenameModule}")
+    if moduleSpec is None: raise ImportError(f"Could not load module specification from {pathFilenameModule}")
     module = importlib.util.module_from_spec(moduleSpec)
-    if moduleSpec.loader is None:
-        raise ImportError(f"Could not load module from {moduleSpec}")
+    if moduleSpec.loader is None: raise ImportError(f"Could not load module from {moduleSpec}")
     moduleSpec.loader.exec_module(module)
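`makeStrRLEcompacted`, whose body is outside this diff, evidently serializes a NumPy array into source text for the generated module. A hedged sketch of the likely idea — run-length-compacting repeated values so the literal stays small — assuming a simple `[value]*count` encoding; the real encoding may differ:

# Hedged sketch of run-length-compacting an array into a source-code literal,
# in the spirit of makeStrRLEcompacted; the actual encoding may differ.
import itertools
import numpy

def makeStrRLEcompactedSketch(array: numpy.ndarray, identifier: str) -> str:
    # Collapse runs of equal values into "[value]*count" segments.
    runs = [(value, len(list(group))) for value, group in itertools.groupby(array.tolist())]
    segments = " + ".join(f"[{value}]*{count}" for value, count in runs)
    return f"{identifier} = numpy.array({segments}, dtype=numpy.{array.dtype})"

gapsWhere = numpy.zeros(100, dtype=numpy.uint8)
gapsWhere[50] = 7
print(makeStrRLEcompactedSketch(gapsWhere, 'gapsWhere'))
# gapsWhere = numpy.array([0]*50 + [7]*1 + [0]*49, dtype=numpy.uint8)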
{mapfolding-0.3.5 → mapfolding-0.3.7}/mapFolding/someAssemblyRequired/synthesizeModules.py

@@ -82,7 +82,10 @@ def decorateCallableWithNumba(astCallable: ast.FunctionDef, parallel: bool=False
 
     astArgsNumbaSignature = ast.Tuple(elts=listNumbaParameterSignature, ctx=ast.Load())
 
-
+    if astCallable.name == 'countInitialize':
+        parametersNumba = {}
+    else:
+        parametersNumba = parametersNumbaDEFAULT if not parallel else ParametersNumba({**parametersNumbaDEFAULT, 'parallel': True})
     listKeywordsNumbaSignature = [ast.keyword(arg=parameterName, value=ast.Constant(value=parameterValue)) for parameterName, parameterValue in parametersNumba.items()]
 
     astDecoratorNumba = ast.Call(func=ast.Attribute(value=ast.Name(id='numba', ctx=ast.Load()), attr='jit', ctx=ast.Load()), args=[astArgsNumbaSignature], keywords=listKeywordsNumbaSignature)

@@ -117,10 +120,7 @@ class UnpackArrayAccesses(ast.NodeTransformer):
             return ast.Name(id=member_name, ctx=node.ctx)
         elif isinstance(node, ast.Tuple):
             # Handle tuple slices by transforming each element
-            return ast.Tuple(
-                elts=cast(List[ast.expr], [self.transform_slice_element(elt) for elt in node.elts]),
-                ctx=node.ctx
-            )
+            return ast.Tuple(elts=cast(List[ast.expr], [self.transform_slice_element(elt) for elt in node.elts]), ctx=node.ctx)
         elif isinstance(node, ast.Attribute):
             member_name = self.extract_member_name(node)
             if member_name:

@@ -131,7 +131,6 @@ class UnpackArrayAccesses(ast.NodeTransformer):
         # Recursively visit any nested subscripts in value or slice
         node.value = self.visit(node.value)
         node.slice = self.visit(node.slice)
-
         # If node.value is not our arrayName, just return node
         if not (isinstance(node.value, ast.Name) and node.value.id == self.arrayName):
             return node

@@ -152,11 +151,7 @@ class UnpackArrayAccesses(ast.NodeTransformer):
             self.substitutions[memberName] = ('array', node)
             if len(sliceRemainder) == 0:
                 return ast.Name(id=memberName, ctx=ast.Load())
-            return ast.Subscript(
-                value=ast.Name(id=memberName, ctx=ast.Load()),
-                slice=ast.Tuple(elts=sliceRemainder, ctx=ast.Load()) if len(sliceRemainder) > 1 else sliceRemainder[0],
-                ctx=ast.Load()
-            )
+            return ast.Subscript(value=ast.Name(id=memberName, ctx=ast.Load()), slice=ast.Tuple(elts=sliceRemainder, ctx=ast.Load()) if len(sliceRemainder) > 1 else sliceRemainder[0], ctx=ast.Load())
 
         # If single-element tuple, unwrap
         if isinstance(node.slice, ast.Tuple) and len(node.slice.elts) == 1:

@@ -170,44 +165,20 @@ class UnpackArrayAccesses(ast.NodeTransformer):
         initializations = []
         for name, (kind, original_node) in self.substitutions.items():
             if kind == 'scalar':
-                initializations.append(
-                    ast.Assign(
-                        targets=[ast.Name(id=name, ctx=ast.Store())],
-                        value=original_node
-                    )
-                )
+                initializations.append(ast.Assign(targets=[ast.Name(id=name, ctx=ast.Store())], value=original_node))
             else: # array
                 initializations.append(
                     ast.Assign(
                         targets=[ast.Name(id=name, ctx=ast.Store())],
-                        value=ast.Subscript(
-
-                            slice=ast.Attribute(
-                                value=ast.Attribute(
+                        value=ast.Subscript(value=ast.Name(id=self.arrayName, ctx=ast.Load()),
+                            slice=ast.Attribute(value=ast.Attribute(
                                 value=ast.Name(id=self.enumIndexClass.__name__, ctx=ast.Load()),
-                                attr=name,
-                                ctx=ast.Load()
-                            ),
-                            attr='value',
-                            ctx=ast.Load()
-                        ),
-                        ctx=ast.Load()
-                    )
-                )
-            )
+                            attr=name, ctx=ast.Load()), attr='value', ctx=ast.Load()), ctx=ast.Load())))
 
         node.body = initializations + node.body
         return node
 
-def getDictionaryEnumValues() -> Dict[str, int]:
-    dictionaryEnumValues = {}
-    for enumIndex in [indexMy, indexTrack]:
-        for memberName, memberValue in enumIndex._member_map_.items():
-            dictionaryEnumValues[f"{enumIndex.__name__}.{memberName}.value"] = memberValue.value
-    return dictionaryEnumValues
-
 def inlineMapFoldingNumba(**keywordArguments: Optional[str]):
-    dictionaryEnumValues = getDictionaryEnumValues()
     codeSource = inspect.getsource(algorithmSource)
     pathFilenameAlgorithm = pathlib.Path(inspect.getfile(algorithmSource))

@@ -233,11 +204,12 @@ def inlineMapFoldingNumba(**keywordArguments: Optional[str]):
     callableDecorated = cast(ast.FunctionDef, trackUnpacker.visit(callableDecorated))
     ast.fix_missing_locations(callableDecorated)
 
-
-
+    moduleAST = ast.Module(body=cast(List[ast.stmt], list(codeSourceImportStatements) + [callableDecorated]), type_ignores=[])
+    ast.fix_missing_locations(moduleAST)
+    moduleSource = ast.unparse(moduleAST)
 
-    pathFilenameDestination = pathFilenameAlgorithm.parent / "syntheticModules" / pathFilenameAlgorithm.with_stem(callableTarget).name
-    pathFilenameDestination.write_text(
+    pathFilenameDestination = pathFilenameAlgorithm.parent / "syntheticModules" / pathFilenameAlgorithm.with_stem(callableTarget).name[5:None]
+    pathFilenameDestination.write_text(moduleSource)
     listPathFilenamesDestination.append(pathFilenameDestination)
 
 if __name__ == '__main__':
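`UnpackArrayAccesses` rewrites enum-indexed subscripts such as `my[indexMy.leaf1ndex.value]` into plain local names and hoists one initialization per member to the top of the function, which is what produces the local-variable style visible in Sequential.py below. A minimal before/after sketch of that kind of rewrite; this cut-down transformer handles only the simplest case and is not the class above:

# Minimal sketch of the subscript-to-local-name rewrite performed by
# UnpackArrayAccesses; this toy transformer handles only the simplest case.
import ast

class TinyUnpacker(ast.NodeTransformer):
    def visit_Subscript(self, node: ast.Subscript) -> ast.AST:
        # my[indexMy.leaf1ndex.value] -> leaf1ndex
        if (isinstance(node.value, ast.Name) and node.value.id == 'my'
                and isinstance(node.slice, ast.Attribute) and node.slice.attr == 'value'
                and isinstance(node.slice.value, ast.Attribute)):
            return ast.Name(id=node.slice.value.attr, ctx=node.ctx)
        return self.generic_visit(node)

tree = ast.parse("my[indexMy.leaf1ndex.value] += 1")
print(ast.unparse(ast.fix_missing_locations(TinyUnpacker().visit(tree))))
# leaf1ndex += 1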
mapfolding-0.3.5/mapFolding/syntheticModules/countInitialize.py → mapfolding-0.3.7/mapFolding/syntheticModules/Initialize.py
RENAMED

@@ -3,9 +3,10 @@ from typing import Any, Tuple
 from mapFolding import indexMy, indexTrack
 import numba
 from numpy import integer
-
+
+@numba.jit((numba.uint8[:, :, ::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]))
 def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
-    while my[indexMy.leaf1ndex.value]
+    while my[indexMy.leaf1ndex.value]:
         if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
             my[indexMy.dimensionsUnconstrained.value] = my[indexMy.dimensionsTotal.value]
             my[indexMy.gap1ndexCeiling.value] = track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]

@@ -35,7 +36,7 @@ def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.d
                 my[indexMy.gap1ndex.value] += 1
             track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] = 0
             my[indexMy.indexMiniGap.value] += 1
-        if my[indexMy.leaf1ndex.value]
+        if my[indexMy.leaf1ndex.value]:
             my[indexMy.gap1ndex.value] -= 1
             track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]] = gapsWhere[my[indexMy.gap1ndex.value]]
             track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]] = track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]]
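Throughout these synthetic modules, scalar state lives in small NumPy arrays indexed through enums (`indexMy`, `indexTrack`), so the whole state crosses the Numba boundary as a few typed arrays. A minimal sketch of the convention; the enum members here are illustrative, not the package's actual `indexMy`:

# Minimal sketch of the enum-indexed state-array convention; the members shown
# here are illustrative, not the package's actual indexMy definition.
import enum
import numpy

class indexMySketch(enum.IntEnum):
    leaf1ndex = 0
    gap1ndex = 1

my = numpy.zeros(len(indexMySketch), dtype=numpy.uint8)
my[indexMySketch.leaf1ndex.value] = 1      # same access pattern as the code above
while my[indexMySketch.leaf1ndex.value]:   # truthiness test, as in countInitialize
    my[indexMySketch.leaf1ndex.value] -= 1
print(my)  # [0 0]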
mapfolding-0.3.5/mapFolding/syntheticModules/countParallel.py → mapfolding-0.3.7/mapFolding/syntheticModules/Parallel.py
RENAMED

@@ -3,15 +3,16 @@ from typing import Any, Tuple
 import numba
 from mapFolding import indexMy, indexTrack
 import numpy
+
 @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
 def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWherePARALLEL: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], myPARALLEL: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], trackPARALLEL: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
     for indexSherpa in numba.prange(myPARALLEL[indexMy.taskDivisions.value]):
-        groupsOfFolds
+        groupsOfFolds = numba.types.int64(0)
         gapsWhere = gapsWherePARALLEL.copy()
         my = myPARALLEL.copy()
         my[indexMy.taskIndex.value] = indexSherpa
         track = trackPARALLEL.copy()
-        while my[indexMy.leaf1ndex.value]
+        while my[indexMy.leaf1ndex.value]:
             if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
                 if my[indexMy.leaf1ndex.value] > foldGroups[-1]:
                     groupsOfFolds += 1

@@ -39,11 +40,11 @@ def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dty
                 my[indexMy.gap1ndex.value] += 1
             track[indexTrack.countDimensionsGapped.value, gapsWhere[my[indexMy.indexMiniGap.value]]] = 0
             my[indexMy.indexMiniGap.value] += 1
-        while my[indexMy.leaf1ndex.value]
+        while my[indexMy.leaf1ndex.value] and my[indexMy.gap1ndex.value] == track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]:
             my[indexMy.leaf1ndex.value] -= 1
             track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]] = track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]]
             track[indexTrack.leafAbove.value, track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]]] = track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]
-        if my[indexMy.leaf1ndex.value]
+        if my[indexMy.leaf1ndex.value]:
             my[indexMy.gap1ndex.value] -= 1
             track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]] = gapsWhere[my[indexMy.gap1ndex.value]]
             track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]] = track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]]
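`countParallel` splits the count across `taskDivisions` tasks: each `numba.prange` iteration takes private copies of the shared arrays, stamps its own `taskIndex`, and accumulates `groupsOfFolds` independently. A minimal runnable sketch of that per-task-copy pattern; the loop body is toy work, not the folding algorithm:

# Minimal sketch of the per-task state-copy pattern used by countParallel;
# the loop body here is toy work, not the folding algorithm.
import numba
import numpy

@numba.jit(nopython=True, parallel=True)
def countAcrossTasks(myShared, taskDivisions):
    total = numpy.int64(0)
    for indexSherpa in numba.prange(taskDivisions):
        my = myShared.copy()   # private copy per task, as countParallel does
        my[0] = indexSherpa    # stamp this task's index
        total += my[0]         # scalar reduction recognized by numba
    return total

print(countAcrossTasks(numpy.zeros(4, dtype=numpy.int64), 4))  # 0+1+2+3 = 6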
mapfolding-0.3.5/mapFolding/syntheticModules/countSequential.py → mapfolding-0.3.7/mapFolding/syntheticModules/Sequential.py
RENAMED

@@ -3,6 +3,7 @@ from typing import Any, Tuple
 import numba
 from mapFolding import indexMy, indexTrack
 import numpy
+
 @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
 def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
     leafBelow = track[indexTrack.leafBelow.value]

@@ -18,9 +19,9 @@ def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.d
     indexMiniGap = my[indexMy.indexMiniGap.value]
     gap1ndex = my[indexMy.gap1ndex.value]
     taskIndex = my[indexMy.taskIndex.value]
-    groupsOfFolds
+    groupsOfFolds = numba.types.int64(0)
     doFindGaps = True
-    while leaf1ndex
+    while leaf1ndex:
         if (doFindGaps := (leaf1ndex <= 1 or leafBelow[0] == 1)) and leaf1ndex > foldGroups[-1]:
             groupsOfFolds += 1
         elif doFindGaps:

@@ -46,11 +47,11 @@ def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.d
             gap1ndex += 1
             countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
             indexMiniGap += 1
-        while leaf1ndex
+        while leaf1ndex and gap1ndex == gapRangeStart[leaf1ndex - 1]:
             leaf1ndex -= 1
             leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
             leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
-        if leaf1ndex
+        if leaf1ndex:
             gap1ndex -= 1
             leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
             leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
{mapfolding-0.3.5 → mapfolding-0.3.7}/pyproject.toml

@@ -28,7 +28,8 @@ keywords = [
     "map folding",
     "OEIS",
     "stamp folding",]
-license = { text = "CC-BY-NC-4.0" }
+license = { 'text' = "CC-BY-NC-4.0" }
+maintainers = []
 name = "mapFolding"
 optional-dependencies = { benchmark = [
     "ipywidgets",

@@ -36,19 +37,20 @@ optional-dependencies = { benchmark = [
     "pandas",
     "tqdm",
 ], testing = [
+    "attrs",
     "cffconvert",
     "more_itertools",
-    "pytest",
     "pytest-cov",
     "pytest-env",
     "pytest-xdist",
+    "pytest",
     "python_minifier",
     "tomli",] }
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.10,<3.14"
 scripts = { getOEISids = "mapFolding.oeis:getOEISids", clearOEIScache = "mapFolding.oeis:clearOEIScache", OEIS_for_n = "mapFolding.oeis:OEIS_for_n" }
-urls = { Homepage = "https://github.com/hunterhogan/mapFolding", Donate = "https://www.patreon.com/integrated" }
-version = "0.3.5"
+urls = { Homepage = "https://github.com/hunterhogan/mapFolding", Donate = "https://www.patreon.com/integrated", Repository = "https://github.com/hunterhogan/mapFolding.git" }
+version = "0.3.7"
 
 [tool.coverage]
 report = { exclude_lines = [
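The quoted key in `license = { 'text' = "CC-BY-NC-4.0" }` is TOML-equivalent to the bare-key form it replaces, and updateCitation.py reads the value back through the same table. A minimal check with `tomli` (already a dependency above):

# Minimal sketch: both spellings of the license table parse identically,
# and updateCitation.py reads the value through the same .get chain.
import tomli

for document in ('license = { text = "CC-BY-NC-4.0" }',
                 'license = { \'text\' = "CC-BY-NC-4.0" }'):
    packageData = tomli.loads(document)
    print(packageData.get("license", {}).get("text", ""))  # CC-BY-NC-4.0, both times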
mapfolding-0.3.5/mapFolding/citations/updateCitation.py

@@ -1,67 +0,0 @@
-from cffconvert.cli.create_citation import create_citation
-from typing import Any, Dict
-import cffconvert
-import pathlib
-import tomli
-import inspect
-import json
-
-"""
-Tentative plan:
-- Commit and push to GitHub
-- GitHub Action gathers information from the sources of truth
-- If the citation needs to be updated, write to both
-    - pathFilenameCitationSSOT
-    - pathFilenameCitationDOTcffRepo
-- Commit and push to GitHub
-    - this complicates things
-    - I want the updated citation to be in the `commit` field of itself
-"""
-
-"""cffconvert.Citation fields and the source of truth
-abstract: pathFilenameCitationSSOT
-authors: pathFilenamePackageSSOT
-cff-version: pathFilenameCitationSSOT
-commit: workflows['Make GitHub Release']
-contact: pathFilenamePackageSSOT
-date-released: workflows['Make GitHub Release']
-doi: pathFilenameCitationSSOT
-identifiers: workflows['Make GitHub Release']
-keywords: pathFilenamePackageSSOT
-license: pathFilenamePackageSSOT
-license-url: pathFilenamePackageSSOT
-message: pathFilenameCitationSSOT
-preferred-citation: pathFilenameCitationSSOT
-references: to be determined
-repository: pathFilenamePackageSSOT
-repository-artifact: (https://pypi.org/pypi/{package_name}/json').json()['releases']
-repository-code: workflows['Make GitHub Release']
-title: pathFilenamePackageSSOT
-type: pathFilenameCitationSSOT
-url: pathFilenamePackageSSOT
-version: pathFilenamePackageSSOT
-"""
-# Prefer reliable, dynamic values over hardcoded ones
-packageName: str = 'mapFolding'
-pathRepoRoot = pathlib.Path(__file__).parent.parent.parent
-pathFilenamePackageSSOT = pathRepoRoot / 'pyproject.toml'
-
-filenameGitHubAction = 'updateCitation.yml'
-pathFilenameGitHubAction = pathRepoRoot / '.github' / 'workflows' / filenameGitHubAction
-
-filenameCitationDOTcff = 'CITATION.cff'
-pathCitations = pathRepoRoot / packageName / 'citations'
-pathFilenameCitationSSOT = pathCitations / filenameCitationDOTcff
-pathFilenameCitationDOTcffRepo = pathRepoRoot / filenameCitationDOTcff
-
-tomlPackageData: Dict[str, Any] = tomli.loads(pathFilenamePackageSSOT.read_text())['project']
-
-citationObject: cffconvert.Citation = create_citation(infile=pathFilenameCitationSSOT, url=None)
-
-path_cffconvert = pathlib.Path(inspect.getfile(cffconvert)).parent
-pathFilenameSchema = path_cffconvert / "schemas/1.2.0/schema.json"
-scheme: Dict[str, Any] = json.loads(pathFilenameSchema.read_text())
-schemaSpecifications: Dict[str, Any] = scheme['properties']
-
-for property, subProperties in schemaSpecifications.items():
-    print(property, subProperties.get('items', None))
mapfolding-0.3.5/mapFolding/someAssemblyRequired/generalizeSourceCode.py

@@ -1,122 +0,0 @@
-from mapFolding import datatypeLargeDEFAULT, datatypeMediumDEFAULT, datatypeSmallDEFAULT
-from typing import Dict, Optional, List, Set, Union
-import ast
-
-class RecursiveInlinerWithEnum(ast.NodeTransformer):
-    """Process AST nodes to inline functions and substitute enum values.
-    Also handles function decorators during inlining."""
-
-    def __init__(self, dictionaryFunctions: Dict[str, ast.FunctionDef], dictionaryEnumValues: Dict[str, int]) -> None:
-        self.dictionaryFunctions = dictionaryFunctions
-        self.dictionaryEnumValues = dictionaryEnumValues
-        self.processed = set()
-
-    def inlineFunctionBody(self, functionName: str) -> Optional[ast.FunctionDef]:
-        if functionName in self.processed:
-            return None
-
-        self.processed.add(functionName)
-        inlineDefinition = self.dictionaryFunctions[functionName]
-        # Recursively process the function body
-        for node in ast.walk(inlineDefinition):
-            self.visit(node)
-        return inlineDefinition
-
-    def visit_Attribute(self, node: ast.Attribute) -> ast.AST:
-        # Substitute enum identifiers (e.g., indexMy.leaf1ndex.value)
-        if isinstance(node.value, ast.Attribute) and isinstance(node.value.value, ast.Name):
-            enumPath = f"{node.value.value.id}.{node.value.attr}.{node.attr}"
-            if enumPath in self.dictionaryEnumValues:
-                return ast.Constant(value=self.dictionaryEnumValues[enumPath])
-        return self.generic_visit(node)
-
-    def visit_Call(self, node: ast.Call) -> ast.AST:
-        callNode = self.generic_visit(node)
-        if isinstance(callNode, ast.Call) and isinstance(callNode.func, ast.Name) and callNode.func.id in self.dictionaryFunctions:
-            inlineDefinition = self.inlineFunctionBody(callNode.func.id)
-            if (inlineDefinition and inlineDefinition.body):
-                lastStmt = inlineDefinition.body[-1]
-                if isinstance(lastStmt, ast.Return) and lastStmt.value is not None:
-                    return self.visit(lastStmt.value)
-                elif isinstance(lastStmt, ast.Expr) and lastStmt.value is not None:
-                    return self.visit(lastStmt.value)
-                return ast.Constant(value=None)
-        return callNode
-
-    def visit_Expr(self, node: ast.Expr) -> Union[ast.AST, List[ast.AST]]:
-        if isinstance(node.value, ast.Call):
-            if isinstance(node.value.func, ast.Name) and node.value.func.id in self.dictionaryFunctions:
-                inlineDefinition = self.inlineFunctionBody(node.value.func.id)
-                if inlineDefinition:
-                    return [self.visit(stmt) for stmt in inlineDefinition.body]
-        return self.generic_visit(node)
-
-def findRequiredImports(node: ast.AST) -> Set[str]:
-    """Find all modules that need to be imported based on AST analysis.
-    NOTE: due to hardcoding, this is a glorified regex. No, wait, this is less versatile than regex."""
-    requiredImports = set()
-
-    class ImportFinder(ast.NodeVisitor):
-        def visit_Name(self, node: ast.Name) -> None:
-            if node.id in {'numba'}:
-                requiredImports.add(node.id)
-            self.generic_visit(node)
-
-        def visitDecorator(self, node: ast.AST) -> None:
-            if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
-                if node.func.id == 'jit':
-                    requiredImports.add('numba')
-            self.generic_visit(node)
-
-    ImportFinder().visit(node)
-    return requiredImports
-
-def generateImports(requiredImports: Set[str]) -> str:
-    """Generate import statements based on required modules."""
-    importStatements = {'import numba', 'from mapFolding import indexMy, indexTrack'}
-
-    importMapping = {
-        'numba': 'import numba',
-    }
-
-    for moduleName in sorted(requiredImports):
-        if moduleName in importMapping:
-            importStatements.add(importMapping[moduleName])
-
-    return '\n'.join(importStatements)
-
-def makeInlineFunction(sourceCode: str, targetFunctionName: str, dictionaryEnumValues: Dict[str, int], skipEnum: bool=False, **keywordArguments: Optional[str]):
-    datatypeLarge = keywordArguments.get('datatypeLarge', datatypeLargeDEFAULT)
-    datatypeMedium = keywordArguments.get('datatypeMedium', datatypeMediumDEFAULT)
-    datatypeSmall = keywordArguments.get('datatypeSmall', datatypeSmallDEFAULT)
-    if skipEnum:
-        dictionaryEnumValues = {}
-    dictionaryParsed = ast.parse(sourceCode)
-    dictionaryFunctions = {
-        element.name: element
-        for element in dictionaryParsed.body
-        if isinstance(element, ast.FunctionDef)
-    }
-    nodeTarget = dictionaryFunctions[targetFunctionName]
-    nodeInliner = RecursiveInlinerWithEnum(dictionaryFunctions, dictionaryEnumValues)
-    nodeInlined = nodeInliner.visit(nodeTarget)
-    ast.fix_missing_locations(nodeInlined)
-    callableInlinedDecorators = [decorator for decorator in nodeInlined.decorator_list]
-
-    requiredImports = findRequiredImports(nodeInlined)
-    importStatements = generateImports(requiredImports)
-    importsRequired = importStatements
-    dictionaryDecoratorsNumba={
-        'countInitialize':
-            f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
-        'countParallel':
-            f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeLarge}[::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=True, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
-        'countSequential':
-            f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeLarge}[::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
-    }
-
-    lineNumbaDecorator = dictionaryDecoratorsNumba[targetFunctionName]
-
-    # inlinedCode = ast.unparse(ast.Module(body=[nodeInlined], type_ignores=[]))
-    callableInlined = lineNumbaDecorator + ast.unparse(nodeInlined)
-    return (callableInlined, callableInlinedDecorators, importsRequired)
mapfolding-0.3.5/mapFolding/someAssemblyRequired/makeJob.py

@@ -1,21 +0,0 @@
-from mapFolding import getPathFilenameFoldsTotal
-from mapFolding import outfitCountFolds
-from typing import Any, Optional, Sequence, Type
-import pathlib
-import pickle
-
-def makeStateJob(listDimensions: Sequence[int], **keywordArguments: Optional[Type[Any]]) -> pathlib.Path:
-
-    stateUniversal = outfitCountFolds(listDimensions, computationDivisions=None, CPUlimit=None, **keywordArguments)
-
-    from syntheticModules import countInitialize
-    countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
-
-    pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
-    suffix = pathFilenameChopChop.suffix
-    pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
-    pathJob.mkdir(parents=True, exist_ok=True)
-    pathFilenameJob = pathJob / 'stateJob.pkl'
-
-    pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
-    return pathFilenameJob