mapFolding 0.3.8__py3-none-any.whl → 0.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. mapFolding/__init__.py +38 -0
  2. mapFolding/basecamp.py +55 -0
  3. mapFolding/beDRY.py +364 -0
  4. mapFolding/oeis.py +329 -0
  5. {someAssemblyRequired → mapFolding/someAssemblyRequired}/makeJob.py +3 -3
  6. mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
  7. {someAssemblyRequired → mapFolding/someAssemblyRequired}/synthesizeModulesNumba.py +149 -89
  8. mapFolding/syntheticModules/__init__.py +3 -0
  9. syntheticModules/numbaInitialize.py → mapFolding/syntheticModules/numba_countInitialize.py +5 -7
  10. syntheticModules/numbaParallel.py → mapFolding/syntheticModules/numba_countParallel.py +5 -4
  11. syntheticModules/numbaSequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -4
  12. mapFolding/syntheticModules/numba_doTheNeedful.py +33 -0
  13. mapFolding/theDao.py +214 -0
  14. mapFolding/theSSOT.py +269 -0
  15. mapFolding-0.3.9.dist-info/LICENSE +407 -0
  16. {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/METADATA +9 -5
  17. mapFolding-0.3.9.dist-info/RECORD +40 -0
  18. mapFolding-0.3.9.dist-info/top_level.txt +2 -0
  19. tests/__init__.py +1 -0
  20. tests/conftest.py +224 -0
  21. tests/conftest_tmpRegistry.py +62 -0
  22. tests/conftest_uniformTests.py +53 -0
  23. tests/test_oeis.py +200 -0
  24. tests/test_other.py +258 -0
  25. tests/test_tasks.py +44 -0
  26. tests/test_types.py +5 -0
  27. benchmarks/benchmarking.py +0 -67
  28. citations/constants.py +0 -3
  29. citations/updateCitation.py +0 -354
  30. mapFolding-0.3.8.dist-info/RECORD +0 -26
  31. mapFolding-0.3.8.dist-info/top_level.txt +0 -5
  32. syntheticModules/__init__.py +0 -3
  33. {reference → mapFolding/reference}/flattened.py +0 -0
  34. {reference → mapFolding/reference}/hunterNumba.py +0 -0
  35. {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
  36. {reference → mapFolding/reference}/jax.py +0 -0
  37. {reference → mapFolding/reference}/lunnan.py +0 -0
  38. {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
  39. {reference → mapFolding/reference}/lunnanWhile.py +0 -0
  40. {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
  41. {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
  42. {someAssemblyRequired → mapFolding/someAssemblyRequired}/__init__.py +0 -0
  43. {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
  44. {someAssemblyRequired → mapFolding/someAssemblyRequired}/synthesizeModuleJobNumba.py +0 -0
  45. {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/WHEEL +0 -0
  46. {mapFolding-0.3.8.dist-info → mapFolding-0.3.9.dist-info}/entry_points.txt +0 -0
@@ -1,13 +1,32 @@
1
- from mapFolding import EnumIndices, relativePathSyntheticModules, setDatatypeElephino, setDatatypeFoldsTotal, setDatatypeLeavesTotal, setDatatypeModule
2
- from mapFolding import indexMy, indexTrack, getAlgorithmSource, ParametersNumba, parametersNumbaDEFAULT, hackSSOTdatatype, hackSSOTdtype
3
- from typing import cast, Dict, List, Optional, Sequence, Set, Type, Union
1
+ from mapFolding import (
2
+ EnumIndices,
3
+ getAlgorithmSource,
4
+ getPathPackage,
5
+ getPathSyntheticModules,
6
+ hackSSOTdatatype,
7
+ hackSSOTdtype,
8
+ indexMy,
9
+ indexTrack,
10
+ moduleOfSyntheticModules,
11
+ myPackageNameIs,
12
+ ParametersNumba,
13
+ parametersNumbaDEFAULT,
14
+ setDatatypeElephino,
15
+ setDatatypeFoldsTotal,
16
+ setDatatypeLeavesTotal,
17
+ setDatatypeModule,
18
+ )
19
+ from typing import cast, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
4
20
  from types import ModuleType
21
+ from collections import namedtuple
5
22
  import ast
6
23
  import inspect
7
24
  import numba
8
25
  import numpy
9
26
  import pathlib
10
27
 
28
+ youOughtaKnow = namedtuple('youOughtaKnow', ['callableSynthesized', 'pathFilenameForMe', 'astForCompetentProgrammers'])
29
+
11
30
  """TODO
12
31
  Convert types
13
32
  e.g. `groupsOfFolds: int = 0` to `groupsOfFolds = numba.types.{datatypeLarge}(0)`
@@ -53,8 +72,8 @@ class RecursiveInliner(ast.NodeTransformer):
53
72
  self.visit(astNode)
54
73
  return inlineDefinition
55
74
 
56
- def visit_Call(self, callNode: ast.Call) -> ast.AST:
57
- callNodeVisited = self.generic_visit(callNode)
75
+ def visit_Call(self, node: ast.Call) -> ast.AST:
76
+ callNodeVisited = self.generic_visit(node)
58
77
  if (isinstance(callNodeVisited, ast.Call) and isinstance(callNodeVisited.func, ast.Name) and callNodeVisited.func.id in self.dictionaryFunctions):
59
78
  inlineDefinition = self.inlineFunctionBody(callNodeVisited.func.id)
60
79
  if (inlineDefinition and inlineDefinition.body):
@@ -83,6 +102,7 @@ def decorateCallableWithNumba(astCallable: ast.FunctionDef, parallel: bool=False
83
102
 
84
103
  Parameters
85
104
  ----------
105
+
86
106
  astCallable : ast.FunctionDef
87
107
  The AST node representing the function to be decorated with Numba JIT.
88
108
  parallel : bool, optional
@@ -91,6 +111,7 @@ def decorateCallableWithNumba(astCallable: ast.FunctionDef, parallel: bool=False
91
111
 
92
112
  Returns
93
113
  -------
114
+
94
115
  ast.FunctionDef
95
116
  The modified AST function definition node with added Numba decorators.
96
117
 
@@ -171,7 +192,7 @@ def decorateCallableWithNumba(astCallable: ast.FunctionDef, parallel: bool=False
171
192
 
172
193
  astArgsNumbaSignature = ast.Tuple(elts=listNumbaParameterSignature, ctx=ast.Load())
173
194
 
174
- if astCallable.name == 'countInitialize':
195
+ if astCallable.name == 'countInitialize' or astCallable.name == 'doTheNeedful':
175
196
  parametersNumba = {}
176
197
  else:
177
198
  parametersNumba = parametersNumbaDEFAULT if not parallel else ParametersNumba({**parametersNumbaDEFAULT, 'parallel': True})
@@ -289,7 +310,7 @@ class UnpackArrayAccesses(ast.NodeTransformer):
289
310
  node.body = initializations + node.body
290
311
  return node
291
312
 
292
- def inlineOneCallable(codeSource, callableTarget):
313
+ def inlineOneCallable(codeSource: str, callableTarget: str):
293
314
  """
294
315
  Inlines a target callable function and its dependencies within the provided code source.
295
316
 
@@ -343,104 +364,143 @@ def inlineOneCallable(codeSource, callableTarget):
343
364
  moduleSource = ast.unparse(moduleAST)
344
365
  return moduleSource
345
366
 
346
- class AppendDunderInit(ast.NodeTransformer):
347
- """AST transformer that validates and appends imports to __init__.py files."""
348
-
349
- def __init__(self, listPathFilenamesDestination: list[tuple[pathlib.Path, str]]):
350
- self.listPathFilenamesDestination = listPathFilenamesDestination
351
- self.listTuplesDunderInit = []
352
-
353
- def process_init_files(self) -> list[tuple[pathlib.Path, str]]:
354
- for pathFilename, callableTarget in self.listPathFilenamesDestination:
355
- pathDunderInit = pathFilename.parent / "__init__.py"
356
-
357
- # Create empty init if doesn't exist
358
- if not pathDunderInit.exists():
359
- pathDunderInit.write_text("")
360
-
361
- # Parse existing init file
362
- try:
363
- treeInit = ast.parse(pathDunderInit.read_text())
364
- except SyntaxError:
365
- treeInit = ast.Module(body=[], type_ignores=[])
366
-
367
- # Compute the lowercase module target
368
- moduleTarget = "." + pathFilename.stem
369
- moduleTargetLower = moduleTarget.lower()
370
-
371
- # Track existing imports as (normalizedModule, name)
372
- setImportsExisting = set()
373
- for node in treeInit.body:
374
- if isinstance(node, ast.ImportFrom) and node.module:
375
- # Compare on a lowercase basis
376
- if node.module.lower() == moduleTargetLower:
377
- for alias in node.names:
378
- setImportsExisting.add((moduleTargetLower, alias.name))
379
-
380
- # Only append if this exact import doesn't exist
381
- if (moduleTargetLower, callableTarget) not in setImportsExisting:
382
- newImport = ast.ImportFrom(
383
- module=moduleTarget,
384
- names=[ast.alias(name=callableTarget, asname=None)],
385
- level=0
386
- )
387
- treeInit.body.append(newImport)
388
- ast.fix_missing_locations(treeInit)
389
- pathDunderInit.write_text(ast.unparse(treeInit))
390
-
391
- self.listTuplesDunderInit.append((pathDunderInit, callableTarget))
392
-
393
- return self.listTuplesDunderInit
394
-
395
- def inlineMapFoldingNumba(listCallablesAsStr: List[str], algorithmSource: Optional[ModuleType] = None):
396
- """Synthesizes numba-optimized versions of map folding functions.
397
- This function creates specialized versions of map folding functions by inlining
398
- target callables and generating optimized modules. It handles the code generation
399
- and file writing process.
367
+ def makeDispatcherNumba(codeSource: str, callableTarget: str, listStuffYouOughtaKnow: List[youOughtaKnow]) -> str:
368
+ """Creates AST for the dispatcher module that coordinates the optimized functions."""
369
+ docstringDispatcherNumba = """
370
+ What in tarnation is this stupid module and function?
371
+
372
+ - This function is not in the same module as `countFolds` so that we can delay Numba just-in-time (jit) compilation of this function and the finalization of its settings until we are ready.
373
+ - This function is not in the same module as the next function, which does the hard work, so that we can delay `numba.jit` compilation of the next function.
374
+ - This function is "jitted" but the next function is super jitted, which makes it too arrogant to talk to plebian Python functions. It will, however, reluctantly talk to basic jitted functions.
375
+ - So this module can talk to the next function, and because this module isn't as arrogant, it will talk to the low-class `countFolds` that called this function. Well, with a few restrictions, of course:
376
+ - No `TypedDict`
377
+ - The plebs must clean up their own memory problems
378
+ - No oversized integers
379
+ - No global variables, only global constants
380
+ - It won't accept pleb nonlocal variables either
381
+ - Python "class": they are all inferior to the jit class
382
+ - No `**kwargs`
383
+ - and just a few dozen-jillion other things.
384
+ """
400
385
 
401
- Parameters:
402
- listCallablesAsStr (List[str]): List of callable names to be processed as strings.
403
- algorithmSource (Optional[ModuleType], optional): Source module containing the algorithms.
404
- If None, will be obtained via getAlgorithmSource(). Defaults to None.
386
+ # Parse source code
387
+ sourceAST = ast.parse(codeSource)
405
388
 
406
- Returns:
407
- List[Tuple[pathlib.Path, str]]: List of tuples containing:
408
- - Generated file paths
409
- - Associated callable names
389
+ # Extract imports and target function definition
390
+ importsAST = [node for node in sourceAST.body if isinstance(node, (ast.Import, ast.ImportFrom))]
391
+ FunctionDefTarget = next((node for node in sourceAST.body if isinstance(node, ast.FunctionDef) and node.name == callableTarget), None)
410
392
 
411
- Raises:
412
- Exception: If inline operation fails during code generation.
393
+ if not FunctionDefTarget:
394
+ raise ValueError(f"Could not find function {callableTarget} in source code")
395
+
396
+ # Zero-out the decorator list
397
+ FunctionDefTarget.decorator_list=[]
398
+ # TODO: more explicit handling of decorators. I'm able to ignore this because I know `algorithmSource` doesn't have any decorators.
399
+ # FunctionDefTargetDecorators = [decorator for decorator in FunctionDefTarget.decorator_list]
400
+
401
+ # Add Numba decorator
402
+ FunctionDefTarget = decorateCallableWithNumba(FunctionDefTarget, parallel=False)
403
+ FunctionDefTarget.body.insert(0, ast.Expr(value=ast.Constant(value=docstringDispatcherNumba)))
404
+
405
+ # Combine everything into a module
406
+ moduleAST = ast.Module(
407
+ body=cast(List[ast.stmt]
408
+ , importsAST
409
+ + [Don_Lapre_The_Road_to_Self_Improvement_For_Programmers_by_Using_Short_Identifiers.astForCompetentProgrammers
410
+ for Don_Lapre_The_Road_to_Self_Improvement_For_Programmers_by_Using_Short_Identifiers in listStuffYouOughtaKnow]
411
+ + [FunctionDefTarget])
412
+ , type_ignores=[]
413
+ )
414
+
415
+ ast.fix_missing_locations(moduleAST)
416
+ return ast.unparse(moduleAST)
417
+
418
+ def makeNumbaOptimizedFlow(listCallablesInline: List[str], callableDispatcher: Optional[str] = None, algorithmSource: Optional[ModuleType] = None):
419
+ """Synthesizes numba-optimized versions of map folding functions."""
413
420
 
414
- Note:
415
- - Generated files are placed in a synthetic modules subdirectory
416
- - Modifies __init__.py files to expose generated modules
417
- - Current implementation contains hardcoded paths that should be abstracted
418
- """
419
421
  if not algorithmSource:
420
422
  algorithmSource = getAlgorithmSource()
421
423
 
422
- listPathFilenamesDestination: list[tuple[pathlib.Path, str]] = []
424
+ formatModuleNameDEFAULT = "numba_{callableTarget}"
425
+
426
+ # When I am a more competent programmer, I will make getPathFilenameWrite dependent on makeAstImport or vice versa,
427
+ # so the name of the physical file doesn't get out of whack with the name of the logical module.
428
+ def getPathFilenameWrite(callableTarget: str
429
+ , pathWrite: Optional[pathlib.Path] = None
430
+ , formatFilenameWrite: Optional[str] = None
431
+ ) -> pathlib.Path:
432
+ if not pathWrite:
433
+ pathWrite = getPathSyntheticModules()
434
+ if not formatFilenameWrite:
435
+ formatFilenameWrite = formatModuleNameDEFAULT + '.py'
436
+
437
+ pathFilename = pathWrite / formatFilenameWrite.format(callableTarget=callableTarget)
438
+ return pathFilename
439
+
440
+ def makeAstImport(callableTarget: str
441
+ , packageName: Optional[str] = None
442
+ , subPackageName: Optional[str] = None
443
+ , moduleName: Optional[str] = None
444
+ , astNodeLogicalPathThingy: Optional[ast.AST] = None
445
+ ) -> ast.ImportFrom:
446
+ """Creates import AST node for synthetic modules."""
447
+ if astNodeLogicalPathThingy is None:
448
+ if packageName is None:
449
+ packageName = myPackageNameIs
450
+ if subPackageName is None:
451
+ subPackageName = moduleOfSyntheticModules
452
+ if moduleName is None:
453
+ moduleName = formatModuleNameDEFAULT.format(callableTarget=callableTarget)
454
+ module=f'{packageName}.{subPackageName}.{moduleName}'
455
+ else:
456
+ module = str(astNodeLogicalPathThingy)
457
+ return ast.ImportFrom(
458
+ module=module,
459
+ names=[ast.alias(name=callableTarget, asname=None)],
460
+ level=0
461
+ )
462
+
463
+ listStuffYouOughtaKnow: List[youOughtaKnow] = []
464
+
465
+ for callableTarget in listCallablesInline:
466
+ codeSource = inspect.getsource(algorithmSource)
467
+ moduleSource = inlineOneCallable(codeSource, callableTarget)
468
+ if not moduleSource:
469
+ raise Exception("Pylance, OMG! The sky is falling!")
470
+
471
+ pathFilename = getPathFilenameWrite(callableTarget)
472
+ astImport = makeAstImport(callableTarget)
423
473
 
424
- # TODO abstract this process
425
- # especially remove the hardcoded paths and filenames
474
+ listStuffYouOughtaKnow.append(youOughtaKnow(
475
+ callableSynthesized=callableTarget,
476
+ pathFilenameForMe=pathFilename,
477
+ astForCompetentProgrammers=astImport
478
+ ))
479
+ pathFilename.write_text(moduleSource)
426
480
 
427
- for callableTarget in listCallablesAsStr:
481
+ # Generate dispatcher if requested
482
+ if callableDispatcher:
428
483
  codeSource = inspect.getsource(algorithmSource)
429
- moduleSource = inlineOneCallable(codeSource, callableTarget)
484
+ moduleSource = makeDispatcherNumba(codeSource, callableDispatcher, listStuffYouOughtaKnow)
430
485
  if not moduleSource:
431
486
  raise Exception("Pylance, OMG! The sky is falling!")
432
- pathFilenameAlgorithm = pathlib.Path(inspect.getfile(algorithmSource))
433
- pathFilenameDestination = pathFilenameAlgorithm.parent / relativePathSyntheticModules / pathFilenameAlgorithm.with_stem("numba"+callableTarget[5:None]).name
434
- pathFilenameDestination.write_text(moduleSource)
435
- listPathFilenamesDestination.append((pathFilenameDestination, callableTarget))
436
487
 
437
- # This almost works: it duplicates existing imports, though
438
- listTuplesDunderInit = AppendDunderInit(listPathFilenamesDestination).process_init_files()
488
+ pathFilename = getPathFilenameWrite(callableDispatcher)
489
+ astImport = makeAstImport(callableDispatcher)
490
+
491
+ listStuffYouOughtaKnow.append(youOughtaKnow(
492
+ callableSynthesized=callableDispatcher,
493
+ pathFilenameForMe=pathFilename,
494
+ astForCompetentProgrammers=astImport
495
+ ))
496
+ pathFilename.write_text(moduleSource)
439
497
 
440
498
  if __name__ == '__main__':
441
- listCallablesAsStr: List[str] = ['countInitialize', 'countParallel', 'countSequential']
442
499
  setDatatypeModule('numpy', sourGrapes=True)
443
500
  setDatatypeFoldsTotal('int64', sourGrapes=True)
444
501
  setDatatypeElephino('uint8', sourGrapes=True)
445
502
  setDatatypeLeavesTotal('uint8', sourGrapes=True)
446
- inlineMapFoldingNumba(listCallablesAsStr)
503
+ listCallablesInline: List[str] = ['countInitialize', 'countParallel', 'countSequential']
504
+ callableDispatcher = 'doTheNeedful'
505
+ makeNumbaOptimizedFlow(listCallablesInline, callableDispatcher)
506
+ # makeNumbaOptimizedFlow(listCallablesInline)
@@ -0,0 +1,3 @@
1
+ from .numba_countInitialize import countInitialize
2
+ from .numba_countParallel import countParallel
3
+ from .numba_countSequential import countSequential
@@ -1,14 +1,12 @@
1
+ from numpy.typing import NDArray
1
2
  import numpy
2
- from typing import Any, Tuple
3
3
  from numpy import integer
4
- from mapFolding import indexMy, indexTrack
5
4
  import numba
5
+ from mapFolding import indexMy, indexTrack
6
+ from typing import Any, Tuple
6
7
 
7
8
  @numba.jit((numba.uint8[:, :, ::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]))
8
- def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]]
9
- , gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
10
- , my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
11
- , track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
9
+ def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
12
10
  while my[indexMy.leaf1ndex.value]:
13
11
  if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
14
12
  my[indexMy.dimensionsUnconstrained.value] = my[indexMy.dimensionsTotal.value]
@@ -48,4 +46,4 @@ def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.d
48
46
  track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value]] = my[indexMy.gap1ndex.value]
49
47
  my[indexMy.leaf1ndex.value] += 1
50
48
  if my[indexMy.gap1ndex.value] > 0:
51
- return
49
+ return
@@ -1,11 +1,12 @@
1
- from typing import Any, Tuple
2
- from numpy import integer
1
+ from numpy.typing import NDArray
3
2
  import numpy
4
- from mapFolding import indexMy, indexTrack
3
+ from numpy import integer
5
4
  import numba
5
+ from mapFolding import indexMy, indexTrack
6
+ from typing import Any, Tuple
6
7
 
7
8
  @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
8
- def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
9
+ def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
9
10
  gapsWherePARALLEL = gapsWhere.copy()
10
11
  myPARALLEL = my.copy()
11
12
  trackPARALLEL = track.copy()
@@ -1,11 +1,12 @@
1
- from typing import Any, Tuple
2
- from numpy import integer
1
+ from numpy.typing import NDArray
3
2
  import numpy
4
- from mapFolding import indexMy, indexTrack
3
+ from numpy import integer
5
4
  import numba
5
+ from mapFolding import indexMy, indexTrack
6
+ from typing import Any, Tuple
6
7
 
7
8
  @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
8
- def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
9
+ def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
9
10
  leafBelow = track[indexTrack.leafBelow.value]
10
11
  gapRangeStart = track[indexTrack.gapRangeStart.value]
11
12
  countDimensionsGapped = track[indexTrack.countDimensionsGapped.value]
@@ -0,0 +1,33 @@
1
+ from mapFolding import indexMy, indexTrack
2
+ from numpy import integer
3
+ from numpy.typing import NDArray
4
+ from typing import Any, Tuple
5
+ import numba
6
+ import numpy
7
+ from mapFolding.syntheticModules.numba_countInitialize import countInitialize
8
+ from mapFolding.syntheticModules.numba_countParallel import countParallel
9
+ from mapFolding.syntheticModules.numba_countSequential import countSequential
10
+
11
+ @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]))
12
+ def doTheNeedful(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], mapShape: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
13
+ """
14
+ What in tarnation is this stupid module and function?
15
+
16
+ - This function is not in the same module as `countFolds` so that we can delay Numba just-in-time (jit) compilation of this function and the finalization of its settings until we are ready.
17
+ - This function is not in the same module as the next function, which does the hard work, so that we can delay `numba.jit` compilation of the next function.
18
+ - This function is "jitted" but the next function is super jitted, which makes it too arrogant to talk to plebian Python functions. It will, however, reluctantly talk to basic jitted functions.
19
+ - So this module can talk to the next function, and because this module isn't as arrogant, it will talk to the low-class `countFolds` that called this function. Well, with a few restrictions, of course:
20
+ - No `TypedDict`
21
+ - The plebs must clean up their own memory problems
22
+ - No oversized integers
23
+ - No global variables, only global constants
24
+ - It won't accept pleb nonlocal variables either
25
+ - Python "class": they are all inferior to the jit class
26
+ - No `**kwargs`
27
+ - and just a few dozen-jillion other things.
28
+ """
29
+ countInitialize(connectionGraph, gapsWhere, my, track)
30
+ if my[indexMy.taskDivisions.value] > 0:
31
+ countParallel(connectionGraph, foldGroups, gapsWhere, my, track)
32
+ else:
33
+ countSequential(connectionGraph, foldGroups, gapsWhere, my, track)
mapFolding/theDao.py ADDED
@@ -0,0 +1,214 @@
1
+ from mapFolding import indexMy, indexTrack
2
+ from numpy import integer
3
+ from numpy.typing import NDArray
4
+ from typing import Any, Tuple
5
+ import numba
6
+ import numpy
7
+
8
def activeGapIncrement(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Advance the active gap 1-index in the `my` state vector by one."""
	my[indexMy.gap1ndex.value] = my[indexMy.gap1ndex.value] + 1
10
+
11
def activeLeafGreaterThan0Condition(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Loop guard: truthy while the active leaf 1-index is nonzero."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	return activeLeaf1ndex
13
+
14
def activeLeafGreaterThanLeavesTotalCondition(foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""True when every leaf has been placed: the last slot of `foldGroups` holds the leaf total."""
	leavesTotal = foldGroups[-1]
	return my[indexMy.leaf1ndex.value] > leavesTotal
16
+
17
def activeLeafIsTheFirstLeafCondition(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""True when the active leaf is leaf 1 (or 0), i.e. the very start of a folding."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	return activeLeaf1ndex <= 1
19
+
20
def allDimensionsAreUnconstrained(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""True when the unconstrained-dimensions counter has been decremented to zero."""
	remainingUnconstrained = my[indexMy.dimensionsUnconstrained.value]
	return not remainingUnconstrained
22
+
23
def backtrack(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Retreat one leaf: unsplice the previous leaf from the leafAbove/leafBelow rings.

	Decrements the active leaf 1-index, then relinks that leaf's neighbors to
	each other, removing the leaf from the doubly linked structure in `track`.
	"""
	my[indexMy.leaf1ndex.value] -= 1
	retreatingLeaf = my[indexMy.leaf1ndex.value]
	# Hoisting both neighbor reads is safe: the only aliasing write below
	# (when a neighbor equals the leaf itself) stores an unchanged value.
	neighborAbove = track[indexTrack.leafAbove.value, retreatingLeaf]
	neighborBelow = track[indexTrack.leafBelow.value, retreatingLeaf]
	track[indexTrack.leafBelow.value, neighborAbove] = neighborBelow
	track[indexTrack.leafAbove.value, neighborBelow] = neighborAbove
27
+
28
def backtrackCondition(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> Any:
	"""Truthy while a leaf exists and its gap stack is exhausted (gap1ndex back at the recorded start)."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	return activeLeaf1ndex and my[indexMy.gap1ndex.value] == track[indexTrack.gapRangeStart.value, activeLeaf1ndex - 1]
30
+
31
def gap1ndexCeilingIncrement(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Raise the ceiling of the candidate-gap region by one slot."""
	my[indexMy.gap1ndexCeiling.value] = my[indexMy.gap1ndexCeiling.value] + 1
33
+
34
def countGaps(gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Record the current connectee as a candidate gap.

	The ceiling advances only the first time a connectee is seen (its
	per-dimension gap count is still zero); the count is bumped regardless.
	"""
	connectee = my[indexMy.leafConnectee.value]
	gapsWhere[my[indexMy.gap1ndexCeiling.value]] = connectee
	if not track[indexTrack.countDimensionsGapped.value, connectee]:
		gap1ndexCeilingIncrement(my=my)
	track[indexTrack.countDimensionsGapped.value, connectee] += 1
39
+
40
def dimension1ndexIncrement(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Step to the next dimension index."""
	my[indexMy.indexDimension.value] = my[indexMy.indexDimension.value] + 1
42
+
43
def dimensionsUnconstrainedCondition(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""True when the active leaf is its own connectee in this dimension, i.e. the dimension imposes no constraint."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	return connectionGraph[my[indexMy.indexDimension.value], activeLeaf1ndex, activeLeaf1ndex] == activeLeaf1ndex
45
+
46
def dimensionsUnconstrainedDecrement(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Lower the count of dimensions still unconstrained by one."""
	my[indexMy.dimensionsUnconstrained.value] = my[indexMy.dimensionsUnconstrained.value] - 1
48
+
49
def filterCommonGaps(gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Compact the candidate gaps: keep only leaves gapped in every constrained dimension.

	A candidate survives (the active gap index advances over it) exactly when
	its per-dimension gap count equals the number of constrained dimensions;
	the count is reset to zero in either case.
	"""
	candidate = gapsWhere[my[indexMy.indexMiniGap.value]]
	# If gap1ndex == indexMiniGap this stores the value onto itself — harmless.
	gapsWhere[my[indexMy.gap1ndex.value]] = candidate
	if track[indexTrack.countDimensionsGapped.value, candidate] == my[indexMy.dimensionsUnconstrained.value]:
		activeGapIncrement(my=my)
	track[indexTrack.countDimensionsGapped.value, candidate] = 0
54
+
55
def findGapsInitializeVariables(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Reset per-leaf gap-search state: dimension cursor, unconstrained count, and gap ceiling."""
	my[indexMy.indexDimension.value] = 0
	my[indexMy.dimensionsUnconstrained.value] = my[indexMy.dimensionsTotal.value]
	my[indexMy.gap1ndexCeiling.value] = track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value] - 1]
59
+
60
def indexMiniGapIncrement(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Step the mini-gap cursor forward by one."""
	my[indexMy.indexMiniGap.value] = my[indexMy.indexMiniGap.value] + 1
62
+
63
def indexMiniGapInitialization(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Start the mini-gap cursor at the current active gap index."""
	my[indexMy.indexMiniGap.value] = my[indexMy.gap1ndex.value]
65
+
66
def insertUnconstrainedLeaf(gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""When no dimension constrains the active leaf, every earlier position is a gap.

	Appends leaf indices 0..leaf1ndex-1 to the candidate region, advancing the
	ceiling for each. The loop counter deliberately lives in `my[indexLeaf]`
	(not a Python local) to keep all state in the shared state vector.
	"""
	my[indexMy.indexLeaf.value] = 0
	while my[indexMy.indexLeaf.value] < my[indexMy.leaf1ndex.value]:
		gapsWhere[my[indexMy.gap1ndexCeiling.value]] = my[indexMy.indexLeaf.value]
		my[indexMy.indexLeaf.value] = my[indexMy.indexLeaf.value] + 1
		my[indexMy.gap1ndexCeiling.value] = my[indexMy.gap1ndexCeiling.value] + 1
72
+
73
def leafBelowSentinelIs1Condition(track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> Any:
	"""True when the sentinel's below-link points at leaf 1."""
	sentinelBelow = track[indexTrack.leafBelow.value, 0]
	return sentinelBelow == 1
75
+
76
def leafConnecteeInitialization(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> None:
	"""Seed the connectee walk at the active leaf's own connection in the current dimension."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.indexDimension.value], activeLeaf1ndex, activeLeaf1ndex]
78
+
79
def leafConnecteeUpdate(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Advance the connectee walk: follow the below-link of the current connectee through the graph."""
	belowCurrentConnectee = track[indexTrack.leafBelow.value, my[indexMy.leafConnectee.value]]
	my[indexMy.leafConnectee.value] = connectionGraph[my[indexMy.indexDimension.value], my[indexMy.leaf1ndex.value], belowCurrentConnectee]
81
+
82
def loopingLeavesConnectedToActiveLeaf(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Loop guard: the connectee walk terminates when it returns to the active leaf."""
	connectee = my[indexMy.leafConnectee.value]
	return connectee != my[indexMy.leaf1ndex.value]
84
+
85
def loopingTheDimensions(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Loop guard: truthy while the dimension cursor has not reached the dimension total."""
	dimensionCursor = my[indexMy.indexDimension.value]
	return dimensionCursor < my[indexMy.dimensionsTotal.value]
87
+
88
def loopingToActiveGapCeiling(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Loop guard: truthy while the mini-gap cursor is below the gap ceiling."""
	miniGapCursor = my[indexMy.indexMiniGap.value]
	return miniGapCursor < my[indexMy.gap1ndexCeiling.value]
90
+
91
def placeLeaf(gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Insert the active leaf into the folding at the most recently found gap.

	Pops the chosen gap (decrementing gap1ndex), splices the active leaf into
	the leafAbove/leafBelow doubly linked rings at that position, records where
	this leaf's gap range starts (for backtracking), and advances to the next
	leaf. NOTE(review): the statement order is load-bearing — each line reads
	links the previous lines may have rewritten; do not reorder or hoist.
	"""
	my[indexMy.gap1ndex.value] -= 1  # pop the chosen gap off the candidate stack
	track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]] = gapsWhere[my[indexMy.gap1ndex.value]]
	track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]] = track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]]
	track[indexTrack.leafBelow.value, track[indexTrack.leafAbove.value, my[indexMy.leaf1ndex.value]]] = my[indexMy.leaf1ndex.value]
	track[indexTrack.leafAbove.value, track[indexTrack.leafBelow.value, my[indexMy.leaf1ndex.value]]] = my[indexMy.leaf1ndex.value]
	track[indexTrack.gapRangeStart.value, my[indexMy.leaf1ndex.value]] = my[indexMy.gap1ndex.value]
	my[indexMy.leaf1ndex.value] += 1  # move on to placing the next leaf
99
+
100
def placeLeafCondition(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Truthy when there is an active leaf (nonzero 1-index) to place."""
	activeLeaf1ndex = my[indexMy.leaf1ndex.value]
	return activeLeaf1ndex
102
+
103
def thereAreComputationDivisionsYouMightSkip(my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]) -> Any:
	"""Decide whether this task should process the current connectee.

	At the division leaf, work is sharded: a connectee belongs to this task only
	when `connectee % taskDivisions == taskIndex`. Away from that leaf every
	task processes everything.
	"""
	divisions = my[indexMy.taskDivisions.value]
	return my[indexMy.leaf1ndex.value] != divisions or my[indexMy.leafConnectee.value] % divisions == my[indexMy.taskIndex.value]
105
+
106
def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]]
		, gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Advance the folding state until the first gap stack is established.

	Runs the same find-gaps/place-leaf machinery as the counters, but exits as
	soon as `gap1ndex > 0`, leaving `my`, `track`, and `gapsWhere` positioned
	for `countParallel` or `countSequential` to take over. All mutation is in
	place; nothing is returned.
	"""
	while activeLeafGreaterThan0Condition(my=my):
		# Gap discovery runs only at the start of a folding or when the
		# sentinel's below-link points at leaf 1.
		if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
			findGapsInitializeVariables(my=my, track=track)
			# Scan every dimension, walking the connectees of the active leaf.
			while loopingTheDimensions(my=my):
				if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
					dimensionsUnconstrainedDecrement(my=my)
				else:
					leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
					while loopingLeavesConnectedToActiveLeaf(my=my):
						countGaps(gapsWhere=gapsWhere, my=my, track=track)
						leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
				dimension1ndexIncrement(my=my)
			# With no constraining dimension, every earlier leaf is a gap.
			if allDimensionsAreUnconstrained(my=my):
				insertUnconstrainedLeaf(gapsWhere=gapsWhere, my=my)
			# Keep only gaps common to all constrained dimensions.
			indexMiniGapInitialization(my=my)
			while loopingToActiveGapCeiling(my=my):
				filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
				indexMiniGapIncrement(my=my)
		if placeLeafCondition(my=my):
			placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
		# Initialization is done the moment a nonempty gap stack exists.
		if my[indexMy.gap1ndex.value] > 0:
			return
132
+
133
def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]]
		, foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Count fold groups with the work sharded across `taskDivisions` tasks.

	Snapshots the incoming state, then (via `numba.prange`; a plain range when
	not jitted) gives each task its own copies plus a distinct `taskIndex`.
	`thereAreComputationDivisionsYouMightSkip` filters each task to its shard.
	Each task writes its subtotal into `foldGroups[taskIndex]`; nothing is
	returned.
	"""
	# Pristine snapshots: every task must start from the same state.
	gapsWherePARALLEL = gapsWhere.copy()
	myPARALLEL = my.copy()
	trackPARALLEL = track.copy()
	taskDivisionsPrange = myPARALLEL[indexMy.taskDivisions.value]
	for indexSherpa in numba.prange(taskDivisionsPrange):
		groupsOfFolds: int = 0
		# Per-task private copies; rebinding the parameter names is deliberate.
		gapsWhere = gapsWherePARALLEL.copy()
		my = myPARALLEL.copy()
		my[indexMy.taskIndex.value] = indexSherpa
		track = trackPARALLEL.copy()
		while activeLeafGreaterThan0Condition(my=my):
			if activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track):
				# All leaves placed: one more completed fold group.
				if activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my):
					groupsOfFolds += 1
				else:
					findGapsInitializeVariables(my=my, track=track)
					while loopingTheDimensions(my=my):
						if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
							dimensionsUnconstrainedDecrement(my=my)
						else:
							leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
							while loopingLeavesConnectedToActiveLeaf(my=my):
								# Only count gaps belonging to this task's shard.
								if thereAreComputationDivisionsYouMightSkip(my=my):
									countGaps(gapsWhere=gapsWhere, my=my, track=track)
								leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
						dimension1ndexIncrement(my=my)
					indexMiniGapInitialization(my=my)
					while loopingToActiveGapCeiling(my=my):
						filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
						indexMiniGapIncrement(my=my)
			# Unwind leaves whose gap stacks are exhausted, then place the next leaf.
			while backtrackCondition(my=my, track=track):
				backtrack(my=my, track=track)
			if placeLeafCondition(my=my):
				placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
		foldGroups[my[indexMy.taskIndex.value]] = groupsOfFolds
173
+
174
def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
	"""Count fold groups in a single task, writing the total into `foldGroups[taskIndex]`.

	Same state machine as one task of `countParallel`, without sharding: the
	walrus assignment caches the "should we look for gaps" test so it is
	evaluated once per iteration and reused by the `elif`.
	"""
	groupsOfFolds: int = 0
	doFindGaps = True  # seeds the name; the walrus below rebinds it each iteration
	while activeLeafGreaterThan0Condition(my=my):
		# If gap discovery applies AND all leaves are placed, a fold group is complete.
		if ((doFindGaps := activeLeafIsTheFirstLeafCondition(my=my) or leafBelowSentinelIs1Condition(track=track))
			and activeLeafGreaterThanLeavesTotalCondition(foldGroups=foldGroups, my=my)):
			groupsOfFolds += 1
		elif doFindGaps:
			findGapsInitializeVariables(my=my, track=track)
			while loopingTheDimensions(my=my):
				if dimensionsUnconstrainedCondition(connectionGraph=connectionGraph, my=my):
					dimensionsUnconstrainedDecrement(my=my)
				else:
					leafConnecteeInitialization(connectionGraph=connectionGraph, my=my)
					while loopingLeavesConnectedToActiveLeaf(my=my):
						countGaps(gapsWhere=gapsWhere, my=my, track=track)
						leafConnecteeUpdate(connectionGraph=connectionGraph, my=my, track=track)
				dimension1ndexIncrement(my=my)
			indexMiniGapInitialization(my=my)
			while loopingToActiveGapCeiling(my=my):
				filterCommonGaps(gapsWhere=gapsWhere, my=my, track=track)
				indexMiniGapIncrement(my=my)
		# Unwind exhausted leaves, then place the next leaf if one remains.
		while backtrackCondition(my=my, track=track):
			backtrack(my=my, track=track)
		if placeLeafCondition(my=my):
			placeLeaf(gapsWhere=gapsWhere, my=my, track=track)
	foldGroups[my[indexMy.taskIndex.value]] = groupsOfFolds
201
+
202
def doTheNeedful(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]]
		, foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, mapShape: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]]
		, track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]
		) -> None:
	"""Initialize the fold state, then run the appropriate counter.

	`countInitialize` positions the shared state; a positive `taskDivisions`
	selects the sharded parallel counter, otherwise the sequential counter
	runs. Results land in `foldGroups`; nothing is returned. `mapShape` is
	unread here — presumably retained for signature parity with the jitted
	variant; confirm before removing.
	"""
	countInitialize(connectionGraph, gapsWhere, my, track)
	taskDivisions = my[indexMy.taskDivisions.value]
	if taskDivisions > 0:
		countParallel(connectionGraph, foldGroups, gapsWhere, my, track)
	else:
		countSequential(connectionGraph, foldGroups, gapsWhere, my, track)