mapFolding 0.3.7__py3-none-any.whl → 0.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. mapFolding/__init__.py +38 -0
  2. mapFolding/basecamp.py +55 -0
  3. mapFolding/beDRY.py +364 -0
  4. mapFolding/oeis.py +329 -0
  5. mapFolding/someAssemblyRequired/makeJob.py +62 -0
  6. mapFolding/someAssemblyRequired/synthesizeModuleJAX.py +29 -0
  7. someAssemblyRequired/synthesizeModuleJob.py → mapFolding/someAssemblyRequired/synthesizeModuleJobNumba.py +81 -27
  8. mapFolding/someAssemblyRequired/synthesizeModulesNumba.py +506 -0
  9. mapFolding/syntheticModules/__init__.py +3 -0
  10. syntheticModules/Initialize.py → mapFolding/syntheticModules/numba_countInitialize.py +5 -4
  11. syntheticModules/Parallel.py → mapFolding/syntheticModules/numba_countParallel.py +10 -5
  12. syntheticModules/Sequential.py → mapFolding/syntheticModules/numba_countSequential.py +5 -4
  13. mapFolding/syntheticModules/numba_doTheNeedful.py +33 -0
  14. mapFolding/theDao.py +214 -0
  15. mapFolding/theSSOT.py +269 -0
  16. mapFolding-0.3.9.dist-info/LICENSE +407 -0
  17. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/METADATA +9 -5
  18. mapFolding-0.3.9.dist-info/RECORD +40 -0
  19. mapFolding-0.3.9.dist-info/top_level.txt +2 -0
  20. tests/__init__.py +1 -0
  21. tests/conftest.py +224 -0
  22. tests/conftest_tmpRegistry.py +62 -0
  23. tests/conftest_uniformTests.py +53 -0
  24. tests/test_oeis.py +200 -0
  25. tests/test_other.py +258 -0
  26. tests/test_tasks.py +44 -0
  27. tests/test_types.py +5 -0
  28. benchmarks/benchmarking.py +0 -67
  29. citations/updateCitation.py +0 -238
  30. mapFolding-0.3.7.dist-info/RECORD +0 -25
  31. mapFolding-0.3.7.dist-info/top_level.txt +0 -5
  32. someAssemblyRequired/makeJob.py +0 -34
  33. someAssemblyRequired/synthesizeModules.py +0 -216
  34. syntheticModules/__init__.py +0 -4
  35. {reference → mapFolding/reference}/flattened.py +0 -0
  36. {reference → mapFolding/reference}/hunterNumba.py +0 -0
  37. {reference → mapFolding/reference}/irvineJavaPort.py +0 -0
  38. {reference → mapFolding/reference}/jax.py +0 -0
  39. {reference → mapFolding/reference}/lunnan.py +0 -0
  40. {reference → mapFolding/reference}/lunnanNumpy.py +0 -0
  41. {reference → mapFolding/reference}/lunnanWhile.py +0 -0
  42. {reference → mapFolding/reference}/rotatedEntryPoint.py +0 -0
  43. {reference → mapFolding/reference}/total_countPlus1vsPlusN.py +0 -0
  44. {someAssemblyRequired → mapFolding/someAssemblyRequired}/__init__.py +0 -0
  45. {someAssemblyRequired → mapFolding/someAssemblyRequired}/getLLVMforNoReason.py +0 -0
  46. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/WHEEL +0 -0
  47. {mapFolding-0.3.7.dist-info → mapFolding-0.3.9.dist-info}/entry_points.txt +0 -0
mapFolding/someAssemblyRequired/synthesizeModulesNumba.py
@@ -0,0 +1,506 @@
+ from mapFolding import (
+     EnumIndices,
+     getAlgorithmSource,
+     getPathPackage,
+     getPathSyntheticModules,
+     hackSSOTdatatype,
+     hackSSOTdtype,
+     indexMy,
+     indexTrack,
+     moduleOfSyntheticModules,
+     myPackageNameIs,
+     ParametersNumba,
+     parametersNumbaDEFAULT,
+     setDatatypeElephino,
+     setDatatypeFoldsTotal,
+     setDatatypeLeavesTotal,
+     setDatatypeModule,
+ )
+ from typing import cast, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
+ from types import ModuleType
+ from collections import namedtuple
+ import ast
+ import inspect
+ import numba
+ import numpy
+ import pathlib
+
+ youOughtaKnow = namedtuple('youOughtaKnow', ['callableSynthesized', 'pathFilenameForMe', 'astForCompetentProgrammers'])
+
+ """TODO
+ Convert types
+ e.g. `groupsOfFolds: int = 0` to `groupsOfFolds = numba.types.{datatypeLarge}(0)`
+ This isn't necessary for Numba, but I may need the infrastructure for other compilers or paradigms."""
+
+ class RecursiveInliner(ast.NodeTransformer):
+     """
+     Class RecursiveInliner:
+         A custom AST NodeTransformer designed to recursively inline function calls from a given dictionary
+         of function definitions into the AST. Once a particular function has been inlined, it is marked
+         as completed to avoid repeated inlining. This transformation modifies the AST in place by substituting
+         eligible function calls with the body of their corresponding function.
+     Attributes:
+         dictionaryFunctions (Dict[str, ast.FunctionDef]):
+             A mapping of function name to its AST definition, used as a source for inlining.
+         callablesCompleted (Set[str]):
+             A set to track function names that have already been inlined to prevent multiple expansions.
+     Methods:
+         inlineFunctionBody(callableTargetName: str) -> Optional[ast.FunctionDef]:
+             Retrieves the AST definition for a given function name from dictionaryFunctions
+             and recursively inlines any function calls within it. Returns the function definition
+             that was inlined or None if the function was already processed.
+         visit_Call(callNode: ast.Call) -> ast.AST:
+             Inspects calls within the AST. If a function call matches one in dictionaryFunctions,
+             it is replaced by the inlined body. If the last statement in the inlined body is a return
+             or an expression, that value or expression is substituted; otherwise, a constant is returned.
+         visit_Expr(node: ast.Expr) -> Union[ast.AST, List[ast.AST]]:
+             Handles expression nodes in the AST. If the expression is a function call from
+             dictionaryFunctions, its statements are expanded in place, effectively inlining
+             the called function's statements into the surrounding context.
+     """
+     def __init__(self, dictionaryFunctions: Dict[str, ast.FunctionDef]):
+         self.dictionaryFunctions = dictionaryFunctions
+         self.callablesCompleted: Set[str] = set()
+
+     def inlineFunctionBody(self, callableTargetName: str) -> Optional[ast.FunctionDef]:
+         if (callableTargetName in self.callablesCompleted):
+             return None
+
+         self.callablesCompleted.add(callableTargetName)
+         inlineDefinition = self.dictionaryFunctions[callableTargetName]
+         for astNode in ast.walk(inlineDefinition):
+             self.visit(astNode)
+         return inlineDefinition
+
+     def visit_Call(self, node: ast.Call) -> ast.AST:
+         callNodeVisited = self.generic_visit(node)
+         if (isinstance(callNodeVisited, ast.Call) and isinstance(callNodeVisited.func, ast.Name) and callNodeVisited.func.id in self.dictionaryFunctions):
+             inlineDefinition = self.inlineFunctionBody(callNodeVisited.func.id)
+             if (inlineDefinition and inlineDefinition.body):
+                 statementTerminating = inlineDefinition.body[-1]
+                 if (isinstance(statementTerminating, ast.Return) and statementTerminating.value is not None):
+                     return self.visit(statementTerminating.value)
+                 elif (isinstance(statementTerminating, ast.Expr) and statementTerminating.value is not None):
+                     return self.visit(statementTerminating.value)
+                 return ast.Constant(value=None)
+         return callNodeVisited
+
+     def visit_Expr(self, node: ast.Expr) -> Union[ast.AST, List[ast.AST]]:
+         if (isinstance(node.value, ast.Call)):
+             if (isinstance(node.value.func, ast.Name) and node.value.func.id in self.dictionaryFunctions):
+                 inlineDefinition = self.inlineFunctionBody(node.value.func.id)
+                 if (inlineDefinition):
+                     return [self.visit(stmt) for stmt in inlineDefinition.body]
+         return self.generic_visit(node)
+
+ def decorateCallableWithNumba(astCallable: ast.FunctionDef, parallel: bool=False) -> ast.FunctionDef:
+     """
+     Decorates an AST function definition with Numba JIT compilation parameters.
+
+     This function processes an AST FunctionDef node and adds Numba-specific decorators
+     for JIT compilation. It handles array parameter typing and compilation options.
+
+     Parameters
+     ----------
+     astCallable : ast.FunctionDef
+         The AST node representing the function to be decorated with Numba JIT.
+     parallel : bool, optional
+         Whether to enable parallel execution in Numba compilation.
+         Default is False.
+
+     Returns
+     -------
+     ast.FunctionDef
+         The modified AST function definition node with added Numba decorators.
+
+     Notes
+     -----
+     The function performs the following main tasks:
+         1. Processes function parameters to create Numba-compatible type signatures
+         2. Constructs appropriate Numba compilation parameters
+         3. Creates and attaches a @numba.jit decorator to the function
+     Special handling is included for the 'countInitialize' function, which receives
+     empty compilation parameters.
+     The function relies on external parameters:
+         - parametersNumbaDEFAULT: Default Numba compilation parameters
+         - ParametersNumba: Class/type for handling Numba parameters
+         - hackSSOTdatatype: Function for determining default datatypes
+     """
+     def makeNumbaParameterSignatureElement(signatureElement: ast.arg):
+         """
+         Converts an AST function parameter signature element into a Numba-compatible type annotation.
+
+         This function processes parameter annotations for array types, handling both shape and datatype
+         specifications. It supports multi-dimensional arrays through tuple-based shape definitions and
+         various numeric datatypes.
+
+         Parameters
+         ----------
+         signatureElement : ast.arg
+             The AST argument node containing the parameter's name and type annotation.
+             Expected annotation format: Type[shape_tuple, dtype]
+             where shape_tuple can be either a single dimension or a tuple of dimensions,
+             and dtype specifies the data type.
+
+         Returns
+         -------
+         ast.Subscript
+             A Numba-compatible type annotation as an AST node, representing an array type
+             with the specified shape and datatype.
+
+         Notes
+         -----
+         The function handles two main cases for shape specifications:
+             1. Multi-dimensional arrays with tuple-based shapes
+             2. Single-dimension arrays with simple slice notation
+         The datatype can be either explicitly specified in the annotation or determined
+         through a fallback mechanism using hackSSOTdatatype().
+         """
+         if isinstance(signatureElement.annotation, ast.Subscript) and isinstance(signatureElement.annotation.slice, ast.Tuple):
+             annotationShape = signatureElement.annotation.slice.elts[0]
+             if isinstance(annotationShape, ast.Subscript) and isinstance(annotationShape.slice, ast.Tuple):
+                 shapeAsListSlices: Sequence[ast.expr] = [ast.Slice() for axis in range(len(annotationShape.slice.elts))]
+                 shapeAsListSlices[-1] = ast.Slice(step=ast.Constant(value=1))
+                 shapeAST = ast.Tuple(elts=list(shapeAsListSlices), ctx=ast.Load())
+             else:
+                 shapeAST = ast.Slice(step=ast.Constant(value=1))
+
+             annotationDtype = signatureElement.annotation.slice.elts[1]
+             if (isinstance(annotationDtype, ast.Subscript) and isinstance(annotationDtype.slice, ast.Attribute)):
+                 datatypeAST = annotationDtype.slice.attr
+             else:
+                 datatypeAST = None
+
+             ndarrayName = signatureElement.arg
+             Z0Z_hacky_dtype = hackSSOTdatatype(ndarrayName)
+             datatype_attr = datatypeAST or Z0Z_hacky_dtype
+
+             datatypeNumba = ast.Attribute(value=ast.Name(id='numba', ctx=ast.Load()), attr=datatype_attr, ctx=ast.Load())
+
+             return ast.Subscript(value=datatypeNumba, slice=shapeAST, ctx=ast.Load())
+
+     # TODO: more explicit handling of decorators. I'm able to ignore this because I know `algorithmSource` doesn't have any decorators.
+     # callableSourceDecorators = [decorator for decorator in callableInlined.decorator_list]
+
+     listNumbaParameterSignature: Sequence[ast.expr] = []
+     for parameter in astCallable.args.args:
+         signatureElement = makeNumbaParameterSignatureElement(parameter)
+         if (signatureElement):
+             listNumbaParameterSignature.append(signatureElement)
+
+     astArgsNumbaSignature = ast.Tuple(elts=listNumbaParameterSignature, ctx=ast.Load())
+
+     if astCallable.name == 'countInitialize' or astCallable.name == 'doTheNeedful':
+         parametersNumba = {}
+     else:
+         parametersNumba = parametersNumbaDEFAULT if not parallel else ParametersNumba({**parametersNumbaDEFAULT, 'parallel': True})
+     listKeywordsNumbaSignature = [ast.keyword(arg=parameterName, value=ast.Constant(value=parameterValue)) for parameterName, parameterValue in parametersNumba.items()]
+
+     astDecoratorNumba = ast.Call(func=ast.Attribute(value=ast.Name(id='numba', ctx=ast.Load()), attr='jit', ctx=ast.Load()), args=[astArgsNumbaSignature], keywords=listKeywordsNumbaSignature)
+
+     astCallable.decorator_list = [astDecoratorNumba]
+     return astCallable
+
+ class UnpackArrayAccesses(ast.NodeTransformer):
+     """
+     A class that transforms array accesses using enum indices into local variables.
+
+     This AST transformer identifies array accesses using enum indices and replaces them
+     with local variables, adding initialization statements at the start of functions.
+
+     Parameters:
+         enumIndexClass (Type[EnumIndices]): The enum class used for array indexing
+         arrayName (str): The name of the array being accessed
+
+     Attributes:
+         enumIndexClass (Type[EnumIndices]): Stored enum class for index lookups
+         arrayName (str): Name of the array being transformed
+         substitutions (dict): Tracks variable substitutions and their original nodes
+
+     The transformer handles two main cases:
+         1. Scalar array access - array[EnumIndices.MEMBER]
+         2. Array slice access - array[EnumIndices.MEMBER, other_indices...]
+     For each identified access pattern, it:
+         1. Creates a local variable named after the enum member
+         2. Adds initialization code at function start
+         3. Replaces original array access with the local variable
+     """
+
+     def __init__(self, enumIndexClass: Type[EnumIndices], arrayName: str):
+         self.enumIndexClass = enumIndexClass
+         self.arrayName = arrayName
+         self.substitutions = {}
+
+     def extract_member_name(self, node: ast.AST) -> Optional[str]:
+         """Recursively extract enum member name from any node in the AST."""
+         if isinstance(node, ast.Attribute) and node.attr == 'value':
+             innerAttribute = node.value
+             while isinstance(innerAttribute, ast.Attribute):
+                 if (isinstance(innerAttribute.value, ast.Name) and innerAttribute.value.id == self.enumIndexClass.__name__):
+                     return innerAttribute.attr
+                 innerAttribute = innerAttribute.value
+         return None
+
+     def transform_slice_element(self, node: ast.AST) -> ast.AST:
+         """Transform any enum references within a slice element."""
+         if isinstance(node, ast.Subscript):
+             if isinstance(node.slice, ast.Attribute):
+                 member_name = self.extract_member_name(node.slice)
+                 if member_name:
+                     return ast.Name(id=member_name, ctx=node.ctx)
+         elif isinstance(node, ast.Tuple):
+             # Handle tuple slices by transforming each element
+             return ast.Tuple(elts=cast(List[ast.expr], [self.transform_slice_element(elt) for elt in node.elts]), ctx=node.ctx)
+         elif isinstance(node, ast.Attribute):
+             member_name = self.extract_member_name(node)
+             if member_name:
+                 return ast.Name(id=member_name, ctx=ast.Load())
+         return node
+
+     def visit_Subscript(self, node: ast.Subscript) -> ast.AST:
+         # Recursively visit any nested subscripts in value or slice
+         node.value = self.visit(node.value)
+         node.slice = self.visit(node.slice)
+         # If node.value is not our arrayName, just return node
+         if not (isinstance(node.value, ast.Name) and node.value.id == self.arrayName):
+             return node
+
+         # Handle scalar array access
+         if isinstance(node.slice, ast.Attribute):
+             memberName = self.extract_member_name(node.slice)
+             if memberName:
+                 self.substitutions[memberName] = ('scalar', node)
+                 return ast.Name(id=memberName, ctx=ast.Load())
+
+         # Handle array slice access
+         if isinstance(node.slice, ast.Tuple) and node.slice.elts:
+             firstElement = node.slice.elts[0]
+             memberName = self.extract_member_name(firstElement)
+             sliceRemainder = [self.visit(elem) for elem in node.slice.elts[1:]]
+             if memberName:
+                 self.substitutions[memberName] = ('array', node)
+                 if len(sliceRemainder) == 0:
+                     return ast.Name(id=memberName, ctx=ast.Load())
+                 return ast.Subscript(value=ast.Name(id=memberName, ctx=ast.Load()), slice=ast.Tuple(elts=sliceRemainder, ctx=ast.Load()) if len(sliceRemainder) > 1 else sliceRemainder[0], ctx=ast.Load())
+
+         # If single-element tuple, unwrap
+         if isinstance(node.slice, ast.Tuple) and len(node.slice.elts) == 1:
+             node.slice = node.slice.elts[0]
+
+         return node
+
+     def visit_FunctionDef(self, node: ast.FunctionDef) -> ast.FunctionDef:
+         node = cast(ast.FunctionDef, self.generic_visit(node))
+
+         initializations = []
+         for name, (kind, original_node) in self.substitutions.items():
+             if kind == 'scalar':
+                 initializations.append(ast.Assign(targets=[ast.Name(id=name, ctx=ast.Store())], value=original_node))
+             else: # array
+                 initializations.append(
+                     ast.Assign(
+                         targets=[ast.Name(id=name, ctx=ast.Store())],
+                         value=ast.Subscript(value=ast.Name(id=self.arrayName, ctx=ast.Load()),
+                             slice=ast.Attribute(value=ast.Attribute(
+                                 value=ast.Name(id=self.enumIndexClass.__name__, ctx=ast.Load()),
+                                 attr=name, ctx=ast.Load()), attr='value', ctx=ast.Load()), ctx=ast.Load())))
+
+         node.body = initializations + node.body
+         return node
+
+ def inlineOneCallable(codeSource: str, callableTarget: str):
+     """
+     Inlines a target callable function and its dependencies within the provided code source.
+
+     This function performs function inlining, optionally adds Numba decorators, and handles array access unpacking
+     for specific callable targets. It processes the source code through AST manipulation and returns the modified source.
+
+     Parameters:
+         codeSource (str): The source code containing the callable to be inlined.
+         callableTarget (str): The name of the callable function to be inlined. Special handling is provided for
+             'countParallel', 'countInitialize', and 'countSequential'.
+
+     Returns:
+         str: The modified source code with the inlined callable and necessary imports.
+
+     The function performs the following operations:
+         1. Parses the source code into an AST
+         2. Extracts import statements and function definitions
+         3. Inlines the target function using RecursiveInliner
+         4. Applies Numba decoration if needed
+         5. Handles special array access unpacking for 'countSequential'
+         6. Reconstructs and returns the modified source code
+
+     Note:
+         - Special handling is provided for 'countParallel', 'countInitialize', and 'countSequential' targets
+         - For 'countSequential', additional array access unpacking is performed for 'my' and 'track' indices
+         - `UnpackArrayAccesses` would need modification to handle 'countParallel'
+     """
+
+     codeParsed: ast.Module = ast.parse(codeSource, type_comments=True)
+     codeSourceImportStatements = {statement for statement in codeParsed.body if isinstance(statement, (ast.Import, ast.ImportFrom))}
+     dictionaryFunctions = {statement.name: statement for statement in codeParsed.body if isinstance(statement, ast.FunctionDef)}
+     callableInlinerWorkhorse = RecursiveInliner(dictionaryFunctions)
+     callableInlined = callableInlinerWorkhorse.inlineFunctionBody(callableTarget)
+
+     if callableInlined:
+         ast.fix_missing_locations(callableInlined)
+         parallel = callableTarget == 'countParallel'
+         callableDecorated = decorateCallableWithNumba(callableInlined, parallel)
+
+         if callableTarget == 'countSequential':
+             unpackerMy = UnpackArrayAccesses(indexMy, 'my')
+             callableDecorated = cast(ast.FunctionDef, unpackerMy.visit(callableDecorated))
+             ast.fix_missing_locations(callableDecorated)
+
+             unpackerTrack = UnpackArrayAccesses(indexTrack, 'track')
+             callableDecorated = cast(ast.FunctionDef, unpackerTrack.visit(callableDecorated))
+             ast.fix_missing_locations(callableDecorated)
+
+         moduleAST = ast.Module(body=cast(List[ast.stmt], list(codeSourceImportStatements) + [callableDecorated]), type_ignores=[])
+         ast.fix_missing_locations(moduleAST)
+         moduleSource = ast.unparse(moduleAST)
+         return moduleSource
+
+ def makeDispatcherNumba(codeSource: str, callableTarget: str, listStuffYouOughtaKnow: List[youOughtaKnow]) -> str:
+     """Creates AST for the dispatcher module that coordinates the optimized functions."""
+     docstringDispatcherNumba = """
+     What in tarnation is this stupid module and function?
+
+     - This function is not in the same module as `countFolds` so that we can delay Numba just-in-time (jit) compilation of this function and the finalization of its settings until we are ready.
+     - This function is not in the same module as the next function, which does the hard work, so that we can delay `numba.jit` compilation of the next function.
+     - This function is "jitted" but the next function is super jitted, which makes it too arrogant to talk to plebian Python functions. It will, however, reluctantly talk to basic jitted functions.
+     - So this module can talk to the next function, and because this module isn't as arrogant, it will talk to the low-class `countFolds` that called this function. Well, with a few restrictions, of course:
+         - No `TypedDict`
+         - The plebs must clean up their own memory problems
+         - No oversized integers
+         - No global variables, only global constants
+         - It won't accept pleb nonlocal variables either
+         - Python "class": they are all inferior to the jit class
+         - No `**kwargs`
+         - and just a few dozen-jillion other things.
+     """
+
+     # Parse source code
+     sourceAST = ast.parse(codeSource)
+
+     # Extract imports and target function definition
+     importsAST = [node for node in sourceAST.body if isinstance(node, (ast.Import, ast.ImportFrom))]
+     FunctionDefTarget = next((node for node in sourceAST.body if isinstance(node, ast.FunctionDef) and node.name == callableTarget), None)
+
+     if not FunctionDefTarget:
+         raise ValueError(f"Could not find function {callableTarget} in source code")
+
+     # Zero-out the decorator list
+     FunctionDefTarget.decorator_list = []
+     # TODO: more explicit handling of decorators. I'm able to ignore this because I know `algorithmSource` doesn't have any decorators.
+     # FunctionDefTargetDecorators = [decorator for decorator in FunctionDefTarget.decorator_list]
+
+     # Add Numba decorator
+     FunctionDefTarget = decorateCallableWithNumba(FunctionDefTarget, parallel=False)
+     FunctionDefTarget.body.insert(0, ast.Expr(value=ast.Constant(value=docstringDispatcherNumba)))
+
+     # Combine everything into a module
+     moduleAST = ast.Module(
+         body=cast(List[ast.stmt]
+             , importsAST
+             + [Don_Lapre_The_Road_to_Self_Improvement_For_Programmers_by_Using_Short_Identifiers.astForCompetentProgrammers
+                 for Don_Lapre_The_Road_to_Self_Improvement_For_Programmers_by_Using_Short_Identifiers in listStuffYouOughtaKnow]
+             + [FunctionDefTarget])
+         , type_ignores=[]
+     )
+
+     ast.fix_missing_locations(moduleAST)
+     return ast.unparse(moduleAST)
+
+ def makeNumbaOptimizedFlow(listCallablesInline: List[str], callableDispatcher: Optional[str] = None, algorithmSource: Optional[ModuleType] = None):
+     """Synthesizes numba-optimized versions of map folding functions."""
+
+     if not algorithmSource:
+         algorithmSource = getAlgorithmSource()
+
+     formatModuleNameDEFAULT = "numba_{callableTarget}"
+
+     # When I am a more competent programmer, I will make getPathFilenameWrite dependent on makeAstImport or vice versa,
+     # so the name of the physical file doesn't get out of whack with the name of the logical module.
+     def getPathFilenameWrite(callableTarget: str
+             , pathWrite: Optional[pathlib.Path] = None
+             , formatFilenameWrite: Optional[str] = None
+             ) -> pathlib.Path:
+         if not pathWrite:
+             pathWrite = getPathSyntheticModules()
+         if not formatFilenameWrite:
+             formatFilenameWrite = formatModuleNameDEFAULT + '.py'
+
+         pathFilename = pathWrite / formatFilenameWrite.format(callableTarget=callableTarget)
+         return pathFilename
+
+     def makeAstImport(callableTarget: str
+             , packageName: Optional[str] = None
+             , subPackageName: Optional[str] = None
+             , moduleName: Optional[str] = None
+             , astNodeLogicalPathThingy: Optional[ast.AST] = None
+             ) -> ast.ImportFrom:
+         """Creates import AST node for synthetic modules."""
+         if astNodeLogicalPathThingy is None:
+             if packageName is None:
+                 packageName = myPackageNameIs
+             if subPackageName is None:
+                 subPackageName = moduleOfSyntheticModules
+             if moduleName is None:
+                 moduleName = formatModuleNameDEFAULT.format(callableTarget=callableTarget)
+             module = f'{packageName}.{subPackageName}.{moduleName}'
+         else:
+             module = str(astNodeLogicalPathThingy)
+         return ast.ImportFrom(
+             module=module,
+             names=[ast.alias(name=callableTarget, asname=None)],
+             level=0
+         )
+
+     listStuffYouOughtaKnow: List[youOughtaKnow] = []
+
+     for callableTarget in listCallablesInline:
+         codeSource = inspect.getsource(algorithmSource)
+         moduleSource = inlineOneCallable(codeSource, callableTarget)
+         if not moduleSource:
+             raise Exception("Pylance, OMG! The sky is falling!")
+
+         pathFilename = getPathFilenameWrite(callableTarget)
+         astImport = makeAstImport(callableTarget)
+
+         listStuffYouOughtaKnow.append(youOughtaKnow(
+             callableSynthesized=callableTarget,
+             pathFilenameForMe=pathFilename,
+             astForCompetentProgrammers=astImport
+         ))
+         pathFilename.write_text(moduleSource)
+
+     # Generate dispatcher if requested
+     if callableDispatcher:
+         codeSource = inspect.getsource(algorithmSource)
+         moduleSource = makeDispatcherNumba(codeSource, callableDispatcher, listStuffYouOughtaKnow)
+         if not moduleSource:
+             raise Exception("Pylance, OMG! The sky is falling!")
+
+         pathFilename = getPathFilenameWrite(callableDispatcher)
+         astImport = makeAstImport(callableDispatcher)
+
+         listStuffYouOughtaKnow.append(youOughtaKnow(
+             callableSynthesized=callableDispatcher,
+             pathFilenameForMe=pathFilename,
+             astForCompetentProgrammers=astImport
+         ))
+         pathFilename.write_text(moduleSource)
+
+ if __name__ == '__main__':
+     setDatatypeModule('numpy', sourGrapes=True)
+     setDatatypeFoldsTotal('int64', sourGrapes=True)
+     setDatatypeElephino('uint8', sourGrapes=True)
+     setDatatypeLeavesTotal('uint8', sourGrapes=True)
+     listCallablesInline: List[str] = ['countInitialize', 'countParallel', 'countSequential']
+     callableDispatcher = 'doTheNeedful'
+     makeNumbaOptimizedFlow(listCallablesInline, callableDispatcher)
+     # makeNumbaOptimizedFlow(listCallablesInline)
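
Note on the inlining mechanism: the core move in synthesizeModulesNumba.py is an ast.NodeTransformer whose visit_Call replaces a matching call node with the expression from the called function's final return statement. The following is a minimal standalone sketch of that pattern, not code from the package; the names (InlineSimpleCalls, double, compute) are invented for illustration, and unlike RecursiveInliner it skips recursion and assumes the helper's parameter names already match the caller's variables.

import ast
import textwrap

class InlineSimpleCalls(ast.NodeTransformer):
    """Replace calls to known functions with the expression from their final return."""
    def __init__(self, dictionaryFunctions):
        self.dictionaryFunctions = dictionaryFunctions

    def visit_Call(self, node: ast.Call) -> ast.AST:
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id in self.dictionaryFunctions:
            statementTerminating = self.dictionaryFunctions[node.func.id].body[-1]
            if isinstance(statementTerminating, ast.Return) and statementTerminating.value is not None:
                # Substitute the call with the callee's return expression.
                return statementTerminating.value
        return node

codeSource = textwrap.dedent("""
    def double(n):
        return n + n

    def compute(n):
        return double(n) + 1
""")
codeParsed = ast.parse(codeSource)
dictionaryFunctions = {statement.name: statement for statement in codeParsed.body if isinstance(statement, ast.FunctionDef)}
ast.fix_missing_locations(InlineSimpleCalls(dictionaryFunctions).visit(codeParsed))
print(ast.unparse(codeParsed))  # `compute` now reads: return n + n + 1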
mapFolding/syntheticModules/__init__.py
@@ -0,0 +1,3 @@
+ from .numba_countInitialize import countInitialize
+ from .numba_countParallel import countParallel
+ from .numba_countSequential import countSequential
mapFolding/syntheticModules/numba_countInitialize.py
@@ -1,11 +1,12 @@
+ from numpy.typing import NDArray
  import numpy
- from typing import Any, Tuple
- from mapFolding import indexMy, indexTrack
- import numba
  from numpy import integer
+ import numba
+ from mapFolding import indexMy, indexTrack
+ from typing import Any, Tuple

  @numba.jit((numba.uint8[:, :, ::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]))
- def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
+ def countInitialize(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
      while my[indexMy.leaf1ndex.value]:
          if my[indexMy.leaf1ndex.value] <= 1 or track[indexTrack.leafBelow.value, 0] == 1:
              my[indexMy.dimensionsUnconstrained.value] = my[indexMy.dimensionsTotal.value]
mapFolding/syntheticModules/numba_countParallel.py
@@ -1,13 +1,18 @@
+ from numpy.typing import NDArray
+ import numpy
  from numpy import integer
- from typing import Any, Tuple
  import numba
  from mapFolding import indexMy, indexTrack
- import numpy
+ from typing import Any, Tuple

  @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=True)
- def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWherePARALLEL: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], myPARALLEL: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], trackPARALLEL: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
-     for indexSherpa in numba.prange(myPARALLEL[indexMy.taskDivisions.value]):
-         groupsOfFolds = numba.types.int64(0)
+ def countParallel(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
+     gapsWherePARALLEL = gapsWhere.copy()
+     myPARALLEL = my.copy()
+     trackPARALLEL = track.copy()
+     taskDivisionsPrange = myPARALLEL[indexMy.taskDivisions.value]
+     for indexSherpa in numba.prange(taskDivisionsPrange):
+         groupsOfFolds: int = 0
          gapsWhere = gapsWherePARALLEL.copy()
          my = myPARALLEL.copy()
          my[indexMy.taskIndex.value] = indexSherpa
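
For context, the countParallel body above follows the standard numba.prange recipe: snapshot the shared arrays, give each task its own copies, and keep a per-task accumulator that Numba can reduce. The following self-contained sketch shows that recipe in isolation; it is illustrative only and not the package's folding algorithm, and sumOfSquares is an invented example function.

import numba
import numpy

@numba.jit(nopython=True, parallel=True)
def sumOfSquares(limits):
    total = 0
    # Each prange iteration works on its own private subtotal; Numba
    # recognizes `total += subtotal` as a cross-task reduction.
    for indexSherpa in numba.prange(limits.shape[0]):
        subtotal = 0
        for n in range(limits[indexSherpa]):
            subtotal += n * n
        total += subtotal
    return total

print(sumOfSquares(numpy.arange(1, 9, dtype=numpy.int64)))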
mapFolding/syntheticModules/numba_countSequential.py
@@ -1,11 +1,12 @@
+ from numpy.typing import NDArray
+ import numpy
  from numpy import integer
- from typing import Any, Tuple
  import numba
  from mapFolding import indexMy, indexTrack
- import numpy
+ from typing import Any, Tuple

  @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]), _nrt=True, boundscheck=False, cache=True, error_model='numpy', fastmath=True, forceinline=False, inline='never', looplift=False, no_cfunc_wrapper=True, no_cpython_wrapper=True, nopython=True, parallel=False)
- def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]):
+ def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
      leafBelow = track[indexTrack.leafBelow.value]
      gapRangeStart = track[indexTrack.gapRangeStart.value]
      countDimensionsGapped = track[indexTrack.countDimensionsGapped.value]
@@ -19,7 +20,7 @@ def countSequential(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.d
      indexMiniGap = my[indexMy.indexMiniGap.value]
      gap1ndex = my[indexMy.gap1ndex.value]
      taskIndex = my[indexMy.taskIndex.value]
-     groupsOfFolds = numba.types.int64(0)
+     groupsOfFolds: int = 0
      doFindGaps = True
      while leaf1ndex:
          if (doFindGaps := (leaf1ndex <= 1 or leafBelow[0] == 1)) and leaf1ndex > foldGroups[-1]:
mapFolding/syntheticModules/numba_doTheNeedful.py
@@ -0,0 +1,33 @@
+ from mapFolding import indexMy, indexTrack
+ from numpy import integer
+ from numpy.typing import NDArray
+ from typing import Any, Tuple
+ import numba
+ import numpy
+ from mapFolding.syntheticModules.numba_countInitialize import countInitialize
+ from mapFolding.syntheticModules.numba_countParallel import countParallel
+ from mapFolding.syntheticModules.numba_countSequential import countSequential
+
+ @numba.jit((numba.uint8[:, :, ::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:, ::1]))
+ def doTheNeedful(connectionGraph: numpy.ndarray[Tuple[int, int, int], numpy.dtype[integer[Any]]], foldGroups: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], gapsWhere: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], mapShape: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], my: numpy.ndarray[Tuple[int], numpy.dtype[integer[Any]]], track: numpy.ndarray[Tuple[int, int], numpy.dtype[integer[Any]]]) -> None:
+     """
+     What in tarnation is this stupid module and function?
+
+     - This function is not in the same module as `countFolds` so that we can delay Numba just-in-time (jit) compilation of this function and the finalization of its settings until we are ready.
+     - This function is not in the same module as the next function, which does the hard work, so that we can delay `numba.jit` compilation of the next function.
+     - This function is "jitted" but the next function is super jitted, which makes it too arrogant to talk to plebian Python functions. It will, however, reluctantly talk to basic jitted functions.
+     - So this module can talk to the next function, and because this module isn't as arrogant, it will talk to the low-class `countFolds` that called this function. Well, with a few restrictions, of course:
+         - No `TypedDict`
+         - The plebs must clean up their own memory problems
+         - No oversized integers
+         - No global variables, only global constants
+         - It won't accept pleb nonlocal variables either
+         - Python "class": they are all inferior to the jit class
+         - No `**kwargs`
+         - and just a few dozen-jillion other things.
+     """
+     countInitialize(connectionGraph, gapsWhere, my, track)
+     if my[indexMy.taskDivisions.value] > 0:
+         countParallel(connectionGraph, foldGroups, gapsWhere, my, track)
+     else:
+         countSequential(connectionGraph, foldGroups, gapsWhere, my, track)
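
The dispatcher docstring above hinges on one Numba behavior: a @numba.jit decorator given an explicit signature compiles eagerly at decoration time, i.e. when the module is imported, which is why the synthesized modules are kept out of the entry-point module and imported only when needed. A standalone sketch of that timing difference, not taken from the package (crunchEager and crunchLazy are invented names):

import time
import numba

clockStart = time.perf_counter()

# Explicit signature: Numba compiles eagerly, right here at import/decoration time.
@numba.jit(numba.int64(numba.int64), nopython=True)
def crunchEager(n):
    return n * n

print(f"eager: compiled during import, {time.perf_counter() - clockStart:.2f}s")

# No signature: compilation is deferred until the first call.
@numba.jit(nopython=True)
def crunchLazy(n):
    return n * n

clockStart = time.perf_counter()
crunchLazy(3)
print(f"lazy: compiled at first call, {time.perf_counter() - clockStart:.2f}s")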