mapFolding 0.2.7__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mapFolding
- Version: 0.2.7
+ Version: 0.3.1
  Summary: Count distinct ways to fold a map (or a strip of stamps)
  Author-email: Hunter Hogan <HunterHogan@pm.me>
  License: CC-BY-NC-4.0
@@ -29,10 +29,12 @@ Requires-Dist: jupyter; extra == "benchmark"
  Requires-Dist: pandas; extra == "benchmark"
  Requires-Dist: tqdm; extra == "benchmark"
  Provides-Extra: testing
+ Requires-Dist: more_itertools; extra == "testing"
  Requires-Dist: pytest; extra == "testing"
  Requires-Dist: pytest-cov; extra == "testing"
  Requires-Dist: pytest-env; extra == "testing"
  Requires-Dist: pytest-xdist; extra == "testing"
+ Requires-Dist: python_minifier; extra == "testing"

  # Algorithm(s) for counting distinct ways to fold a map (or a strip of stamps)

mapFolding-0.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ benchmarks/benchmarking.py,sha256=HD_0NSvuabblg94ftDre6LFnXShTe8MYj3hIodW-zV0,3076
+ reference/flattened.py,sha256=6blZ2Y9G8mu1F3gV8SKndPE398t2VVFlsgKlyeJ765A,16538
+ reference/hunterNumba.py,sha256=HWndRgsajOf76rbb2LDNEZ6itsdYbyV-k3wgOFjeR6c,7104
+ reference/irvineJavaPort.py,sha256=Sj-63Z-OsGuDoEBXuxyjRrNmmyl0d7Yz_XuY7I47Oyg,4250
+ reference/jax.py,sha256=rojyK80lOATtbzxjGOHWHZngQa47CXCLJHZwIdN2MwI,14955
+ reference/lunnan.py,sha256=XEcql_gxvCCghb6Or3qwmPbn4IZUbZTaSmw_fUjRxZE,5037
+ reference/lunnanNumpy.py,sha256=HqDgSwTOZA-G0oophOEfc4zs25Mv4yw2aoF1v8miOLk,4653
+ reference/lunnanWhile.py,sha256=7NY2IKO5XBgol0aWWF_Fi-7oTL9pvu_z6lB0TF1uVHk,4063
+ reference/rotatedEntryPoint.py,sha256=z0QyDQtnMvXNj5ntWzzJUQUMFm1-xHGLVhtYzwmczUI,11530
+ reference/total_countPlus1vsPlusN.py,sha256=usenM8Yn_G1dqlPl7NKKkcnbohBZVZBXTQRm2S3_EDA,8106
+ someAssemblyRequired/__init__.py,sha256=nDtS5UFMKN-F5pTp0qKA0J0I-XR3n3OFxV2bosieBu8,131
+ someAssemblyRequired/countInitialize.py,sha256=hwo9SW_IvB-bgKNfaGvjl40yayFEmZZmeywiEMDSQDw,1840
+ someAssemblyRequired/countParallel.py,sha256=4MfRYLBA2IBxRiXb04voNcxSDAtmZOe7lrji1c3kFls,2560
+ someAssemblyRequired/countSequential.py,sha256=8YILeL3rflOhW1ts78ZSgYkPYXMPfusYOPkPtf8Xa3M,3217
+ someAssemblyRequired/getLLVMforNoReason.py,sha256=FtJzw2pZS3A4NimWdZsegXaU-vKeCw8m67kcfb5wvGM,894
+ someAssemblyRequired/synthesizeJob.py,sha256=xrcEW-QQPKogYh3O0TaRBSZEd8HgMLtXBTeS0Ps_pns,7500
+ someAssemblyRequired/synthesizeModules.py,sha256=4mDgzfhm36zIDT8LlcPZzfGfoxRi1Z5rb4rgkAWVtkQ,8625
+ mapFolding-0.3.1.dist-info/METADATA,sha256=sDKpDeoFSg3rgNkz20rj9Ec3FL-_wVEv2svn8RuwFfI,7526
+ mapFolding-0.3.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ mapFolding-0.3.1.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
+ mapFolding-0.3.1.dist-info/top_level.txt,sha256=yHhQq-bIJhB4pZcof5hXDTIjan0nxcFuOEWb7gy1DuU,42
+ mapFolding-0.3.1.dist-info/RECORD,,
reference/flattened.py CHANGED
@@ -236,7 +236,7 @@ class computationState(TypedDict):
      track: NDArray[integer[Any]]

  dtypeLarge = numpy.int64
- dtypeDefault = dtypeLarge
+ dtypeMedium = dtypeLarge

  def getLeavesTotal(listDimensions: Sequence[int]) -> int:
      """
@@ -284,7 +284,7 @@ def getTaskDivisions(computationDivisions: Optional[Union[int, str]], concurrenc
      return taskDivisions

  def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optional[Type]) -> NDArray[integer[Any]]:
-     datatype = keywordArguments.get('datatype', dtypeDefault)
+     datatype = keywordArguments.get('datatype', dtypeMedium)
      mapShape = validateListDimensions(listDimensions)
      leavesTotal = getLeavesTotal(mapShape)
      arrayDimensions = numpy.array(mapShape, dtype=datatype)
@@ -317,14 +317,14 @@ def makeConnectionGraph(listDimensions: Sequence[int], **keywordArguments: Optio

  def makeDataContainer(shape, datatype: Optional[Type] = None):
      if datatype is None:
-         datatype = dtypeDefault
+         datatype = dtypeMedium
      return numpy.zeros(shape, dtype=datatype)

  def outfitFoldings(listDimensions: Sequence[int], computationDivisions: Optional[Union[int, str]] = None, CPUlimit: Optional[Union[bool, float, int]] = None, **keywordArguments: Optional[Type]) -> computationState:
-     datatypeDefault = keywordArguments.get('datatypeDefault', dtypeDefault)
+     datatypeMedium = keywordArguments.get('datatypeMedium', dtypeMedium)
      datatypeLarge = keywordArguments.get('datatypeLarge', dtypeLarge)

-     the = makeDataContainer(len(indexThe), datatypeDefault)
+     the = makeDataContainer(len(indexThe), datatypeMedium)

      mapShape = tuple(sorted(validateListDimensions(listDimensions)))
      the[indexThe.leavesTotal] = getLeavesTotal(mapShape)
@@ -333,11 +333,11 @@ def outfitFoldings(listDimensions: Sequence[int], computationDivisions: Optional
      the[indexThe.taskDivisions] = getTaskDivisions(computationDivisions, concurrencyLimit, CPUlimit, listDimensions)

      stateInitialized = computationState(
-         connectionGraph = makeConnectionGraph(mapShape, datatype=datatypeDefault),
+         connectionGraph = makeConnectionGraph(mapShape, datatype=datatypeMedium),
          foldsSubTotals = makeDataContainer(the[indexThe.leavesTotal], datatypeLarge),
          mapShape = mapShape,
          my = makeDataContainer(len(indexMy), datatypeLarge),
-         gapsWhere = makeDataContainer(int(the[indexThe.leavesTotal]) * int(the[indexThe.leavesTotal]) + 1, datatypeDefault),
+         gapsWhere = makeDataContainer(int(the[indexThe.leavesTotal]) * int(the[indexThe.leavesTotal]) + 1, datatypeMedium),
          the = the,
          track = makeDataContainer((len(indexTrack), the[indexThe.leavesTotal] + 1), datatypeLarge)
      )
reference/hunterNumba.py CHANGED
@@ -19,7 +19,7 @@ def countFolds(listDimensions: List[int]) -> int:
      def integerLarge(value) -> numpy.uint64:
          return numpy.uint64(value)

-     dtypeDefault = numpy.uint8
+     dtypeMedium = numpy.uint8
      dtypeMaximum = numpy.uint16

      leavesTotal = integerSmall(1)
@@ -30,19 +30,19 @@ def countFolds(listDimensions: List[int]) -> int:
      """How to build a leaf connection graph, also called a "Cartesian Product Decomposition"
      or a "Dimensional Product Mapping", with sentinels:
      Step 1: find the cumulative product of the map's dimensions"""
-     cumulativeProduct = numpy.ones(dimensionsTotal + 1, dtype=dtypeDefault)
+     cumulativeProduct = numpy.ones(dimensionsTotal + 1, dtype=dtypeMedium)
      for dimension1ndex in range(1, dimensionsTotal + 1):
          cumulativeProduct[dimension1ndex] = cumulativeProduct[dimension1ndex - 1] * listDimensions[dimension1ndex - 1]

      """Step 2: for each dimension, create a coordinate system """
      """coordinateSystem[dimension1ndex, leaf1ndex] holds the dimension1ndex-th coordinate of leaf leaf1ndex"""
-     coordinateSystem = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1), dtype=dtypeDefault)
+     coordinateSystem = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1), dtype=dtypeMedium)
      for dimension1ndex in range(1, dimensionsTotal + 1):
          for leaf1ndex in range(1, leavesTotal + 1):
              coordinateSystem[dimension1ndex, leaf1ndex] = ((leaf1ndex - 1) // cumulativeProduct[dimension1ndex - 1]) % listDimensions[dimension1ndex - 1] + 1

      """Step 3: create a huge empty connection graph"""
-     connectionGraph = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1, leavesTotal + 1), dtype=dtypeDefault)
+     connectionGraph = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1, leavesTotal + 1), dtype=dtypeMedium)

      """Step for... for... for...: fill the connection graph"""
      for dimension1ndex in range(1, dimensionsTotal + 1):
@@ -61,7 +61,7 @@ def countFolds(listDimensions: List[int]) -> int:
      leafBelow = numba.literally(1)
      countDimensionsGapped = numba.literally(2)
      gapRangeStart = numba.literally(3)
-     track = numpy.zeros((4, leavesTotal + 1), dtype=dtypeDefault)
+     track = numpy.zeros((4, leavesTotal + 1), dtype=dtypeMedium)

      gapsWhere = numpy.zeros(integerLarge(integerLarge(leavesTotal) * integerLarge(leavesTotal) + 1), dtype=dtypeMaximum)

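The docstring in countFolds above walks through the connection graph construction step by step. Below is a minimal standalone sketch of Steps 1 and 2, lifted from the code above into plain numpy; the 2x3 map shape and the uint8 dtype are only illustrations, not values used by the package.

    import numpy

    listDimensions = [2, 3]
    leavesTotal = 2 * 3
    dimensionsTotal = len(listDimensions)

    # Step 1: cumulative product of the map's dimensions, with a leading sentinel 1.
    cumulativeProduct = numpy.ones(dimensionsTotal + 1, dtype=numpy.uint8)
    for dimension1ndex in range(1, dimensionsTotal + 1):
        cumulativeProduct[dimension1ndex] = cumulativeProduct[dimension1ndex - 1] * listDimensions[dimension1ndex - 1]

    # Step 2: coordinateSystem[dimension1ndex, leaf1ndex] is the 1-based coordinate of leaf leaf1ndex.
    coordinateSystem = numpy.zeros((dimensionsTotal + 1, leavesTotal + 1), dtype=numpy.uint8)
    for dimension1ndex in range(1, dimensionsTotal + 1):
        for leaf1ndex in range(1, leavesTotal + 1):
            coordinateSystem[dimension1ndex, leaf1ndex] = ((leaf1ndex - 1) // cumulativeProduct[dimension1ndex - 1]) % listDimensions[dimension1ndex - 1] + 1

    print(cumulativeProduct)         # [1 2 6]
    print(coordinateSystem[1:, 1:])  # per-dimension coordinates of leaves 1..6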
reference/jax.py CHANGED
@@ -5,7 +5,7 @@ from typing import List, Tuple
  import jax
  import jaxtyping

- dtypeDefault = jax.numpy.uint32
+ dtypeMedium = jax.numpy.uint32
  dtypeMaximum = jax.numpy.uint32

  def countFolds(listDimensions: List[int]) -> int:
@@ -15,7 +15,7 @@ def countFolds(listDimensions: List[int]) -> int:
      d: int = len(listDimensions)
      import numpy
      D: numpy.ndarray = makeConnectionGraph(listDimensionsPositive)
-     connectionGraph = jax.numpy.asarray(D, dtype=dtypeDefault)
+     connectionGraph = jax.numpy.asarray(D, dtype=dtypeMedium)
      del listDimensionsPositive

      return foldingsJAX(n, d, connectionGraph)
@@ -191,10 +191,10 @@ def foldingsJAX(leavesTotal: jaxtyping.UInt32, dimensionsTotal: jaxtyping.UInt32
          return (leafAbove, leafBelow, allValues[2], gapRangeStart, gapsWhere, allValues[5], activeLeaf1ndex, activeGap1ndex)

      # Dynamic values
-     A = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeDefault)
-     B = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeDefault)
-     count = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeDefault)
-     gapter = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeDefault)
+     A = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeMedium)
+     B = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeMedium)
+     count = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeMedium)
+     gapter = jax.numpy.zeros(leavesTotal + 1, dtype=dtypeMedium)
      gap = jax.numpy.zeros(leavesTotal * leavesTotal + 1, dtype=dtypeMaximum)

      foldingsTotal = jax.numpy.uint32(0)
someAssemblyRequired/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .countSequential import countSequential
+ from .countParallel import countParallel
+ from .countInitialize import countInitialize
someAssemblyRequired/countInitialize.py ADDED
@@ -0,0 +1,45 @@
+ from mapFolding import indexMy, indexTrack
+ import numba
+
+ @numba.jit((numba.uint8[:,:,::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countInitialize(connectionGraph, gapsWhere, my, track):
+     while my[7] > 0:
+         if my[7] <= 1 or track[1, 0] == 1:
+             my[1] = my[0]
+             my[3] = track[3, my[7] - 1]
+             my[4] = 0
+             while my[4] < my[0]:
+                 if connectionGraph[my[4], my[7], my[7]] == my[7]:
+                     my[1] -= 1
+                 else:
+                     my[8] = connectionGraph[my[4], my[7], my[7]]
+                     while my[8] != my[7]:
+                         gapsWhere[my[3]] = my[8]
+                         if track[2, my[8]] == 0:
+                             my[3] += 1
+                         track[2, my[8]] += 1
+                         my[8] = connectionGraph[my[4], my[7], track[1, my[8]]]
+                 my[4] += 1
+             if not my[1]:
+                 my[5] = 0
+                 while my[5] < my[7]:
+                     gapsWhere[my[3]] = my[5]
+                     my[3] += 1
+                     my[5] += 1
+             my[6] = my[2]
+             while my[6] < my[3]:
+                 gapsWhere[my[2]] = gapsWhere[my[6]]
+                 if track[2, gapsWhere[my[6]]] == my[1]:
+                     my[2] += 1
+                 track[2, gapsWhere[my[6]]] = 0
+                 my[6] += 1
+         if my[7] > 0:
+             my[2] -= 1
+             track[0, my[7]] = gapsWhere[my[2]]
+             track[1, my[7]] = track[1, track[0, my[7]]]
+             track[1, track[0, my[7]]] = my[7]
+             track[0, track[1, my[7]]] = my[7]
+             track[3, my[7]] = my[2]
+             my[7] += 1
+         if my[2] > 0:
+             return
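countInitialize above addresses the state arrays by raw positions. Comparing it with the named unpacking in countSequential further down suggests the correspondence sketched below; this is an inference only, and the authoritative values live in the indexMy and indexTrack enums inside the mapFolding package.

    # Hedged reading of the raw indices used above, inferred from countSequential; verify against indexMy/indexTrack.
    indexMyInferred = {0: 'dimensionsTotal', 1: 'dimensionsUnconstrained', 2: 'gap1ndex', 3: 'gap1ndexCeiling',
                       4: 'indexDimension', 5: 'indexLeaf', 6: 'indexMiniGap', 7: 'leaf1ndex', 8: 'leafConnectee',
                       9: 'taskDivisions', 10: 'taskIndex'}
    indexTrackInferred = {0: 'leafAbove', 1: 'leafBelow', 2: 'countDimensionsGapped', 3: 'gapRangeStart'}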
someAssemblyRequired/countParallel.py ADDED
@@ -0,0 +1,52 @@
+ from mapFolding import indexMy, indexTrack
+ import numba
+
+ @numba.jit((numba.uint8[:,:,::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:,::1]), parallel=True, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countParallel(connectionGraph, foldGroups, gapsWherePARALLEL, myPARALLEL, trackPARALLEL):
+     for indexSherpa in numba.prange(myPARALLEL[9]):
+         gapsWhere = gapsWherePARALLEL.copy()
+         my = myPARALLEL.copy()
+         my[10] = indexSherpa
+         track = trackPARALLEL.copy()
+         groupsOfFolds: int = 0
+         while my[7] > 0:
+             if my[7] <= 1 or track[1, 0] == 1:
+                 if my[7] > foldGroups[-1]:
+                     groupsOfFolds = groupsOfFolds + 1
+                 else:
+                     my[1] = my[0]
+                     my[3] = track[3, my[7] - 1]
+                     my[4] = 0
+                     while my[4] < my[0]:
+                         if connectionGraph[my[4], my[7], my[7]] == my[7]:
+                             my[1] -= 1
+                         else:
+                             my[8] = connectionGraph[my[4], my[7], my[7]]
+                             while my[8] != my[7]:
+                                 if my[7] != my[9] or my[8] % my[9] == my[10]:
+                                     gapsWhere[my[3]] = my[8]
+                                     if track[2, my[8]] == 0:
+                                         my[3] += 1
+                                     track[2, my[8]] += 1
+                                 my[8] = connectionGraph[my[4], my[7], track[1, my[8]]]
+                         my[4] += 1
+                     my[6] = my[2]
+                     while my[6] < my[3]:
+                         gapsWhere[my[2]] = gapsWhere[my[6]]
+                         if track[2, gapsWhere[my[6]]] == my[1]:
+                             my[2] += 1
+                         track[2, gapsWhere[my[6]]] = 0
+                         my[6] += 1
+             while my[7] > 0 and my[2] == track[3, my[7] - 1]:
+                 my[7] -= 1
+                 track[1, track[0, my[7]]] = track[1, my[7]]
+                 track[0, track[1, my[7]]] = track[0, my[7]]
+             if my[7] > 0:
+                 my[2] -= 1
+                 track[0, my[7]] = gapsWhere[my[2]]
+                 track[1, my[7]] = track[1, track[0, my[7]]]
+                 track[1, track[0, my[7]]] = my[7]
+                 track[0, track[1, my[7]]] = my[7]
+                 track[3, my[7]] = my[2]
+                 my[7] += 1
+         foldGroups[my[10]] = groupsOfFolds
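countParallel above divides the work across numba.prange tasks: at the division leaf, a task only records gaps whose leaf index falls in its residue class (my[8] % my[9] == my[10]). A minimal sketch of that modulo partition, with purely illustrative numbers:

    # Leaves are assigned to tasks by residue class; 4 tasks and 12 leaves are only an illustration.
    taskDivisions = 4
    for taskIndex in range(taskDivisions):
        leavesForThisTask = [leaf for leaf in range(1, 13) if leaf % taskDivisions == taskIndex]
        print(taskIndex, leavesForThisTask)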
someAssemblyRequired/countSequential.py ADDED
@@ -0,0 +1,59 @@
+ from mapFolding import indexMy, indexTrack
+ import numba
+
+ @numba.jit((numba.uint8[:,:,::1], numba.int64[::1], numba.uint8[::1], numba.uint8[::1], numba.uint8[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)
+ def countSequential(connectionGraph, foldGroups, gapsWhere, my, track):
+     doFindGaps = True
+     dimensionsTotal = my[indexMy.dimensionsTotal.value]
+     dimensionsUnconstrained = my[indexMy.dimensionsUnconstrained.value]
+     gap1ndex = my[indexMy.gap1ndex.value]
+     gap1ndexCeiling = my[indexMy.gap1ndexCeiling.value]
+     indexDimension = my[indexMy.indexDimension.value]
+     indexMiniGap = my[indexMy.indexMiniGap.value]
+     leaf1ndex = my[indexMy.leaf1ndex.value]
+     leafConnectee = my[indexMy.leafConnectee.value]
+     taskIndex = my[indexMy.taskIndex.value]
+     leafAbove = track[indexTrack.leafAbove.value]
+     leafBelow = track[indexTrack.leafBelow.value]
+     countDimensionsGapped = track[indexTrack.countDimensionsGapped.value]
+     gapRangeStart = track[indexTrack.gapRangeStart.value]
+     groupsOfFolds: int = 0
+     while leaf1ndex > 0:
+         if (doFindGaps := (leaf1ndex <= 1 or leafBelow[0] == 1)) and leaf1ndex > foldGroups[-1]:
+             groupsOfFolds = groupsOfFolds + 1
+         elif doFindGaps:
+             dimensionsUnconstrained = dimensionsTotal
+             gap1ndexCeiling = gapRangeStart[leaf1ndex - 1]
+             indexDimension = 0
+             while indexDimension < dimensionsTotal:
+                 if connectionGraph[indexDimension, leaf1ndex, leaf1ndex] == leaf1ndex:
+                     dimensionsUnconstrained -= 1
+                 else:
+                     leafConnectee = connectionGraph[indexDimension, leaf1ndex, leaf1ndex]
+                     while leafConnectee != leaf1ndex:
+                         gapsWhere[gap1ndexCeiling] = leafConnectee
+                         if countDimensionsGapped[leafConnectee] == 0:
+                             gap1ndexCeiling += 1
+                         countDimensionsGapped[leafConnectee] += 1
+                         leafConnectee = connectionGraph[indexDimension, leaf1ndex, leafBelow[leafConnectee]]
+                 indexDimension += 1
+             indexMiniGap = gap1ndex
+             while indexMiniGap < gap1ndexCeiling:
+                 gapsWhere[gap1ndex] = gapsWhere[indexMiniGap]
+                 if countDimensionsGapped[gapsWhere[indexMiniGap]] == dimensionsUnconstrained:
+                     gap1ndex += 1
+                 countDimensionsGapped[gapsWhere[indexMiniGap]] = 0
+                 indexMiniGap += 1
+         while leaf1ndex > 0 and gap1ndex == gapRangeStart[leaf1ndex - 1]:
+             leaf1ndex -= 1
+             leafBelow[leafAbove[leaf1ndex]] = leafBelow[leaf1ndex]
+             leafAbove[leafBelow[leaf1ndex]] = leafAbove[leaf1ndex]
+         if leaf1ndex > 0:
+             gap1ndex -= 1
+             leafAbove[leaf1ndex] = gapsWhere[gap1ndex]
+             leafBelow[leaf1ndex] = leafBelow[leafAbove[leaf1ndex]]
+             leafBelow[leafAbove[leaf1ndex]] = leaf1ndex
+             leafAbove[leafBelow[leaf1ndex]] = leaf1ndex
+             gapRangeStart[leaf1ndex] = gap1ndex
+             leaf1ndex += 1
+     foldGroups[taskIndex] = groupsOfFolds
someAssemblyRequired/getLLVMforNoReason.py ADDED
@@ -0,0 +1,19 @@
+ import importlib
+ import importlib.util
+ import llvmlite.binding
+ import pathlib
+
+ def writeModuleLLVM(pathFilename: pathlib.Path, identifierCallable: str) -> pathlib.Path:
+     """Import the generated module directly and get its LLVM IR."""
+     specTarget = importlib.util.spec_from_file_location("generatedModule", pathFilename)
+     if specTarget is None or specTarget.loader is None:
+         raise ImportError(f"Could not create module spec or loader for {pathFilename}")
+     moduleTarget = importlib.util.module_from_spec(specTarget)
+     specTarget.loader.exec_module(moduleTarget)
+
+     # Get LLVM IR and write to file
+     linesLLVM = moduleTarget.__dict__[identifierCallable].inspect_llvm()[()]
+     moduleLLVM = llvmlite.binding.module.parse_assembly(linesLLVM)
+     pathFilenameLLVM = pathFilename.with_suffix(".ll")
+     pathFilenameLLVM.write_text(str(moduleLLVM))
+     return pathFilenameLLVM
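A hedged usage sketch for writeModuleLLVM: the job path below is a placeholder, and the callable name goGoGadgetAbsurdity is the one synthesizeJob (next file) writes into the generated module. The [()] lookup in inspect_llvm assumes the generated callable takes no arguments.

    import pathlib
    from someAssemblyRequired.getLLVMforNoReason import writeModuleLLVM

    pathFilenameGenerated = pathlib.Path('jobs/p3x15/p3x15.py')  # placeholder path to a module written by writeModuleWithNumba
    pathFilenameLLVM = writeModuleLLVM(pathFilenameGenerated, 'goGoGadgetAbsurdity')
    print(pathFilenameLLVM)  # the same path with a .ll suffix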
someAssemblyRequired/synthesizeJob.py ADDED
@@ -0,0 +1,170 @@
+ from mapFolding import getPathFilenameFoldsTotal, dtypeNumpyDefaults, thisSeemsVeryComplicated
+ from mapFolding import make_dtype, datatypeLarge, dtypeLarge, datatypeMedium, dtypeMedium, datatypeSmall, dtypeSmall
+ from mapFolding import outfitCountFolds, computationState, indexMy, indexTrack
+ from someAssemblyRequired import countInitialize, countSequential
+ from typing import Any, Optional, Sequence, Type
+ import more_itertools
+ import inspect
+ import numpy
+ import pathlib
+ import pickle
+ import python_minifier
+
+ identifierCallableLaunch = "goGoGadgetAbsurdity"
+
+ def makeStateJob(listDimensions: Sequence[int], **keywordArguments: Optional[Type[Any]]):
+     stateUniversal = outfitCountFolds(listDimensions, computationDivisions=None, CPUlimit=None, **keywordArguments)
+     countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
+
+     pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
+     suffix = pathFilenameChopChop.suffix
+     pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
+     pathJob.mkdir(parents=True, exist_ok=True)
+     pathFilenameJob = pathJob / 'stateJob.pkl'
+
+     pathFilenameJob.write_bytes(pickle.dumps(stateUniversal))
+     return pathFilenameJob
+
+ def convertNDArrayToStr(arrayTarget: numpy.ndarray, identifierName: str) -> str:
+     def process_nested_array(arraySlice):
+         if isinstance(arraySlice, numpy.ndarray) and arraySlice.ndim > 1:
+             return [process_nested_array(arraySlice[index]) for index in range(arraySlice.shape[0])]
+         elif isinstance(arraySlice, numpy.ndarray) and arraySlice.ndim == 1:
+             listWithRanges = []
+             for group in more_itertools.consecutive_groups(arraySlice.tolist()):
+                 ImaSerious = list(group)
+                 if len(ImaSerious) <= 4:
+                     listWithRanges += ImaSerious
+                 else:
+                     ImaRange = [range(ImaSerious[0], ImaSerious[-1] + 1)]
+                     listWithRanges += ImaRange
+             return listWithRanges
+         return arraySlice
+
+     arrayAsNestedLists = process_nested_array(arrayTarget)
+
+     stringMinimized = python_minifier.minify(str(arrayAsNestedLists))
+     commaZeroMaximum = arrayTarget.shape[-1] - 1
+     stringMinimized = stringMinimized.replace('[0' + ',0'*commaZeroMaximum + ']', '[0]*'+str(commaZeroMaximum+1))
+     for countZeros in range(commaZeroMaximum, 2, -1):
+         stringMinimized = stringMinimized.replace(',0'*countZeros + ']', ']+[0]*'+str(countZeros))
+
+     stringMinimized = stringMinimized.replace('range', '*range')
+
+     return f"{identifierName} = numpy.array({stringMinimized}, dtype=numpy.{arrayTarget.dtype})"
+
+ def writeModuleWithNumba(listDimensions, **keywordArguments: Optional[str]) -> pathlib.Path:
+     datatypeLargeAsStr = keywordArguments.get('datatypeLarge', thisSeemsVeryComplicated.datatypeLarge)
+     datatypeMediumAsStr = keywordArguments.get('datatypeMedium', thisSeemsVeryComplicated.datatypeMedium)
+     datatypeSmallAsStr = keywordArguments.get('datatypeSmall', thisSeemsVeryComplicated.datatypeSmall)
+
+     numpy_dtypeLarge = make_dtype(datatypeLargeAsStr) # type: ignore
+     numpy_dtypeMedium = make_dtype(datatypeMediumAsStr) # type: ignore
+     numpy_dtypeSmall = make_dtype(datatypeSmallAsStr) # type: ignore
+
+     pathFilenameJob = makeStateJob(listDimensions, dtypeLarge = numpy_dtypeLarge, dtypeMedium = numpy_dtypeMedium, dtypeSmall = numpy_dtypeSmall)
+     stateJob: computationState = pickle.loads(pathFilenameJob.read_bytes())
+     pathFilenameFoldsTotal = getPathFilenameFoldsTotal(stateJob['mapShape'], pathFilenameJob.parent)
+
+     codeSource = inspect.getsource(countSequential)
+
+     # forceinline=True might actually be useful
+     parametersNumba = f"numba.types.{datatypeLargeAsStr}(), \
+ cache=True, \
+ nopython=True, \
+ fastmath=True, \
+ forceinline=True, \
+ inline='always', \
+ looplift=False, \
+ _nrt=True, \
+ error_model='numpy', \
+ parallel=False, \
+ boundscheck=False, \
+ no_cfunc_wrapper=False, \
+ no_cpython_wrapper=False, \
+ "
+     # no_cfunc_wrapper=True, \
+     # no_cpython_wrapper=True, \
+
+     lineNumba = f"@numba.jit({parametersNumba})"
+
+     linesImport = "\n".join([
+         "import numpy"
+         , "import numba"
+     ])
+
+     ImaIndent = ' '
+     linesDataDynamic = """"""
+     linesDataDynamic = "\n".join([linesDataDynamic
+         , ImaIndent + f"foldsTotal = numba.types.{datatypeLargeAsStr}(0)"
+         , ImaIndent + convertNDArrayToStr(stateJob['foldGroups'], 'foldGroups')
+         , ImaIndent + convertNDArrayToStr(stateJob['gapsWhere'], 'gapsWhere')
+     ])
+
+     linesDataStatic = """"""
+     linesDataStatic = "\n".join([linesDataStatic
+         , ImaIndent + convertNDArrayToStr(stateJob['connectionGraph'], 'connectionGraph')
+     ])
+
+     my = stateJob['my']
+     track = stateJob['track']
+     linesAlgorithm = """"""
+     for lineSource in codeSource.splitlines():
+         if lineSource.startswith(('#', 'import', 'from', '@numba.jit')):
+             continue
+         elif not lineSource:
+             continue
+         elif lineSource.startswith('def '):
+             lineSource = "\n".join([lineNumba
+                 , f"def {identifierCallableLaunch}():"
+                 , linesDataDynamic
+                 , linesDataStatic
+             ])
+         elif 'my[indexMy.' in lineSource:
+             # leaf1ndex = my[indexMy.leaf1ndex.value]
+             identifier, statement = lineSource.split('=')
+             lineSource = ImaIndent + identifier.strip() + '=' + str(eval(statement.strip()))
+         elif 'track[indexTrack.' in lineSource:
+             # leafAbove = track[indexTrack.leafAbove.value]
+             identifier, statement = lineSource.split('=')
+             lineSource = ImaIndent + convertNDArrayToStr(eval(statement.strip()), identifier.strip())
+
+         linesAlgorithm = "\n".join([linesAlgorithm
+             , lineSource
+         ])
+
+     linesLaunch = """"""
+     linesLaunch = linesLaunch + f"""
+ if __name__ == '__main__':
+     import time
+     timeStart = time.perf_counter()
+     {identifierCallableLaunch}()
+     print(time.perf_counter() - timeStart)"""
+
+     linesWriteFoldsTotal = """"""
+     linesWriteFoldsTotal = "\n".join([linesWriteFoldsTotal
+         , " foldsTotal = foldGroups[0:-1].sum() * foldGroups[-1]"
+         , " print(foldsTotal)"
+         , " with numba.objmode():"
+         , f" open('{pathFilenameFoldsTotal.as_posix()}', 'w').write(str(foldsTotal))"
+         , " return foldsTotal"
+     ])
+
+     linesAll = "\n".join([
+         linesImport
+         , linesAlgorithm
+         , linesWriteFoldsTotal
+         , linesLaunch
+     ])
+
+     pathFilenameDestination = pathFilenameJob.with_stem(pathFilenameJob.parent.name).with_suffix(".py")
+     pathFilenameDestination.write_text(linesAll)
+
+     return pathFilenameDestination
+
+ if __name__ == '__main__':
+     listDimensions = [3,15]
+     datatypeLarge = 'int64'
+     datatypeMedium = 'uint8'
+     datatypeSmall = datatypeMedium
+     writeModuleWithNumba(listDimensions, datatypeLarge=datatypeLarge, datatypeMedium=datatypeMedium, datatypeSmall=datatypeSmall)
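convertNDArrayToStr above shrinks the emitted array literals by collapsing consecutive runs into range objects (later re-expanded through the 'range' to '*range' rewrite) before handing the text to python_minifier. A minimal sketch of just the run-collapsing step, assuming more_itertools is installed; the input list is only an illustration.

    import more_itertools

    values = [0, 1, 2, 3, 4, 5, 9, 11, 12, 13, 14, 15]
    listWithRanges = []
    for group in more_itertools.consecutive_groups(values):
        run = list(group)
        if len(run) <= 4:
            listWithRanges += run                            # short runs stay as literal values
        else:
            listWithRanges += [range(run[0], run[-1] + 1)]   # long runs become a single range
    print(listWithRanges)  # [range(0, 6), 9, range(11, 16)]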
someAssemblyRequired/inlineAfunction.py → someAssemblyRequired/synthesizeModules.py CHANGED
@@ -1,7 +1,18 @@
- from mapFolding import indexMy, indexTrack
+ from mapFolding import indexMy, indexTrack, theDao, datatypeMedium, datatypeLarge, datatypeSmall
  import ast
- import copy
  import pathlib
+ import inspect
+
+ algorithmSource = theDao
+
+ dictionaryDecorators={
+     'countInitialize':
+         f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
+     'countParallel':
+         f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeLarge}[::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=True, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
+     'countSequential':
+         f'@numba.jit((numba.{datatypeSmall}[:,:,::1], numba.{datatypeLarge}[::1], numba.{datatypeMedium}[::1], numba.{datatypeSmall}[::1], numba.{datatypeMedium}[:,::1]), parallel=False, boundscheck=False, cache=True, error_model="numpy", fastmath=True, looplift=False, nogil=True, nopython=True)\n',
+ }

  def getDictionaryEnumValues():
      dictionaryEnumValues = {}
@@ -11,10 +22,13 @@ def getDictionaryEnumValues():
      return dictionaryEnumValues

  class RecursiveInlinerWithEnum(ast.NodeTransformer):
+     """Process AST nodes to inline functions and substitute enum values.
+     Also handles function decorators during inlining."""
+
      def __init__(self, dictionaryFunctions, dictionaryEnumValues):
          self.dictionaryFunctions = dictionaryFunctions
          self.dictionaryEnumValues = dictionaryEnumValues
-         self.processed = set() # Track processed functions to avoid infinite recursion
+         self.processed = set()

      def inlineFunctionBody(self, functionName):
          if functionName in self.processed:
@@ -39,7 +53,7 @@ class RecursiveInlinerWithEnum(ast.NodeTransformer):
          callNode = self.generic_visit(node)
          if isinstance(callNode, ast.Call) and isinstance(callNode.func, ast.Name) and callNode.func.id in self.dictionaryFunctions:
              inlineDefinition = self.inlineFunctionBody(callNode.func.id)
-             if inlineDefinition and inlineDefinition.body:
+             if (inlineDefinition and inlineDefinition.body):
                  lastStmt = inlineDefinition.body[-1]
                  if isinstance(lastStmt, ast.Return) and lastStmt.value is not None:
                      return self.visit(lastStmt.value)
@@ -78,20 +92,21 @@ def findRequiredImports(node):

  def generateImports(requiredImports):
      """Generate import statements based on required modules."""
-     importStatements = []
+     importStatements = {'import numba', 'from mapFolding import indexMy, indexTrack'}

-     # Map of module names to their import statements
      importMapping = {
          'numba': 'import numba',
      }

      for moduleName in sorted(requiredImports):
          if moduleName in importMapping:
-             importStatements.append(importMapping[moduleName])
+             importStatements.add(importMapping[moduleName])

      return '\n'.join(importStatements)

- def inlineFunctions(sourceCode, targetFunctionName, dictionaryEnumValues):
+ def inlineFunctions(sourceCode, targetFunctionName, dictionaryEnumValues, skipEnum=False):
+     if skipEnum:
+         dictionaryEnumValues = {}
      dictionaryParsed = ast.parse(sourceCode)
      dictionaryFunctions = {
          element.name: element
@@ -103,50 +118,70 @@ def inlineFunctions(sourceCode, targetFunctionName, dictionaryEnumValues):
      nodeInlined = nodeInliner.visit(nodeTarget)
      ast.fix_missing_locations(nodeInlined)

-     # Generate imports
      requiredImports = findRequiredImports(nodeInlined)
      importStatements = generateImports(requiredImports)

-     # Combine imports with inlined code
-     inlinedCode = importStatements + '\n\n' + ast.unparse(ast.Module(body=[nodeInlined], type_ignores=[]))
+     lineNumbaDecorator = dictionaryDecorators[targetFunctionName]
+     inlinedCode = importStatements + '\n\n' + lineNumbaDecorator + ast.unparse(ast.Module(body=[nodeInlined], type_ignores=[]))
      return inlinedCode

+ def unpackArrays(codeInlined: str, callableTarget: str) -> str:
+     dictionaryReplaceScalars = {
+         'my[indexMy.dimensionsTotal.value]': 'dimensionsTotal',
+         'my[indexMy.dimensionsUnconstrained.value]': 'dimensionsUnconstrained',
+         'my[indexMy.gap1ndex.value]': 'gap1ndex',
+         'my[indexMy.gap1ndexCeiling.value]': 'gap1ndexCeiling',
+         'my[indexMy.indexDimension.value]': 'indexDimension',
+         # 'my[indexMy.indexLeaf.value]': 'indexLeaf',
+         'my[indexMy.indexMiniGap.value]': 'indexMiniGap',
+         'my[indexMy.leaf1ndex.value]': 'leaf1ndex',
+         'my[indexMy.leafConnectee.value]': 'leafConnectee',
+         # 'my[indexMy.taskDivisions.value]': 'taskDivisions',
+         'my[indexMy.taskIndex.value]': 'taskIndex',
+         # 'foldGroups[-1]': 'leavesTotal',
+     }
+
+     dictionaryReplaceArrays = {
+         "track[indexTrack.leafAbove.value, ": 'leafAbove[',
+         "track[indexTrack.leafBelow.value, ": 'leafBelow[',
+         'track[indexTrack.countDimensionsGapped.value, ': 'countDimensionsGapped[',
+         'track[indexTrack.gapRangeStart.value, ': 'gapRangeStart[',
+     }
+
+     ImaIndent = " "
+     linesInitialize = """"""
+
+     for find, replace in dictionaryReplaceScalars.items():
+         linesInitialize += f"{ImaIndent}{replace} = {find}\n"
+         codeInlined = codeInlined.replace(find, replace)
+
+     for find, replace in dictionaryReplaceArrays.items():
+         linesInitialize += f"{ImaIndent}{replace[0:-1]} = {find[0:-2]}]\n"
+         codeInlined = codeInlined.replace(find, replace)
+
+     ourGuyOnTheInside = " doFindGaps = True\n"
+     linesInitialize = ourGuyOnTheInside + linesInitialize
+
+     codeInlined = codeInlined.replace(ourGuyOnTheInside, linesInitialize)
+
+     return codeInlined
+
  def Z0Z_inlineMapFolding():
      dictionaryEnumValues = getDictionaryEnumValues()
+     codeSource = inspect.getsource(algorithmSource)
+     pathFilenameAlgorithm = pathlib.Path(inspect.getfile(algorithmSource))

-     pathFilenameSource = pathlib.Path("/apps/mapFolding/mapFolding/lovelace.py")
-     codeSource = pathFilenameSource.read_text()
-
-     listCallables = [
-         'countInitialize',
-         'countParallel',
-         'countSequential',
-     ]
+     listCallables = [ 'countInitialize', 'countParallel', 'countSequential', ]

      listPathFilenamesDestination: list[pathlib.Path] = []
      for callableTarget in listCallables:
-         pathFilenameDestination = pathFilenameSource.parent / "someAssemblyRequired" / pathFilenameSource.with_stem(callableTarget).name
-         codeInlined = inlineFunctions(codeSource, callableTarget, dictionaryEnumValues)
-         pathFilenameDestination.write_text(codeInlined)
+         skipEnum = (callableTarget == 'countInitialize')
+         skipEnum = (callableTarget == 'countSequential')
+         pathFilenameDestination = pathFilenameAlgorithm.parent / "someAssemblyRequired" / pathFilenameAlgorithm.with_stem(callableTarget).name
+         codeInlined = inlineFunctions(codeSource, callableTarget, dictionaryEnumValues, skipEnum)
+         codeUnpacked = unpackArrays(codeInlined, callableTarget)
+         pathFilenameDestination.write_text(codeUnpacked)
          listPathFilenamesDestination.append(pathFilenameDestination)

-     listNoNumba = [
-         'countInitialize',
-         'countSequential',
-     ]
-
-     listPathFilenamesNoNumba = []
-     for pathFilename in listPathFilenamesDestination:
-         if pathFilename.stem in listNoNumba:
-             pathFilenameNoNumba = pathFilename.with_name(pathFilename.stem + 'NoNumba' + pathFilename.suffix)
-         else:
-             continue
-         codeNoNumba = pathFilename.read_text()
-         for codeLine in copy.copy(codeNoNumba.splitlines()):
-             if 'numba' in codeLine:
-                 codeNoNumba = codeNoNumba.replace(codeLine, '')
-         pathFilenameNoNumba.write_text(codeNoNumba)
-         listPathFilenamesNoNumba.append(pathFilenameNoNumba)
-
  if __name__ == '__main__':
      Z0Z_inlineMapFolding()
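RecursiveInlinerWithEnum above replaces calls to known helper functions with the expression of their final return statement. Below is a minimal, self-contained sketch of that idea with ast.NodeTransformer, independent of the mapFolding specifics; note that, like the class above, it performs no argument substitution.

    import ast
    import textwrap

    source = textwrap.dedent('''
        def getLeafCount():
            return leavesTotal * 2

        def target():
            total = getLeafCount() + 1
            return total
    ''')

    module = ast.parse(source)
    functions = {node.name: node for node in module.body if isinstance(node, ast.FunctionDef)}

    class InlineSingleReturn(ast.NodeTransformer):
        def visit_Call(self, node):
            node = self.generic_visit(node)
            if isinstance(node.func, ast.Name) and node.func.id in functions:
                lastStatement = functions[node.func.id].body[-1]
                if isinstance(lastStatement, ast.Return) and lastStatement.value is not None:
                    return lastStatement.value  # splice the helper's return expression into the caller
            return node

    inlined = InlineSingleReturn().visit(functions['target'])
    ast.fix_missing_locations(inlined)
    print(ast.unparse(inlined))
    # def target():
    #     total = leavesTotal * 2 + 1
    #     return total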
mapFolding-0.2.7.dist-info/RECORD DELETED
@@ -1,19 +0,0 @@
- benchmarks/benchmarking.py,sha256=HD_0NSvuabblg94ftDre6LFnXShTe8MYj3hIodW-zV0,3076
- reference/flattened.py,sha256=X9nvRzg7YDcpCtSDTL4YiidjshlX9rg2e6JVCY6i2u0,16547
- reference/hunterNumba.py,sha256=0giUyqAFzP-XKcq3Kz8wIWCK0BVFhjABVJ1s-w4Jhu0,7109
- reference/irvineJavaPort.py,sha256=Sj-63Z-OsGuDoEBXuxyjRrNmmyl0d7Yz_XuY7I47Oyg,4250
- reference/jax.py,sha256=bB34dGdi3VSz4cRFbmCPn_erAmQ3FyrSED8uJ7CsES0,14961
- reference/lunnan.py,sha256=XEcql_gxvCCghb6Or3qwmPbn4IZUbZTaSmw_fUjRxZE,5037
- reference/lunnanNumpy.py,sha256=HqDgSwTOZA-G0oophOEfc4zs25Mv4yw2aoF1v8miOLk,4653
- reference/lunnanWhile.py,sha256=7NY2IKO5XBgol0aWWF_Fi-7oTL9pvu_z6lB0TF1uVHk,4063
- reference/rotatedEntryPoint.py,sha256=z0QyDQtnMvXNj5ntWzzJUQUMFm1-xHGLVhtYzwmczUI,11530
- reference/total_countPlus1vsPlusN.py,sha256=usenM8Yn_G1dqlPl7NKKkcnbohBZVZBXTQRm2S3_EDA,8106
- someAssemblyRequired/inlineAfunction.py,sha256=_hGAxVSVUAKCsNT92p0--Y636HkudpxHDEClXKrmmNE,6181
- someAssemblyRequired/jobsAndTasks.py,sha256=zHgqvY4GHiCmH1fQrOHxsyDFUWoOcNtLAKPLwL6NVvU,2155
- someAssemblyRequired/makeNuitkaSource.py,sha256=jTK34OWzm6OsgFPd2mHwETxFo2X83io0M4YiEHRgk3U,3262
- someAssemblyRequired/makeNumbaJob.py,sha256=L1JORHfVKjBve7GjlE4-JMnzT7wJGJISGzLbWyRCyxU,5899
- mapFolding-0.2.7.dist-info/METADATA,sha256=i3nAtGhadvid_ODpCN9uXk9e4L3zQhXJfuElSQUmY50,7425
- mapFolding-0.2.7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- mapFolding-0.2.7.dist-info/entry_points.txt,sha256=F3OUeZR1XDTpoH7k3wXuRb3KF_kXTTeYhu5AGK1SiOQ,146
- mapFolding-0.2.7.dist-info/top_level.txt,sha256=yHhQq-bIJhB4pZcof5hXDTIjan0nxcFuOEWb7gy1DuU,42
- mapFolding-0.2.7.dist-info/RECORD,,
someAssemblyRequired/jobsAndTasks.py DELETED
@@ -1,46 +0,0 @@
- from typing import Any, Optional, Sequence, Type
-
- def Z0Z_makeJob(listDimensions: Sequence[int], **keywordArguments: Optional[Type[Any]]):
-     from mapFolding import outfitCountFolds
-     stateUniversal = outfitCountFolds(listDimensions, computationDivisions=None, CPUlimit=None, **keywordArguments)
-     from mapFolding.someAssemblyRequired.countInitializeNoNumba import countInitialize
-     countInitialize(stateUniversal['connectionGraph'], stateUniversal['gapsWhere'], stateUniversal['my'], stateUniversal['track'])
-     from mapFolding import getPathFilenameFoldsTotal
-     pathFilenameChopChop = getPathFilenameFoldsTotal(stateUniversal['mapShape'])
-     import pathlib
-     suffix = pathFilenameChopChop.suffix
-     pathJob = pathlib.Path(str(pathFilenameChopChop)[0:-len(suffix)])
-     pathJob.mkdir(parents=True, exist_ok=True)
-     pathFilenameJob = pathJob / 'stateJob.pkl'
-
-     pathFilenameFoldsTotal = getPathFilenameFoldsTotal(stateUniversal['mapShape'], pathFilenameJob.parent)
-     stateJob = {**stateUniversal, 'pathFilenameFoldsTotal': pathFilenameFoldsTotal}
-
-     del stateJob['mapShape']
-
-     import pickle
-     pathFilenameJob.write_bytes(pickle.dumps(stateJob))
-     return pathFilenameJob
-
- def runJob(pathFilename):
-     from typing import Final
-     import numpy
-     from pathlib import Path
-     pathFilenameJob = Path(pathFilename)
-     from pickle import loads
-     stateJob = loads(pathFilenameJob.read_bytes())
-
-     connectionGraph: numpy.ndarray = stateJob['connectionGraph']
-     foldsSubTotals: numpy.ndarray = stateJob['foldsSubTotals']
-     gapsWhere: numpy.ndarray = stateJob['gapsWhere']
-     my: numpy.ndarray = stateJob['my']
-     pathFilenameFoldsTotal: Final[Path] = stateJob['pathFilenameFoldsTotal']
-     track: numpy.ndarray = stateJob['track']
-
-     from mapFolding.someAssemblyRequired.countSequentialNoNumba import countSequential
-     countSequential(connectionGraph, foldsSubTotals, gapsWhere, my, track)
-
-     print(foldsSubTotals.sum().item())
-     Path(pathFilenameFoldsTotal).parent.mkdir(parents=True, exist_ok=True)
-     Path(pathFilenameFoldsTotal).write_text(str(foldsSubTotals.sum().item()))
-     print(pathFilenameFoldsTotal)
someAssemblyRequired/makeNuitkaSource.py DELETED
@@ -1,99 +0,0 @@
- """NOTE make a special venv for nuitka, then run nuitka from that venv"""
- from pathlib import Path
- from pickle import loads
- from typing import Final
- import numpy
- from mapFolding.someAssemblyRequired.jobsAndTasks import Z0Z_makeJob
-
- """
- Section: configure every time"""
-
- # TODO configure this
- mapShape = [3]*3
- # NOTE ^^^^^^ pay attention
-
- """
- Section: settings"""
-
- pathFilenameData = Z0Z_makeJob(mapShape)
-
- pathJob = pathFilenameData.parent
-
- pathFilenameAlgorithm = Path('/apps/mapFolding/mapFolding/countSequentialNoNumba.py')
- pathFilenameDestination = Path(f"/apps/mapFolding/nn/{pathJob.name}.py")
-
- """
- Section: did you handle and include this stuff?"""
-
- lineImportNumPy = "import numpy"
- linePrintFoldsTotal = "print(foldsSubTotals.sum().item())"
- linesAlgorithm = """"""
- linesData = """"""
- settingsNuitkaProject=f"""
- # nuitka-project: --mode=onefile
- # nuitka-project: --onefile-no-compression
- # nuitka-project: --lto=yes
- # nuitka-project: --clang
- # nuitka-project: --output-dir={pathJob}
- # nuitka-project: --output-filename={pathJob.name}.exe
- """
- # nuitka-project:
- """
- Section: do the work"""
-
- WTFamIdoing = pathFilenameAlgorithm.read_text()
- for lineSource in WTFamIdoing.splitlines():
-     ImaIndent = ' '
-     if lineSource.startswith(ImaIndent):
-         lineSource = lineSource[len(ImaIndent):None]
-     elif lineSource.startswith('#'):
-         continue
-     elif not lineSource:
-         continue
-     elif lineSource.startswith('def '):
-         continue
-     else:
-         raise NotImplementedError("You didn't anticipate this.")
-     linesAlgorithm = "\n".join([linesAlgorithm
-         , lineSource
-     ])
-
- stateJob = loads(pathFilenameData.read_bytes())
- connectionGraph: Final[numpy.ndarray] = stateJob['connectionGraph']
- foldsSubTotals: numpy.ndarray = stateJob['foldsSubTotals']
- gapsWhere: numpy.ndarray = stateJob['gapsWhere']
- my: numpy.ndarray = stateJob['my']
- the: numpy.ndarray = stateJob['the']
- track: numpy.ndarray = stateJob['track']
-
- pathFilenameFoldsTotal = stateJob['pathFilenameFoldsTotal']
- lineDataPathFilenameFoldsTotal = "pathFilenameFoldsTotal = r'" + str(pathFilenameFoldsTotal) + "'\n"
-
- def archivistFormatsArrayToCode(arrayTarget: numpy.ndarray, identifierName: str) -> str:
-     """Format numpy array into a code string that recreates the array."""
-     arrayAsTypeStr = numpy.array2string(
-         arrayTarget,
-         threshold=10000,
-         max_line_width=100,
-         separator=','
-     )
-     return f"{identifierName} = numpy.array({arrayAsTypeStr}, dtype=numpy.{arrayTarget.dtype})\n"
-
- linesData = "\n".join([linesData
-     , lineDataPathFilenameFoldsTotal
-     , archivistFormatsArrayToCode(the, 'the')
-     , archivistFormatsArrayToCode(my, 'my')
-     , archivistFormatsArrayToCode(foldsSubTotals, 'foldsSubTotals')
-     , archivistFormatsArrayToCode(gapsWhere, 'gapsWhere')
-     , archivistFormatsArrayToCode(connectionGraph, 'connectionGraph')
-     , archivistFormatsArrayToCode(track, 'track')
- ])
-
- linesAll = "\n".join([settingsNuitkaProject
-     , lineImportNumPy
-     , linesData
-     , linesAlgorithm
-     , linePrintFoldsTotal
- ])
-
- pathFilenameDestination.write_text(linesAll)
someAssemblyRequired/makeNumbaJob.py DELETED
@@ -1,147 +0,0 @@
- """Create a python module hardcoded to compute a map's foldsTotal.
- - NumPy ndarray.
- - Numba optimized.
- - Absolutely no other imports.
-
- Can create LLVM IR from the module: of unknown utility.
- """
- # from mapFolding import dtypeDefault, dtypeSmall
- from mapFolding import make_dtype, datatypeLarge, dtypeLarge
- from mapFolding.someAssemblyRequired.inlineAfunction import Z0Z_inlineMapFolding
- from mapFolding.someAssemblyRequired.jobsAndTasks import Z0Z_makeJob
- import importlib
- import llvmlite.binding
- import numpy
- import pathlib
- import pickle
- import python_minifier
-
- listDimensions = [5,5]
-
- # NOTE this overwrites files
- Z0Z_inlineMapFolding()
-
- identifierCallableLaunch = "goGoGadgetAbsurdity"
-
- def convertNDArrayToStr(arrayTarget: numpy.ndarray, identifierName: str) -> str:
-     arrayAsTypeStr = numpy.array2string(arrayTarget, threshold=100000, max_line_width=200, separator=',')
-     stringMinimized = python_minifier.minify(arrayAsTypeStr)
-     commaZeroMaximum = arrayTarget.shape[-1] - 1
-     stringMinimized = stringMinimized.replace('[0' + ',0'*commaZeroMaximum + ']', '[0]*'+str(commaZeroMaximum+1))
-     for countZeros in range(commaZeroMaximum, 2, -1):
-         stringMinimized = stringMinimized.replace(',0'*countZeros + ']', ']+[0]*'+str(countZeros))
-     return f"{identifierName} = numpy.array({stringMinimized}, dtype=numpy.{arrayTarget.dtype})"
-
- def writeModuleWithNumba(listDimensions):
-     numpy_dtypeLarge = dtypeLarge
-     # numpy_dtypeDefault = dtypeDefault
-     datatypeDefault = 'uint8'
-     numpy_dtypeDefault = make_dtype(datatypeDefault)
-     numpy_dtypeSmall = numpy_dtypeDefault
-     # forceinline=True might actually be useful
-     parametersNumba = f"numba.types.{datatypeLarge}(), \
- cache=True, \
- nopython=True, \
- fastmath=True, \
- forceinline=True, \
- inline='always', \
- looplift=False, \
- _nrt=True, \
- error_model='numpy', \
- parallel=False, \
- boundscheck=False, \
- no_cfunc_wrapper=False, \
- no_cpython_wrapper=False, \
- "
-     # no_cfunc_wrapper=True, \
-     # no_cpython_wrapper=True, \
-
-     pathFilenameData = Z0Z_makeJob(listDimensions, datatypeDefault=numpy_dtypeDefault, datatypeLarge=numpy_dtypeLarge, datatypeSmall=numpy_dtypeSmall)
-
-     pathFilenameAlgorithm = pathlib.Path('/apps/mapFolding/mapFolding/someAssemblyRequired/countSequentialNoNumba.py')
-     pathFilenameDestination = pathFilenameData.with_stem(pathFilenameData.parent.name).with_suffix(".py")
-
-     lineNumba = f"@numba.jit({parametersNumba})"
-
-     linesImport = "\n".join([
-         "import numpy"
-         , "import numba"
-     ])
-
-     stateJob = pickle.loads(pathFilenameData.read_bytes())
-
-     ImaIndent = ' '
-     linesDataDynamic = """"""
-     linesDataDynamic = "\n".join([linesDataDynamic
-         , ImaIndent + f"foldsTotal = numba.types.{datatypeLarge}(0)"
-         , ImaIndent + convertNDArrayToStr(stateJob['my'], 'my')
-         , ImaIndent + convertNDArrayToStr(stateJob['foldGroups'], 'foldGroups')
-         , ImaIndent + convertNDArrayToStr(stateJob['gapsWhere'], 'gapsWhere')
-         , ImaIndent + convertNDArrayToStr(stateJob['track'], 'track')
-     ])
-
-     linesDataStatic = """"""
-     linesDataStatic = "\n".join([linesDataStatic
-         , ImaIndent + convertNDArrayToStr(stateJob['connectionGraph'], 'connectionGraph')
-     ])
-
-     pathFilenameFoldsTotal: pathlib.Path = stateJob['pathFilenameFoldsTotal']
-
-     linesAlgorithm = """"""
-     for lineSource in pathFilenameAlgorithm.read_text().splitlines():
-         if lineSource.startswith('#'):
-             continue
-         elif not lineSource:
-             continue
-         elif lineSource.startswith('def '):
-             lineSource = "\n".join([lineNumba
-                 , f"def {identifierCallableLaunch}():"
-                 , linesDataDynamic
-                 , linesDataStatic
-             ])
-         linesAlgorithm = "\n".join([linesAlgorithm
-             , lineSource
-         ])
-
-     linesLaunch = """"""
-     linesLaunch = linesLaunch + f"""
- if __name__ == '__main__':
-     import time
-     timeStart = time.perf_counter()
-     {identifierCallableLaunch}()
-     print(time.perf_counter() - timeStart)"""
-
-     linesWriteFoldsTotal = """"""
-     linesWriteFoldsTotal = "\n".join([linesWriteFoldsTotal
-         , " foldsTotal = foldGroups[0:-1].sum() * foldGroups[-1]"
-         , " print(foldsTotal)"
-         , " with numba.objmode():"
-         , f" open('{pathFilenameFoldsTotal.as_posix()}', 'w').write(str(foldsTotal))"
-         , " return foldsTotal"
-     ])
-
-     linesAll = "\n".join([
-         linesImport
-         , linesAlgorithm
-         , linesWriteFoldsTotal
-         , linesLaunch
-     ])
-
-     pathFilenameDestination.write_text(linesAll)
-
-     return pathFilenameDestination
-
- def writeModuleLLVM(pathFilenamePythonFile: pathlib.Path) -> pathlib.Path:
-     pathRootPackage = pathlib.Path('c:/apps/mapFolding')
-     relativePathModule = pathFilenamePythonFile.relative_to(pathRootPackage)
-     moduleTarget = '.'.join(relativePathModule.parts)[0:-len(relativePathModule.suffix)]
-     moduleTargetImported = importlib.import_module(moduleTarget)
-     linesLLVM = moduleTargetImported.__dict__[identifierCallableLaunch].inspect_llvm()[()]
-     moduleLLVM = llvmlite.binding.module.parse_assembly(linesLLVM)
-     pathFilenameLLVM = pathFilenamePythonFile.with_suffix(".ll")
-     pathFilenameLLVM.write_text(str(moduleLLVM))
-     return pathFilenameLLVM
-
- if __name__ == '__main__':
-     pathFilenamePythonFile = writeModuleWithNumba(listDimensions)
-     pathFilenameLLVM = writeModuleLLVM(pathFilenamePythonFile)