processing-graph 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- processing_graph/BaseProcessor.py +408 -0
- processing_graph/ProcessingGraph_tests.py +755 -0
- processing_graph/ProcessingNode.py +651 -0
- processing_graph/__init__.py +0 -0
- processing_graph-0.1.0.dist-info/METADATA +150 -0
- processing_graph-0.1.0.dist-info/RECORD +8 -0
- processing_graph-0.1.0.dist-info/WHEEL +5 -0
- processing_graph-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,408 @@
|
|
|
1
|
+
"""
|
|
2
|
+
A Processing Network is a facade to a collection of processing nodes.
|
|
3
|
+
A Processing Network (PN) is given a json description of a processing network.
|
|
4
|
+
The PN constructs this network, and then gives access to the central 'process' method.
|
|
5
|
+
JG: Wrote this fucking thing 5 years ago. Barely remember how it works, but it works really, really, well.
|
|
6
|
+
JG-2: True
|
|
7
|
+
JG-3: Its nice now
|
|
8
|
+
JG-4: Its not tho... no input/output declarations
|
|
9
|
+
}
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import sys
|
|
13
|
+
import inspect, sys
|
|
14
|
+
|
|
15
|
+
from .ProcessingNode import ProcessingNode
|
|
16
|
+
from nodejobs.dependencies.BaseData import BaseData, BaseField
|
|
17
|
+
from typing import Any
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ProcessingGraph(BaseData):
    """Mapping-style graph container: node name -> ExecutionNode."""

    def add_node(self, node: dict) -> bool:
        """Wrap *node* in an ExecutionNode and register it under its own name."""
        wrapped = ProcessingNode.ExecutionNode(node)
        key = wrapped[wrapped.f_name]
        self[key] = wrapped
        return True

    def add_nodes(self, nodes: list):
        """Register every definition in *nodes*; returns one bool per node."""
        results = []
        for entry in nodes:
            results.append(self.add_node(entry))
        return results
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# TODO - Add in a proper Field type, and back integrate it into BaseData.
# For now the tuple method is good, minimal, and expressive
class Commit(BaseData):
    """A request to run a graph: the graph, a query, and where to read results."""

    graph: (ProcessingGraph, {})  # graph definition (name -> node dict)
    execution_query: dict  # feature dict fed into process()
    root_name: (str, None)  # entry node name; may be derived from from_ref
    result_path: (list, None)  # key path into the root node's output
    from_ref: (list, None)  # ['__ref', root_name, *result_path] shorthand

    def do_pre_process(self, in_dict):
        """Expand a '__ref' shorthand into root_name/result_path before init.

        in_dict is mutated: the from_ref entry is consumed and removed.
        """
        c = Commit
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if c.from_ref in in_dict and in_dict[c.from_ref] is not None:
            # element 0 is the '__ref' tag; 1 is the root; the rest is the path
            in_dict[c.root_name] = in_dict[c.from_ref][1]
            in_dict[c.result_path] = in_dict[c.from_ref][2:]
            del in_dict[c.from_ref]
        return super().do_pre_process(in_dict)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class CommitExecution(BaseData):
    """Outcome of running a Commit: full output, extracted result, or error."""
    # result: value found at the commit's val_path ([] when extraction failed)
    result: Any
    # output: complete feature dict produced by the graph run
    output: dict
    # error: exception captured while packaging the result, else None
    error: (Any, None)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class BaseProcessor:
    """Facade over a processing network.

    Builds ProcessingNode instances from a dict-of-dicts network definition
    and exposes `process` to execute the graph.
    """

    class ProcessingGraph(ProcessingGraph):
        # Re-export of the module-level ProcessingGraph under the
        # BaseProcessor namespace; adds no behavior.
        pass
|
|
57
|
+
|
|
58
|
+
    def __init__(self, networkDef, root="", context=None):
        """Construct every node described by *networkDef*.

        networkDef: dict mapping instance name -> node definition dict.
            NOTE: mutated in place ("name" is written into each entry).
        root: default entry node used by process() when no rootIn is given.
        context: optional {ClassName: cls} mapping used to resolve string
            "clas" entries (see createNodeRecursive).
        """
        assert isinstance(networkDef, dict), f"Totally invalid graph {networkDef}"
        self.executed_nodes = []
        # NOTE(review): .copy() is shallow — the template shares the inner node
        # dicts with networkDef, which are mutated below; confirm intended.
        self.networkTemplate = networkDef.copy()
        self.networkDef = networkDef
        self.instanceMap = {}
        self.root = root
        self.globalsContext = context
        for instanceName in networkDef:
            if instanceName == "__outputs":  # reserved key, not a node
                continue
            networkDef[instanceName]["name"] = instanceName
            # TODO fix this -- but I dont understand the invisible dependencies impacted by rebuilding the keys.
            # lol -- so the name field gets demolished? -- TODO -- should be the other way around. The name field should be the important one

            self.createNodeRecursive(networkDef[instanceName])
        self.lastFeature = {}
|
|
75
|
+
|
|
76
|
+
@staticmethod
|
|
77
|
+
def run_graph(graph, feature, val_path):
|
|
78
|
+
assert isinstance(graph, dict), f"Invalid graph found {graph}"
|
|
79
|
+
pn = BaseProcessor(graph)
|
|
80
|
+
out = pn.process(feature, rootIn=val_path[0])
|
|
81
|
+
exec_val = out
|
|
82
|
+
try:
|
|
83
|
+
for key in val_path:
|
|
84
|
+
exec_val = exec_val[key]
|
|
85
|
+
except:
|
|
86
|
+
raise Exception(
|
|
87
|
+
f"No Results in val_path {val_path}, last_valid_obj: {exec_val}"
|
|
88
|
+
)
|
|
89
|
+
try:
|
|
90
|
+
commit_execution = CommitExecution(
|
|
91
|
+
{
|
|
92
|
+
CommitExecution.output: out,
|
|
93
|
+
CommitExecution.result: exec_val,
|
|
94
|
+
CommitExecution.error: None,
|
|
95
|
+
}
|
|
96
|
+
)
|
|
97
|
+
except Exception as e:
|
|
98
|
+
commit_execution = CommitExecution(
|
|
99
|
+
{
|
|
100
|
+
CommitExecution.output: out,
|
|
101
|
+
CommitExecution.result: [],
|
|
102
|
+
CommitExecution.error: e,
|
|
103
|
+
}
|
|
104
|
+
)
|
|
105
|
+
|
|
106
|
+
return commit_execution
|
|
107
|
+
|
|
108
|
+
@staticmethod
|
|
109
|
+
def run_commit(commit) -> CommitExecution:
|
|
110
|
+
commit = Commit(commit)
|
|
111
|
+
EG = Commit
|
|
112
|
+
graph = commit.graph
|
|
113
|
+
root_id = commit.root_name
|
|
114
|
+
result_path = commit.result_path
|
|
115
|
+
feature = commit.execution_query
|
|
116
|
+
# print(graph)
|
|
117
|
+
# raise("WHAT IS THE GRAPH")
|
|
118
|
+
val_path = [root_id, *result_path]
|
|
119
|
+
ce = BaseProcessor.run_graph(graph, feature, val_path)
|
|
120
|
+
cme = CommitExecution(ce)
|
|
121
|
+
return cme
|
|
122
|
+
|
|
123
|
+
@staticmethod # NEW
|
|
124
|
+
def build_context(mod=None, ns=None, predicate=None):
|
|
125
|
+
"""
|
|
126
|
+
Return {ClassName: ClassObj} from a module or namespace.
|
|
127
|
+
- mod: module object (e.g., sys.modules[__name__])
|
|
128
|
+
- ns: dict namespace (e.g., globals())
|
|
129
|
+
- predicate: optional filter: (cls) -> bool
|
|
130
|
+
"""
|
|
131
|
+
if ns is None and mod is None:
|
|
132
|
+
frm = inspect.stack()[1].frame
|
|
133
|
+
mod = sys.modules.get(frm.f_globals.get("__name__"))
|
|
134
|
+
|
|
135
|
+
items = ns.items() if ns is not None else inspect.getmembers(mod)
|
|
136
|
+
ctx = {name: obj for name, obj in items if inspect.isclass(obj)}
|
|
137
|
+
if mod is not None and ns is None:
|
|
138
|
+
ctx = {
|
|
139
|
+
n: c
|
|
140
|
+
for n, c in ctx.items()
|
|
141
|
+
if getattr(c, "__module__", None) == mod.__name__
|
|
142
|
+
}
|
|
143
|
+
if predicate is not None:
|
|
144
|
+
ctx = {n: c for n, c in ctx.items() if predicate(c)}
|
|
145
|
+
return ctx
|
|
146
|
+
|
|
147
|
+
def getNetworkTemplate(self):
|
|
148
|
+
return self.networkTemplate
|
|
149
|
+
|
|
150
|
+
def export_state(self):
|
|
151
|
+
# Extract state information comprehensively
|
|
152
|
+
state_data = {
|
|
153
|
+
node_name: node_instance.settings
|
|
154
|
+
for node_name, node_instance in self.instanceMap.items()
|
|
155
|
+
}
|
|
156
|
+
return state_data
|
|
157
|
+
|
|
158
|
+
def import_state(self, state_data):
|
|
159
|
+
for node_name, settings in state_data.items():
|
|
160
|
+
if node_name in self.instanceMap:
|
|
161
|
+
for key, val in settings.items():
|
|
162
|
+
self.instanceMap[node_name].setSetting(key, val)
|
|
163
|
+
else:
|
|
164
|
+
# Handle cases where nodes are not initially present
|
|
165
|
+
print(f"Warning: Node {node_name} not found in instanceMap.")
|
|
166
|
+
|
|
167
|
+
    def createNodeRecursive(self, instanceDict):
        """Instantiate the node described by *instanceDict* and, recursively,
        every node it references, caching each in self.instanceMap.

        Returns the (possibly already cached) instance for this node.
        Mutates instanceDict: wraps "settings", writes "instance".
        """
        # TODO -- all methods, use schema validator
        instanceDict["settings"] = ProcessingNode.NodeSettings(instanceDict["settings"])
        iName = instanceDict["name"]
        if not iName in self.instanceMap:
            # if 'settings' in self.networkDef[iName]:
            # settings=self.networkDef[iName]['settings']
            # else:
            # settings=None
            upstream_dependency_list = {}
            dependency_list = {}
            settings = instanceDict["settings"]
            if "upstream_dependencies" in self.networkDef[iName]:
                input_list = self.networkDef[iName]["upstream_dependencies"]
                for ik in input_list.keys():
                    iItem = input_list[ik]
                    if isinstance(iItem, tuple):
                        upstream_dependency_list[ik] = iItem
                    elif isinstance(iItem, list) and iItem[0] == "__ref":
                        # ['__ref', ...] list form: normalize to a tuple and
                        # drop the leading '__ref' tag.
                        iItem = tuple(iItem)
                        iItem = iItem[1:]
                        upstream_dependency_list[ik] = iItem

                    else:  # TODO figure out why settings are populated here. idk
                        settings[ik] = iItem

            if "dependencies" in self.networkDef[iName]:
                input_list = self.networkDef[iName]["dependencies"]
                for ik in input_list.keys():
                    iItem = input_list[ik]
                    dependency_list[ik] = iItem

            # Default the node type to ProcessingNode when absent/None.
            if (
                "type" not in self.networkDef[iName]
                or self.networkDef[iName]["type"] == None
            ):
                self.networkDef[iName]["type"] = ProcessingNode
            typeVar = self.networkDef[iName]["type"]
            # A string "clas" is resolved through the context passed to
            # __init__ (self.globalsContext) into the actual class object.
            if type(self.networkDef[iName]["clas"]) == str:
                clas = self.networkDef[iName]["clas"]
                assert self.networkDef[iName]["clas"] in self.globalsContext, (
                    f"If you are passing string as a class: ({clas}), you must have it in the context"
                )
                self.networkDef[iName]["clas"] = self.globalsContext[
                    self.networkDef[iName]["clas"]
                ]

            try:
                self.instanceMap[iName] = typeVar(
                    {
                        "settings": settings,
                        "dependency_list": dependency_list,
                        "upstream_dependency_list": upstream_dependency_list,
                        "clas": self.networkDef[iName]["clas"],
                    }
                )

            except Exception as e:
                import traceback

                # Dump everything we know about the failed node, then re-raise.
                err_str = traceback.format_exc(limit=50)
                print(err_str)
                print("Trouble instancing a ProcessingNode, here is some info:")
                print("-----------------------------------")
                print("iName", iName)
                print("settings", settings)
                print("dependency_list", dependency_list)
                print("upstream_dependency_list", upstream_dependency_list)
                print("type", typeVar)
                print("class", self.networkDef[iName]["clas"])
                print("-----------Re-raising error---------")
                raise e
        instance = self.instanceMap[iName]
        instanceDict["instance"] = self.instanceMap[iName]

        instance.setSetting("name", iName)
        # Walk every dependency value breadth-first, expanding dicts/lists to
        # find '__ref' tuples and registering each as a numbered dependency.
        for depParameter in instanceDict["dependencies"].keys():
            depName = instanceDict["dependencies"][depParameter]
            refrences_unprocessed = [depName]
            ind = 0
            while len(refrences_unprocessed) > 0:
                ind = ind + 1
                refrence = refrences_unprocessed.pop(0)

                # Recurse and search for sub refrences if you find a dict
                if isinstance(refrence, dict):
                    for sub_refrence in list(refrence.values()):
                        refrences_unprocessed.append(sub_refrence)
                elif isinstance(refrence, list) and (
                    len(refrence) == 0 or refrence[0] != "__ref"
                ):
                    refrences_unprocessed.extend(refrence)
                # If you find an instance (Tuple) register the dependency
                # DUDE TODO Fix this horrible code
                # Expects : ['__ref', 'PointBuffer','generate', 'data']
                # Which is ref (flag), class, function, inner field
                if (
                    isinstance(refrence, list)
                    and len(refrence) > 0
                    and refrence[0] == "__ref"
                ):
                    refrence = tuple(refrence)
                    refrence = refrence[1:]

                if isinstance(refrence, tuple):
                    try:
                        depInstance = self.createNodeRecursive(
                            self.networkDef[refrence[0]]
                        )
                        instance.setDependency(depParameter + str(ind), depInstance)
                    except:
                        # NOTE(review): bare except silently drops failed
                        # dependency resolution — looks like deliberate
                        # best-effort behavior; confirm before narrowing.
                        pass
        return self.instanceMap[iName]
|
|
282
|
+
|
|
283
|
+
def getInstance(self, iName):
|
|
284
|
+
return self.instanceMap[iName]
|
|
285
|
+
|
|
286
|
+
def getInnerInstance(self, iName):
|
|
287
|
+
pn: ProcessingNode = self.instanceMap[iName]
|
|
288
|
+
return pn.getInnerInstance()
|
|
289
|
+
|
|
290
|
+
    def do_preprocess(self):  # LOL I thought these would be useful
        # Hook called at the start of process(); intentionally a no-op here.
        pass
|
|
292
|
+
|
|
293
|
+
    def do_postprocess(self):
        # Hook called at the end of process(); intentionally a no-op here.
        pass
|
|
295
|
+
|
|
296
|
+
def process_node(
|
|
297
|
+
self, proc_node: ProcessingNode, feature, lastFeature={}, for_graph=None
|
|
298
|
+
):
|
|
299
|
+
proc_node.lastFeature = lastFeature
|
|
300
|
+
proc_node.feature = feature
|
|
301
|
+
if proc_node.settings["name"] in feature:
|
|
302
|
+
return feature[proc_node.settings["name"]]
|
|
303
|
+
|
|
304
|
+
# Process Dependencies Recursively
|
|
305
|
+
for k in proc_node.dependencies.keys():
|
|
306
|
+
# Because I totally forgot before:
|
|
307
|
+
# proc_node.dependencies[k], k is NOT the node or ref name.
|
|
308
|
+
# This is because dependencies are pooled ina giant dict. What happens is that when
|
|
309
|
+
# duplicate EDGES are detected leading to the same reference, they are disambiguated
|
|
310
|
+
# by appending an int (1...N) to ref name. Like "write_files_2". These edges may lead
|
|
311
|
+
# to the same cached nodes, and each runtime edge is hot computed when the node is instanced
|
|
312
|
+
# So these cached edges are never seen in any output files / stored. They are runtime linkages
|
|
313
|
+
# Overall I find this lazy loading and instancing of edges very functional, and performant, but
|
|
314
|
+
# they lack any real ability to declare, analyze, or debug
|
|
315
|
+
# Overall this whole scheme could use a refactor, however as it is such a deep feature, it
|
|
316
|
+
# could easly take 10-20 hours of dev to just fix this up, and since it works VERY WELL,
|
|
317
|
+
# it just seems like something to leave alone for now.
|
|
318
|
+
# The only reason to really refactor this, would be if one wanted to do some real-time visualization
|
|
319
|
+
# and debugging of the graph at an edge level. I have not had this issue ever, over dozens of projects
|
|
320
|
+
# So even though I hate it, I will opt to just "leave it alone"
|
|
321
|
+
# However, I want to leave this comment about this none issue.
|
|
322
|
+
if proc_node.dependencies[k].settings["name"] not in feature:
|
|
323
|
+
self.process_node(
|
|
324
|
+
proc_node.dependencies[k], feature, lastFeature, for_graph
|
|
325
|
+
)
|
|
326
|
+
assert proc_node.dependencies[k].settings["name"] in feature, (
|
|
327
|
+
"The node did not correctly deposit results"
|
|
328
|
+
)
|
|
329
|
+
|
|
330
|
+
features = {}
|
|
331
|
+
# Build your personal processing feature
|
|
332
|
+
for key in proc_node.settings: # It has your dependencies
|
|
333
|
+
features[key] = proc_node.settings[key]
|
|
334
|
+
for key in proc_node.dependency_list: # It resolves any values
|
|
335
|
+
features[key] = proc_node.get_dependency_value(key)
|
|
336
|
+
for (
|
|
337
|
+
key
|
|
338
|
+
) in proc_node.upstream_dependency_list: # Also pulls in any forward references
|
|
339
|
+
features[key] = proc_node.get_upstream_dependency_value(key)
|
|
340
|
+
for key in features:
|
|
341
|
+
proc_node.settings[key] = features[key]
|
|
342
|
+
feature[proc_node.settings["name"]] = proc_node.do_process(
|
|
343
|
+
features, proc_node.settings, for_graph
|
|
344
|
+
)
|
|
345
|
+
|
|
346
|
+
proc_node.retVal = feature
|
|
347
|
+
return proc_node.retVal
|
|
348
|
+
|
|
349
|
+
def process(self, feature=None, rootIn=""):
|
|
350
|
+
# We have to set every node in the network to it's "unprocessed" state
|
|
351
|
+
self.do_preprocess()
|
|
352
|
+
if feature == None:
|
|
353
|
+
feature = {}
|
|
354
|
+
targetNode = rootIn
|
|
355
|
+
if rootIn == "":
|
|
356
|
+
targetNode = self.root
|
|
357
|
+
|
|
358
|
+
if targetNode == "":
|
|
359
|
+
# raise Exception("Never need to blindly execute the graph like this")
|
|
360
|
+
for instanceName in self.networkDef.keys():
|
|
361
|
+
if instanceName == "__outputs":
|
|
362
|
+
continue
|
|
363
|
+
if not instanceName in feature or not feature[instanceName]:
|
|
364
|
+
# feature = self.instanceMap[instanceName].process(feature,self.lastFeature,self)
|
|
365
|
+
feature = self.process_node(
|
|
366
|
+
self.instanceMap[instanceName], feature, self.lastFeature, self
|
|
367
|
+
)
|
|
368
|
+
else:
|
|
369
|
+
try:
|
|
370
|
+
self.executed_nodes.append("Root Entry: " + targetNode)
|
|
371
|
+
# feature = self.instanceMap[targetNode].process(feature,self.lastFeature,self)
|
|
372
|
+
feature = self.process_node(
|
|
373
|
+
self.instanceMap[targetNode], feature, self.lastFeature, self
|
|
374
|
+
)
|
|
375
|
+
except Exception as exc:
|
|
376
|
+
# raise exc
|
|
377
|
+
# print succinct graph trace
|
|
378
|
+
print("\nGraphError:")
|
|
379
|
+
if len(self.executed_nodes) > 1:
|
|
380
|
+
for n in self.executed_nodes[:-1]:
|
|
381
|
+
print(f" - {n}")
|
|
382
|
+
|
|
383
|
+
if len(self.executed_nodes) > 0:
|
|
384
|
+
print(f" - {self.executed_nodes[-1]} < ---- Error")
|
|
385
|
+
else:
|
|
386
|
+
print("\n - Empty graph?")
|
|
387
|
+
|
|
388
|
+
_, _, tb = sys.exc_info()
|
|
389
|
+
# skip until the first frame *not* in ProcessingNode
|
|
390
|
+
while tb is not None:
|
|
391
|
+
mod = tb.tb_frame.f_globals.get("__name__", "")
|
|
392
|
+
# print(mod)
|
|
393
|
+
if mod.startswith(
|
|
394
|
+
"processing_graph.ProcessingNode"
|
|
395
|
+
) or mod.startswith("processing_graph.BaseProcessor"):
|
|
396
|
+
tb = tb.tb_next
|
|
397
|
+
else:
|
|
398
|
+
break
|
|
399
|
+
|
|
400
|
+
# re-raise, attaching only the remaining traceback
|
|
401
|
+
# raise exc.with_traceback(tb)
|
|
402
|
+
raise exc
|
|
403
|
+
self.lastFeature = feature
|
|
404
|
+
self.do_postprocess()
|
|
405
|
+
return feature
|
|
406
|
+
|
|
407
|
+
def __str__(self):
|
|
408
|
+
return self.networkDef
|