@shaderfrog/core 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.json +3 -0
- package/.prettierrc.js +3 -0
- package/README.md +3 -0
- package/babel.config.js +6 -0
- package/package.json +47 -0
- package/src/ast/manipulate.ts +392 -0
- package/src/ast/shader-sections.ts +323 -0
- package/src/core/engine.ts +214 -0
- package/src/core/file.js +53 -0
- package/src/core/graph.ts +1007 -0
- package/src/core/nodes/code-nodes.ts +66 -0
- package/src/core/nodes/core-node.ts +48 -0
- package/src/core/nodes/data-nodes.ts +344 -0
- package/src/core/nodes/edge.ts +23 -0
- package/src/core/nodes/engine-node.ts +266 -0
- package/src/core/strategy.ts +520 -0
- package/src/core.test.ts +312 -0
- package/src/plugins/babylon/bablyengine.ts +670 -0
- package/src/plugins/babylon/examples.ts +512 -0
- package/src/plugins/babylon/importers.ts +69 -0
- package/src/plugins/babylon/index.ts +6 -0
- package/src/plugins/three/examples.ts +680 -0
- package/src/plugins/three/importers.ts +18 -0
- package/src/plugins/three/index.ts +6 -0
- package/src/plugins/three/threngine.tsx +571 -0
- package/src/util/ensure.ts +10 -0
- package/src/util/id.ts +2 -0
|
@@ -0,0 +1,1007 @@
|
|
|
1
|
+
import { parser, generate } from '@shaderfrog/glsl-parser';
|
|
2
|
+
import groupBy from 'lodash.groupby';
|
|
3
|
+
|
|
4
|
+
import {
|
|
5
|
+
renameBindings,
|
|
6
|
+
renameFunctions,
|
|
7
|
+
} from '@shaderfrog/glsl-parser/parser/utils';
|
|
8
|
+
import {
|
|
9
|
+
visit,
|
|
10
|
+
AstNode,
|
|
11
|
+
NodeVisitors,
|
|
12
|
+
Path,
|
|
13
|
+
Program,
|
|
14
|
+
FunctionNode,
|
|
15
|
+
} from '@shaderfrog/glsl-parser/ast';
|
|
16
|
+
import { Engine, EngineContext } from './engine';
|
|
17
|
+
import {
|
|
18
|
+
emptyShaderSections,
|
|
19
|
+
findShaderSections,
|
|
20
|
+
mergeShaderSections,
|
|
21
|
+
ShaderSections,
|
|
22
|
+
} from '../ast/shader-sections';
|
|
23
|
+
import preprocess from '@shaderfrog/glsl-parser/preprocessor';
|
|
24
|
+
import {
|
|
25
|
+
convert300MainToReturn,
|
|
26
|
+
from2To3,
|
|
27
|
+
makeExpression,
|
|
28
|
+
makeExpressionWithScopes,
|
|
29
|
+
makeFnStatement,
|
|
30
|
+
} from '../ast/manipulate';
|
|
31
|
+
import { ensure } from '../util/ensure';
|
|
32
|
+
import { applyStrategy } from './strategy';
|
|
33
|
+
import { DataNode } from './nodes/data-nodes';
|
|
34
|
+
import { Edge } from './nodes/edge';
|
|
35
|
+
import {
|
|
36
|
+
BinaryNode,
|
|
37
|
+
CodeNode,
|
|
38
|
+
mapInputName,
|
|
39
|
+
NodeProperty,
|
|
40
|
+
SourceNode,
|
|
41
|
+
} from './nodes/code-nodes';
|
|
42
|
+
import { InputCategory, nodeInput, NodeInput } from './nodes/core-node';
|
|
43
|
+
import { makeId } from '../util/id';
|
|
44
|
+
|
|
45
|
+
export type ShaderStage = 'fragment' | 'vertex';
|
|
46
|
+
|
|
47
|
+
export enum NodeType {
|
|
48
|
+
OUTPUT = 'output',
|
|
49
|
+
BINARY = 'binary',
|
|
50
|
+
SOURCE = 'source',
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
export type GraphNode = SourceNode | DataNode;
|
|
54
|
+
|
|
55
|
+
export interface Graph {
|
|
56
|
+
nodes: GraphNode[];
|
|
57
|
+
edges: Edge[];
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
export const alphabet = 'abcdefghijklmnopqrstuvwxyz';
|
|
61
|
+
|
|
62
|
+
export type NodeFiller = (
|
|
63
|
+
node: SourceNode,
|
|
64
|
+
ast: Program | AstNode
|
|
65
|
+
) => AstNode | void;
|
|
66
|
+
export const emptyFiller: NodeFiller = () => {};
|
|
67
|
+
|
|
68
|
+
export const isDataNode = (node: GraphNode): node is DataNode =>
|
|
69
|
+
'value' in node;
|
|
70
|
+
|
|
71
|
+
export const isSourceNode = (node: GraphNode): node is SourceNode =>
|
|
72
|
+
!isDataNode(node);
|
|
73
|
+
|
|
74
|
+
export const MAGIC_OUTPUT_STMTS = 'mainStmts';
|
|
75
|
+
|
|
76
|
+
export type InputFiller = (a: AstNode | Program) => AstNode | Program;
|
|
77
|
+
export type InputFillerGroup = {
|
|
78
|
+
filler: InputFiller;
|
|
79
|
+
args?: AstNode[];
|
|
80
|
+
};
|
|
81
|
+
export type InputFillers = Record<string, InputFillerGroup>;
|
|
82
|
+
export type NodeContext = {
|
|
83
|
+
ast: AstNode | Program;
|
|
84
|
+
source?: string;
|
|
85
|
+
id: string;
|
|
86
|
+
inputFillers: InputFillers;
|
|
87
|
+
errors?: NodeErrors;
|
|
88
|
+
};
|
|
89
|
+
|
|
90
|
+
type FillerArguments = AstNode[];
|
|
91
|
+
export type ComputedInput = [NodeInput, InputFiller, FillerArguments?];
|
|
92
|
+
|
|
93
|
+
export type FindInputs = (
|
|
94
|
+
engineContext: EngineContext,
|
|
95
|
+
node: SourceNode,
|
|
96
|
+
ast: Program | AstNode,
|
|
97
|
+
inputEdges: Edge[]
|
|
98
|
+
) => ComputedInput[];
|
|
99
|
+
|
|
100
|
+
export type OnBeforeCompile = (
|
|
101
|
+
graph: Graph,
|
|
102
|
+
engineContext: EngineContext,
|
|
103
|
+
node: SourceNode,
|
|
104
|
+
sibling?: SourceNode
|
|
105
|
+
) => Promise<void>;
|
|
106
|
+
|
|
107
|
+
export type ProduceAst = (
|
|
108
|
+
engineContext: EngineContext,
|
|
109
|
+
engine: Engine,
|
|
110
|
+
graph: Graph,
|
|
111
|
+
node: SourceNode,
|
|
112
|
+
inputEdges: Edge[]
|
|
113
|
+
) => AstNode | Program;
|
|
114
|
+
|
|
115
|
+
export type Evaluator = (node: GraphNode) => any;
|
|
116
|
+
export type Evaluate = (
|
|
117
|
+
node: SourceNode,
|
|
118
|
+
inputEdges: Edge[],
|
|
119
|
+
inputNodes: GraphNode[],
|
|
120
|
+
evaluate: Evaluator
|
|
121
|
+
) => any;
|
|
122
|
+
|
|
123
|
+
type CoreNodeParser = {
|
|
124
|
+
produceAst: ProduceAst;
|
|
125
|
+
findInputs: FindInputs;
|
|
126
|
+
produceFiller: NodeFiller;
|
|
127
|
+
evaluate?: Evaluate;
|
|
128
|
+
};
|
|
129
|
+
|
|
130
|
+
export type ManipulateAst = (
|
|
131
|
+
engineContext: EngineContext,
|
|
132
|
+
engine: Engine,
|
|
133
|
+
graph: Graph,
|
|
134
|
+
node: SourceNode,
|
|
135
|
+
ast: AstNode | Program,
|
|
136
|
+
inputEdges: Edge[]
|
|
137
|
+
) => AstNode | Program;
|
|
138
|
+
|
|
139
|
+
export type NodeParser = {
|
|
140
|
+
// cacheKey?: (graph: Graph, node: GraphNode, sibling?: GraphNode) => string;
|
|
141
|
+
onBeforeCompile?: OnBeforeCompile;
|
|
142
|
+
manipulateAst?: ManipulateAst;
|
|
143
|
+
findInputs?: FindInputs;
|
|
144
|
+
produceFiller?: NodeFiller;
|
|
145
|
+
};
|
|
146
|
+
|
|
147
|
+
export const findNode = (graph: Graph, id: string): GraphNode =>
|
|
148
|
+
ensure(graph.nodes.find((node) => node.id === id));
|
|
149
|
+
|
|
150
|
+
export const doesLinkThruShader = (graph: Graph, node: GraphNode): boolean => {
|
|
151
|
+
const edges = graph.edges.filter((edge) => edge.from === node.id);
|
|
152
|
+
if (edges.length === 0) {
|
|
153
|
+
return false;
|
|
154
|
+
}
|
|
155
|
+
return edges.reduce<boolean>((foundShader, edge: Edge) => {
|
|
156
|
+
const upstreamNode = ensure(
|
|
157
|
+
graph.nodes.find((node) => node.id === edge.to)
|
|
158
|
+
);
|
|
159
|
+
return (
|
|
160
|
+
foundShader ||
|
|
161
|
+
// TODO: LARD this probably will introduce some insidius hard to track
|
|
162
|
+
// down bug, as I try to pull toon and phong up out of core, I need to
|
|
163
|
+
// know if a graph links through a "shader" which now means somehting
|
|
164
|
+
// different... does a config object need isShader? Can we compute it from
|
|
165
|
+
// inputs/ outputs/source?
|
|
166
|
+
(!(upstreamNode as CodeNode).expressionOnly &&
|
|
167
|
+
upstreamNode.type !== NodeType.OUTPUT) ||
|
|
168
|
+
doesLinkThruShader(graph, upstreamNode)
|
|
169
|
+
);
|
|
170
|
+
}, false);
|
|
171
|
+
};
|
|
172
|
+
|
|
173
|
+
type CoreParser = { [key: string]: CoreNodeParser };
|
|
174
|
+
|
|
175
|
+
export const nodeName = (node: GraphNode): string =>
|
|
176
|
+
'main_' + node.name.replace(/[^a-zA-Z0-9]/g, ' ').replace(/ +/g, '_');
|
|
177
|
+
|
|
178
|
+
export const mangleName = (name: string, node: GraphNode) => {
|
|
179
|
+
// Mangle names by using the next stage id, if present
|
|
180
|
+
const id = ('nextStageNodeId' in node && node.nextStageNodeId) || node.id;
|
|
181
|
+
return `${name}_${id}`;
|
|
182
|
+
};
|
|
183
|
+
|
|
184
|
+
export const mangleVar = (name: string, engine: Engine, node: GraphNode) =>
|
|
185
|
+
engine.preserve.has(name) ? name : mangleName(name, node);
|
|
186
|
+
|
|
187
|
+
export const mangleEntireProgram = (
|
|
188
|
+
ast: Program,
|
|
189
|
+
node: SourceNode,
|
|
190
|
+
engine: Engine
|
|
191
|
+
) => {
|
|
192
|
+
renameBindings(ast.scopes[0], (name, n) =>
|
|
193
|
+
// @ts-ignore
|
|
194
|
+
n.doNotDescope ? name : mangleVar(name, engine, node)
|
|
195
|
+
);
|
|
196
|
+
mangleMainFn(ast, node);
|
|
197
|
+
};
|
|
198
|
+
|
|
199
|
+
export const mangleMainFn = (ast: Program, node: SourceNode) => {
|
|
200
|
+
renameFunctions(ast.scopes[0], (name) =>
|
|
201
|
+
name === 'main' ? nodeName(node) : mangleName(name, node)
|
|
202
|
+
);
|
|
203
|
+
};
|
|
204
|
+
|
|
205
|
+
export const coreParsers: CoreParser = {
|
|
206
|
+
[NodeType.SOURCE]: {
|
|
207
|
+
produceAst: (engineContext, engine, graph, node, inputEdges) => {
|
|
208
|
+
let ast: Program;
|
|
209
|
+
if (node.expressionOnly) {
|
|
210
|
+
ast = makeExpressionWithScopes(node.source);
|
|
211
|
+
} else {
|
|
212
|
+
const preprocessed =
|
|
213
|
+
node.config.preprocess === false
|
|
214
|
+
? node.source
|
|
215
|
+
: preprocess(node.source, {
|
|
216
|
+
preserve: {
|
|
217
|
+
version: () => true,
|
|
218
|
+
},
|
|
219
|
+
});
|
|
220
|
+
|
|
221
|
+
ast = parser.parse(preprocessed);
|
|
222
|
+
|
|
223
|
+
if (node.config.version === 2 && node.stage) {
|
|
224
|
+
from2To3(ast, node.stage);
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
// This assumes that expressionOnly nodes don't have a stage and that all
|
|
228
|
+
// fragment source code shades have main function, which is probably wrong
|
|
229
|
+
if (node.stage === 'fragment') {
|
|
230
|
+
convert300MainToReturn('main', ast);
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
return ast;
|
|
235
|
+
},
|
|
236
|
+
findInputs: (engineContext, node, ast) => {
|
|
237
|
+
let seen = new Set<string>();
|
|
238
|
+
return node.config.strategies
|
|
239
|
+
.flatMap((strategy) => applyStrategy(strategy, node, ast))
|
|
240
|
+
.filter(([input, _]) => {
|
|
241
|
+
if (!seen.has(input.id)) {
|
|
242
|
+
seen.add(input.id);
|
|
243
|
+
return true;
|
|
244
|
+
}
|
|
245
|
+
return false;
|
|
246
|
+
});
|
|
247
|
+
},
|
|
248
|
+
produceFiller: (node, ast) => {
|
|
249
|
+
return node.expressionOnly
|
|
250
|
+
? (ast as Program).program[0]
|
|
251
|
+
: makeExpression(`${nodeName(node)}()`);
|
|
252
|
+
},
|
|
253
|
+
},
|
|
254
|
+
// TODO: Output node assumes strategies are still passed in on node creation,
|
|
255
|
+
// which might be a little awkward for graph creators?
|
|
256
|
+
[NodeType.OUTPUT]: {
|
|
257
|
+
produceAst: (engineContext, engine, graph, node, inputEdges) => {
|
|
258
|
+
return parser.parse(node.source);
|
|
259
|
+
},
|
|
260
|
+
findInputs: (engineContext, node, ast) => {
|
|
261
|
+
return [
|
|
262
|
+
...node.config.strategies.flatMap((strategy) =>
|
|
263
|
+
applyStrategy(strategy, node, ast)
|
|
264
|
+
),
|
|
265
|
+
[
|
|
266
|
+
nodeInput(
|
|
267
|
+
MAGIC_OUTPUT_STMTS,
|
|
268
|
+
`filler_${MAGIC_OUTPUT_STMTS}`,
|
|
269
|
+
'filler',
|
|
270
|
+
'rgba',
|
|
271
|
+
new Set<InputCategory>(['code']),
|
|
272
|
+
false
|
|
273
|
+
),
|
|
274
|
+
(fillerAst) => {
|
|
275
|
+
const fn = (ast as Program).program.find(
|
|
276
|
+
(stmt): stmt is FunctionNode => stmt.type === 'function'
|
|
277
|
+
);
|
|
278
|
+
fn?.body.statements.unshift(makeFnStatement(generate(fillerAst)));
|
|
279
|
+
return ast;
|
|
280
|
+
},
|
|
281
|
+
] as ComputedInput,
|
|
282
|
+
];
|
|
283
|
+
},
|
|
284
|
+
produceFiller: (node, ast) => {
|
|
285
|
+
return makeExpression('impossible_call()');
|
|
286
|
+
},
|
|
287
|
+
},
|
|
288
|
+
[NodeType.BINARY]: {
|
|
289
|
+
produceAst: (engineContext, engine, graph, iNode, inputEdges) => {
|
|
290
|
+
const node = iNode as BinaryNode;
|
|
291
|
+
const fragmentAst: Program = {
|
|
292
|
+
type: 'program',
|
|
293
|
+
program: [
|
|
294
|
+
makeExpression(
|
|
295
|
+
'(' +
|
|
296
|
+
(inputEdges.length
|
|
297
|
+
? inputEdges
|
|
298
|
+
.map((_, index) => alphabet.charAt(index))
|
|
299
|
+
.join(` ${node.operator} `)
|
|
300
|
+
: `a ${node.operator} b`) +
|
|
301
|
+
')'
|
|
302
|
+
),
|
|
303
|
+
],
|
|
304
|
+
scopes: [],
|
|
305
|
+
};
|
|
306
|
+
return fragmentAst;
|
|
307
|
+
},
|
|
308
|
+
findInputs: (engineContext, node, ast, inputEdges) => {
|
|
309
|
+
return new Array(Math.max(inputEdges.length + 1, 2))
|
|
310
|
+
.fill(0)
|
|
311
|
+
.map((_, index) => {
|
|
312
|
+
const letter = alphabet.charAt(index);
|
|
313
|
+
return [
|
|
314
|
+
nodeInput(
|
|
315
|
+
letter,
|
|
316
|
+
letter,
|
|
317
|
+
'filler',
|
|
318
|
+
undefined,
|
|
319
|
+
new Set<InputCategory>(['data', 'code']),
|
|
320
|
+
false
|
|
321
|
+
),
|
|
322
|
+
(fillerAst) => {
|
|
323
|
+
let foundPath: Path<any> | undefined;
|
|
324
|
+
const visitors: NodeVisitors = {
|
|
325
|
+
identifier: {
|
|
326
|
+
enter: (path) => {
|
|
327
|
+
if (path.node.identifier === letter) {
|
|
328
|
+
foundPath = path;
|
|
329
|
+
}
|
|
330
|
+
},
|
|
331
|
+
},
|
|
332
|
+
};
|
|
333
|
+
visit(ast, visitors);
|
|
334
|
+
if (!foundPath) {
|
|
335
|
+
throw new Error(
|
|
336
|
+
`Im drunk and I think this case is impossible, no "${letter}" found in binary node?`
|
|
337
|
+
);
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
if (foundPath.parent && foundPath.key) {
|
|
341
|
+
// @ts-ignore
|
|
342
|
+
foundPath.parent[foundPath.key] = fillerAst;
|
|
343
|
+
return ast;
|
|
344
|
+
} else {
|
|
345
|
+
return fillerAst;
|
|
346
|
+
}
|
|
347
|
+
},
|
|
348
|
+
] as ComputedInput;
|
|
349
|
+
});
|
|
350
|
+
},
|
|
351
|
+
produceFiller: (node, ast) => {
|
|
352
|
+
return (ast as Program).program[0];
|
|
353
|
+
},
|
|
354
|
+
evaluate: (node, inputEdges, inputNodes, evaluateNode) => {
|
|
355
|
+
const operator = (node as BinaryNode).operator;
|
|
356
|
+
return inputNodes.map<number>(evaluateNode).reduce((num, next) => {
|
|
357
|
+
if (operator === '+') {
|
|
358
|
+
return num + next;
|
|
359
|
+
} else if (operator === '*') {
|
|
360
|
+
return num * next;
|
|
361
|
+
} else if (operator === '-') {
|
|
362
|
+
return num - next;
|
|
363
|
+
} else if (operator === '/') {
|
|
364
|
+
return num / next;
|
|
365
|
+
}
|
|
366
|
+
throw new Error(
|
|
367
|
+
`Don't know how to evaluate ${operator} for node ${node.name} (${node.id})`
|
|
368
|
+
);
|
|
369
|
+
});
|
|
370
|
+
},
|
|
371
|
+
},
|
|
372
|
+
};
|
|
373
|
+
|
|
374
|
+
export const toGlsl = (node: DataNode): string => {
|
|
375
|
+
const { type, value } = node;
|
|
376
|
+
if (type === 'vector2') {
|
|
377
|
+
return `vec2(${value[0]}, ${value[1]})`;
|
|
378
|
+
}
|
|
379
|
+
if (type === 'vector3' || type === 'rgb') {
|
|
380
|
+
return `vec3(${value[0]}, ${value[1]}, ${value[2]})`;
|
|
381
|
+
}
|
|
382
|
+
if (type === 'vector4' || type === 'rgba') {
|
|
383
|
+
return `vec4(${value[0]}, ${value[1]}, ${value[2]}, ${value[3]})`;
|
|
384
|
+
}
|
|
385
|
+
throw new Error(`Unknown GLSL inline type: "${node.type}"`);
|
|
386
|
+
};
|
|
387
|
+
|
|
388
|
+
export const evaluateNode = (
|
|
389
|
+
engine: Engine,
|
|
390
|
+
graph: Graph,
|
|
391
|
+
node: GraphNode
|
|
392
|
+
): any => {
|
|
393
|
+
// TODO: Data nodes themselves should have evaluators
|
|
394
|
+
if ('value' in node) {
|
|
395
|
+
return engine.evaluateNode(node);
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
const { evaluate } = coreParsers[node.type];
|
|
399
|
+
if (!evaluate) {
|
|
400
|
+
throw new Error(`No evaluator for node ${node.name} (${node.id})`);
|
|
401
|
+
}
|
|
402
|
+
const inputEdges = graph.edges.filter((edge) => edge.to === node.id);
|
|
403
|
+
const inputNodes = inputEdges.map(
|
|
404
|
+
(edge) => graph.nodes.find((node) => node.id === edge.from) as GraphNode
|
|
405
|
+
);
|
|
406
|
+
|
|
407
|
+
return evaluate(
|
|
408
|
+
node as SourceNode,
|
|
409
|
+
inputEdges,
|
|
410
|
+
inputNodes,
|
|
411
|
+
evaluateNode.bind(null, engine, graph)
|
|
412
|
+
);
|
|
413
|
+
};
|
|
414
|
+
|
|
415
|
+
type Predicates = {
|
|
416
|
+
node?: (node: GraphNode, inputEdges: Edge[]) => boolean;
|
|
417
|
+
input?: (
|
|
418
|
+
input: NodeInput,
|
|
419
|
+
node: GraphNode,
|
|
420
|
+
inputEdge: Edge | undefined,
|
|
421
|
+
fromNode: GraphNode | undefined
|
|
422
|
+
) => boolean;
|
|
423
|
+
};
|
|
424
|
+
export type SearchResult = {
|
|
425
|
+
nodes: Record<string, GraphNode>;
|
|
426
|
+
inputs: Record<string, NodeInput[]>;
|
|
427
|
+
};
|
|
428
|
+
|
|
429
|
+
/**
|
|
430
|
+
* Create the inputs on a node from the properties. This used to be done at
|
|
431
|
+
* context time. Doing it at node creation time lets us auto-bake edges into
|
|
432
|
+
* the node at initial graph creation time.
|
|
433
|
+
*/
|
|
434
|
+
export const prepopulatePropertyInputs = (node: CodeNode): CodeNode => ({
|
|
435
|
+
...node,
|
|
436
|
+
inputs: [
|
|
437
|
+
...node.inputs,
|
|
438
|
+
...(node.config.properties || []).map((property) =>
|
|
439
|
+
nodeInput(
|
|
440
|
+
property.displayName,
|
|
441
|
+
`property_${property.property}`,
|
|
442
|
+
'property',
|
|
443
|
+
property.type,
|
|
444
|
+
new Set<InputCategory>(['data']),
|
|
445
|
+
!!property.fillerName, // bakeable
|
|
446
|
+
property.property
|
|
447
|
+
)
|
|
448
|
+
),
|
|
449
|
+
],
|
|
450
|
+
});
|
|
451
|
+
|
|
452
|
+
/**
|
|
453
|
+
* Recursively filter the graph, starting from a specific node, looking for
|
|
454
|
+
* nodes and edges that match predicates. This function returns the inputs for
|
|
455
|
+
* matched edges, not the edges themselves, as a convenience for the only
|
|
456
|
+
* consumer of this function, which is finding input names to use as uniforms.
|
|
457
|
+
*
|
|
458
|
+
* Inputs can only be filtered if the graph context has been computed, since
|
|
459
|
+
* inputs aren't created until then.
|
|
460
|
+
*/
|
|
461
|
+
export const filterGraphFromNode = (
|
|
462
|
+
graph: Graph,
|
|
463
|
+
node: GraphNode,
|
|
464
|
+
predicates: Predicates,
|
|
465
|
+
depth = Infinity
|
|
466
|
+
): SearchResult => {
|
|
467
|
+
const { inputs } = node;
|
|
468
|
+
const inputEdges = graph.edges.filter((edge) => edge.to === node.id);
|
|
469
|
+
|
|
470
|
+
const nodeAcc = {
|
|
471
|
+
...(predicates.node && predicates.node(node, inputEdges)
|
|
472
|
+
? { [node.id]: node }
|
|
473
|
+
: {}),
|
|
474
|
+
};
|
|
475
|
+
|
|
476
|
+
return inputEdges.reduce<SearchResult>(
|
|
477
|
+
(acc, inputEdge) => {
|
|
478
|
+
const input = inputs.find((i) => i.id === inputEdge.input);
|
|
479
|
+
const fromNode = inputEdge
|
|
480
|
+
? ensure(graph.nodes.find(({ id }) => id === inputEdge.from))
|
|
481
|
+
: undefined;
|
|
482
|
+
|
|
483
|
+
const inputAcc = {
|
|
484
|
+
...acc.inputs,
|
|
485
|
+
...(input &&
|
|
486
|
+
predicates.input &&
|
|
487
|
+
predicates.input(input, node, inputEdge, fromNode)
|
|
488
|
+
? { [node.id]: [...(acc.inputs[node.id] || []), input] }
|
|
489
|
+
: {}),
|
|
490
|
+
};
|
|
491
|
+
|
|
492
|
+
if (inputEdge && fromNode && depth > 1) {
|
|
493
|
+
const result = filterGraphFromNode(
|
|
494
|
+
graph,
|
|
495
|
+
fromNode,
|
|
496
|
+
predicates,
|
|
497
|
+
depth - 1
|
|
498
|
+
);
|
|
499
|
+
return {
|
|
500
|
+
nodes: { ...acc.nodes, ...result.nodes },
|
|
501
|
+
inputs: { ...acc.inputs, ...inputAcc, ...result.inputs },
|
|
502
|
+
};
|
|
503
|
+
}
|
|
504
|
+
return {
|
|
505
|
+
...acc,
|
|
506
|
+
inputs: {
|
|
507
|
+
...acc.inputs,
|
|
508
|
+
...inputAcc,
|
|
509
|
+
},
|
|
510
|
+
};
|
|
511
|
+
},
|
|
512
|
+
{ inputs: {}, nodes: nodeAcc }
|
|
513
|
+
);
|
|
514
|
+
};
|
|
515
|
+
|
|
516
|
+
export const collectConnectedNodes = (graph: Graph, node: GraphNode): NodeIds =>
|
|
517
|
+
filterGraphFromNode(graph, node, { node: () => true }).nodes;
|
|
518
|
+
|
|
519
|
+
export const filterGraphNodes = (
|
|
520
|
+
graph: Graph,
|
|
521
|
+
nodes: GraphNode[],
|
|
522
|
+
filter: Predicates,
|
|
523
|
+
depth = Infinity
|
|
524
|
+
) =>
|
|
525
|
+
nodes.reduce<SearchResult>(
|
|
526
|
+
(acc, node) => {
|
|
527
|
+
const result = filterGraphFromNode(graph, node, filter, depth);
|
|
528
|
+
return {
|
|
529
|
+
nodes: { ...acc.nodes, ...result.nodes },
|
|
530
|
+
inputs: { ...acc.inputs, ...result.inputs },
|
|
531
|
+
};
|
|
532
|
+
},
|
|
533
|
+
{
|
|
534
|
+
nodes: {},
|
|
535
|
+
inputs: {},
|
|
536
|
+
}
|
|
537
|
+
);
|
|
538
|
+
|
|
539
|
+
type NodeIds = Record<string, GraphNode>;
|
|
540
|
+
export type CompileNodeResult = [ShaderSections, AstNode | void, NodeIds];
|
|
541
|
+
|
|
542
|
+
// before data inputs were known by the input.category being node or data. I
|
|
543
|
+
// tried updating inputs to have acepts: [code|data] and "baked" now is there a
|
|
544
|
+
// way to know if we're plugging in code or data?
|
|
545
|
+
export const isDataInput = (input: NodeInput) =>
|
|
546
|
+
(input.type === 'uniform' || input.type === 'property') && !input.baked;
|
|
547
|
+
|
|
548
|
+
export const compileNode = (
|
|
549
|
+
engine: Engine,
|
|
550
|
+
graph: Graph,
|
|
551
|
+
edges: Edge[],
|
|
552
|
+
engineContext: EngineContext,
|
|
553
|
+
node: GraphNode,
|
|
554
|
+
activeIds: NodeIds = {}
|
|
555
|
+
): CompileNodeResult => {
|
|
556
|
+
// THIS DUPLICATES OTHER LINE
|
|
557
|
+
const parser = {
|
|
558
|
+
...(coreParsers[node.type] || coreParsers[NodeType.SOURCE]),
|
|
559
|
+
...(engine.parsers[node.type] || {}),
|
|
560
|
+
};
|
|
561
|
+
|
|
562
|
+
const { inputs } = node;
|
|
563
|
+
|
|
564
|
+
if (!parser) {
|
|
565
|
+
console.error(node);
|
|
566
|
+
throw new Error(
|
|
567
|
+
`No parser found for ${node.name} (${node.type}, id ${node.id})`
|
|
568
|
+
);
|
|
569
|
+
}
|
|
570
|
+
|
|
571
|
+
const nodeContext = isDataNode(node)
|
|
572
|
+
? null
|
|
573
|
+
: ensure(
|
|
574
|
+
engineContext.nodes[node.id],
|
|
575
|
+
`No node context found for "${node.name}" (id ${node.id})!`
|
|
576
|
+
);
|
|
577
|
+
const { ast, inputFillers } = (nodeContext || {}) as NodeContext;
|
|
578
|
+
if (!inputs) {
|
|
579
|
+
throw new Error("I'm drunk and I think this case should be impossible");
|
|
580
|
+
}
|
|
581
|
+
|
|
582
|
+
let compiledIds = activeIds;
|
|
583
|
+
|
|
584
|
+
const inputEdges = edges.filter((edge) => edge.to === node.id);
|
|
585
|
+
if (inputEdges.length) {
|
|
586
|
+
let continuation = emptyShaderSections();
|
|
587
|
+
inputEdges
|
|
588
|
+
.map((edge) => ({
|
|
589
|
+
edge,
|
|
590
|
+
fromNode: ensure(
|
|
591
|
+
graph.nodes.find((node) => edge.from === node.id),
|
|
592
|
+
`GraphNode for edge ${edge.from} not found`
|
|
593
|
+
),
|
|
594
|
+
input: ensure(
|
|
595
|
+
inputs.find(({ id }) => id == edge.input),
|
|
596
|
+
`GraphNode "${node.name}" has no input ${
|
|
597
|
+
edge.input
|
|
598
|
+
}!\nAvailable:${inputs.map(({ id }) => id).join(', ')}`
|
|
599
|
+
),
|
|
600
|
+
}))
|
|
601
|
+
.filter(({ input }) => !isDataInput(input))
|
|
602
|
+
.forEach(({ fromNode, edge, input }) => {
|
|
603
|
+
const [inputSections, fillerAst, childIds] = compileNode(
|
|
604
|
+
engine,
|
|
605
|
+
graph,
|
|
606
|
+
edges,
|
|
607
|
+
engineContext,
|
|
608
|
+
fromNode,
|
|
609
|
+
activeIds
|
|
610
|
+
);
|
|
611
|
+
if (!fillerAst) {
|
|
612
|
+
throw new TypeError(
|
|
613
|
+
`Expected a filler ast from node ID ${fromNode.id} (${fromNode.type}) but none was returned`
|
|
614
|
+
);
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
continuation = mergeShaderSections(continuation, inputSections);
|
|
618
|
+
compiledIds = { ...compiledIds, ...childIds };
|
|
619
|
+
|
|
620
|
+
let filler: InputFillerGroup;
|
|
621
|
+
let fillerName: string | undefined;
|
|
622
|
+
if (nodeContext) {
|
|
623
|
+
if (input.property) {
|
|
624
|
+
fillerName = ensure(
|
|
625
|
+
((node as CodeNode).config.properties || []).find(
|
|
626
|
+
(p) => p.property === input.property
|
|
627
|
+
)?.fillerName,
|
|
628
|
+
`Node "${node.name}" has no property named "${input.property}" to find the filler for`
|
|
629
|
+
);
|
|
630
|
+
filler = inputFillers[fillerName];
|
|
631
|
+
} else {
|
|
632
|
+
filler = inputFillers[input.id];
|
|
633
|
+
}
|
|
634
|
+
if (!filler) {
|
|
635
|
+
console.error('No filler for property', {
|
|
636
|
+
input,
|
|
637
|
+
node,
|
|
638
|
+
inputFillers,
|
|
639
|
+
fillerName,
|
|
640
|
+
});
|
|
641
|
+
throw new Error(
|
|
642
|
+
`Node "${node.name}" has no filler for input "${input.displayName}" named ${fillerName}`
|
|
643
|
+
);
|
|
644
|
+
}
|
|
645
|
+
|
|
646
|
+
/**
|
|
647
|
+
* +------+ +------+
|
|
648
|
+
* a -- o add o -- o tex |
|
|
649
|
+
* b -- o | +------+
|
|
650
|
+
* +------+
|
|
651
|
+
*
|
|
652
|
+
* This could produce:
|
|
653
|
+
* main_a(v1) + main_b(v2)
|
|
654
|
+
* I guess it has to? or it could produce
|
|
655
|
+
* function add(v1) { return main_a(v1) + main_b(v2); }
|
|
656
|
+
* It can't replace the arg _expression_ in the from shaders, because
|
|
657
|
+
* the expression isn't available there.
|
|
658
|
+
*/
|
|
659
|
+
// TODO: This is a hard coded hack for vUv backfilling. It works in
|
|
660
|
+
// the simple case. Doesn't work for hell (based on world position).
|
|
661
|
+
if (filler.args && fillerAst.type === 'function_call') {
|
|
662
|
+
// Object.values(filterGraphFromNode(graph, node, {
|
|
663
|
+
// node: (n) => n.type === 'source'
|
|
664
|
+
// }).nodes).forEach(sourceNode => {
|
|
665
|
+
if (fromNode.type === 'source') {
|
|
666
|
+
// @ts-ignore
|
|
667
|
+
fillerAst.args = filler.args;
|
|
668
|
+
// const fc = engineContext.nodes[sourceNode.id];
|
|
669
|
+
const fc = engineContext.nodes[fromNode.id];
|
|
670
|
+
// @ts-ignore
|
|
671
|
+
fc.ast.scopes[0].functions.main.references[0].prototype.parameters =
|
|
672
|
+
['vec2 vv'];
|
|
673
|
+
// @ts-ignore
|
|
674
|
+
const scope = fc.ast.scopes[0];
|
|
675
|
+
renameBindings(scope, (name, node) => {
|
|
676
|
+
console.log('renaming binding', name);
|
|
677
|
+
return node.type !== 'declaration' && name === 'vUv'
|
|
678
|
+
? 'vv'
|
|
679
|
+
: name;
|
|
680
|
+
});
|
|
681
|
+
}
|
|
682
|
+
// })
|
|
683
|
+
}
|
|
684
|
+
|
|
685
|
+
// Fill in the input! The return value is the new AST of the filled in
|
|
686
|
+
// fromNode.
|
|
687
|
+
nodeContext.ast = filler.filler(fillerAst);
|
|
688
|
+
}
|
|
689
|
+
// console.log(generate(ast.program));
|
|
690
|
+
});
|
|
691
|
+
|
|
692
|
+
// Order matters here! *Prepend* the input nodes to this one, because
|
|
693
|
+
// you have to declare functions in order of use in GLSL
|
|
694
|
+
const sections = mergeShaderSections(
|
|
695
|
+
continuation,
|
|
696
|
+
isDataNode(node) || (node as SourceNode).expressionOnly
|
|
697
|
+
? emptyShaderSections()
|
|
698
|
+
: findShaderSections(ast as Program)
|
|
699
|
+
);
|
|
700
|
+
|
|
701
|
+
const filler = isDataNode(node)
|
|
702
|
+
? makeExpression(toGlsl(node))
|
|
703
|
+
: parser.produceFiller(node, ast);
|
|
704
|
+
|
|
705
|
+
return [sections, filler, { ...compiledIds, [node.id]: node }];
|
|
706
|
+
} else {
|
|
707
|
+
// TODO: This duplicates the above branch, and also does this mean we
|
|
708
|
+
// recalculate the shader sections and filler for every edge? Can I move
|
|
709
|
+
// these lines above the loop?
|
|
710
|
+
const sections =
|
|
711
|
+
isDataNode(node) || (node as SourceNode).expressionOnly
|
|
712
|
+
? emptyShaderSections()
|
|
713
|
+
: findShaderSections(ast as Program);
|
|
714
|
+
|
|
715
|
+
const filler = isDataNode(node)
|
|
716
|
+
? makeExpression(toGlsl(node))
|
|
717
|
+
: parser.produceFiller(node, ast);
|
|
718
|
+
|
|
719
|
+
return [sections, filler, { ...compiledIds, [node.id]: node }];
|
|
720
|
+
}
|
|
721
|
+
};
|
|
722
|
+
|
|
723
|
+
// Merge existing node inputs, and inputs based on properties, with new ones
|
|
724
|
+
// found from the source code, using the *id* as the uniqueness key. Any filler input gets
|
|
725
|
+
// merged into property inputs with the same id. This preserves the
|
|
726
|
+
// "baked" property on node inputs which is toggle-able in the graph
|
|
727
|
+
const collapseNodeInputs = (
|
|
728
|
+
node: CodeNode,
|
|
729
|
+
updatedInputs: NodeInput[]
|
|
730
|
+
): NodeInput[] =>
|
|
731
|
+
Object.values(groupBy([...updatedInputs, ...node.inputs], (i) => i.id)).map(
|
|
732
|
+
(dupes) => dupes.reduce((node, dupe) => ({ ...node, ...dupe }))
|
|
733
|
+
);
|
|
734
|
+
|
|
735
|
+
type NodeErrors = { type: 'errors'; errors: any[] };
|
|
736
|
+
const makeError = (...errors: any[]): NodeErrors => ({
|
|
737
|
+
type: 'errors',
|
|
738
|
+
errors,
|
|
739
|
+
});
|
|
740
|
+
const isError = (test: any): test is NodeErrors => test?.type === 'errors';
|
|
741
|
+
|
|
742
|
+
const computeNodeContext = async (
|
|
743
|
+
engineContext: EngineContext,
|
|
744
|
+
engine: Engine,
|
|
745
|
+
graph: Graph,
|
|
746
|
+
node: SourceNode
|
|
747
|
+
): Promise<NodeContext | NodeErrors> => {
|
|
748
|
+
// THIS DUPLICATES OTHER LINE
|
|
749
|
+
const parser = {
|
|
750
|
+
...(coreParsers[node.type] || coreParsers[NodeType.SOURCE]),
|
|
751
|
+
...(engine.parsers[node.type] || {}),
|
|
752
|
+
};
|
|
753
|
+
|
|
754
|
+
const { onBeforeCompile, manipulateAst } = parser;
|
|
755
|
+
if (onBeforeCompile) {
|
|
756
|
+
const { groupId } = node as SourceNode;
|
|
757
|
+
const sibling = graph.nodes.find(
|
|
758
|
+
(n) =>
|
|
759
|
+
n !== node && 'groupId' in n && (n as SourceNode).groupId === groupId
|
|
760
|
+
);
|
|
761
|
+
await onBeforeCompile(
|
|
762
|
+
graph,
|
|
763
|
+
engineContext,
|
|
764
|
+
node as SourceNode,
|
|
765
|
+
sibling as SourceNode
|
|
766
|
+
);
|
|
767
|
+
}
|
|
768
|
+
|
|
769
|
+
const inputEdges = graph.edges.filter((edge) => edge.to === node.id);
|
|
770
|
+
|
|
771
|
+
let ast;
|
|
772
|
+
try {
|
|
773
|
+
ast = parser.produceAst(engineContext, engine, graph, node, inputEdges);
|
|
774
|
+
if (manipulateAst) {
|
|
775
|
+
ast = manipulateAst(engineContext, engine, graph, node, ast, inputEdges);
|
|
776
|
+
}
|
|
777
|
+
} catch (error) {
|
|
778
|
+
console.error('Error parsing source code!', error);
|
|
779
|
+
return makeError(error);
|
|
780
|
+
}
|
|
781
|
+
|
|
782
|
+
// Find all the inputs of this node where a "source" code node flows into it,
|
|
783
|
+
// to auto-bake it. This handles the case where a graph is instantiated with
|
|
784
|
+
// a shader plugged into a texture property. The property on the intial node
|
|
785
|
+
// doesn't know if it's baked or not
|
|
786
|
+
const dataInputs = groupBy(
|
|
787
|
+
filterGraphFromNode(
|
|
788
|
+
graph,
|
|
789
|
+
node,
|
|
790
|
+
{
|
|
791
|
+
input: (input, b, c, fromNode) =>
|
|
792
|
+
input.bakeable && fromNode?.type === 'source',
|
|
793
|
+
},
|
|
794
|
+
1
|
|
795
|
+
).inputs[node.id] || [],
|
|
796
|
+
'id'
|
|
797
|
+
);
|
|
798
|
+
|
|
799
|
+
// Find the combination if inputs (data) and fillers (runtime context data)
|
|
800
|
+
// and copy the input data onto the node, and the fillers onto the context
|
|
801
|
+
const computedInputs = parser.findInputs(
|
|
802
|
+
engineContext,
|
|
803
|
+
node,
|
|
804
|
+
ast,
|
|
805
|
+
inputEdges
|
|
806
|
+
);
|
|
807
|
+
|
|
808
|
+
node.inputs = collapseNodeInputs(
|
|
809
|
+
node,
|
|
810
|
+
computedInputs.map(([i]) => ({
|
|
811
|
+
...i,
|
|
812
|
+
displayName: mapInputName(node, i),
|
|
813
|
+
}))
|
|
814
|
+
).map((input) => ({
|
|
815
|
+
// Auto-bake
|
|
816
|
+
...input,
|
|
817
|
+
...(input.id in dataInputs ? { baked: true } : {}),
|
|
818
|
+
}));
|
|
819
|
+
|
|
820
|
+
const nodeContext: NodeContext = {
|
|
821
|
+
ast,
|
|
822
|
+
id: node.id,
|
|
823
|
+
inputFillers: computedInputs.reduce<InputFillers>(
|
|
824
|
+
(acc, [input, filler, args]) => ({
|
|
825
|
+
...acc,
|
|
826
|
+
[input.id]: {
|
|
827
|
+
filler,
|
|
828
|
+
args,
|
|
829
|
+
},
|
|
830
|
+
}),
|
|
831
|
+
{}
|
|
832
|
+
),
|
|
833
|
+
};
|
|
834
|
+
|
|
835
|
+
// Skip mangling if the node tells us to, which probably means it's an engine
|
|
836
|
+
// ndoe where we don't care about renaming all the variables, or if it's
|
|
837
|
+
// an expression, where we want to be in the context of other variables
|
|
838
|
+
if (node.config.mangle !== false && !node.expressionOnly) {
|
|
839
|
+
mangleEntireProgram(ast as Program, node, engine);
|
|
840
|
+
}
|
|
841
|
+
|
|
842
|
+
return nodeContext;
|
|
843
|
+
};
|
|
844
|
+
|
|
845
|
+
export const computeContextForNodes = async (
|
|
846
|
+
engineContext: EngineContext,
|
|
847
|
+
engine: Engine,
|
|
848
|
+
graph: Graph,
|
|
849
|
+
nodes: GraphNode[]
|
|
850
|
+
) =>
|
|
851
|
+
nodes.filter(isSourceNode).reduce(async (ctx, node) => {
|
|
852
|
+
const context = await ctx;
|
|
853
|
+
|
|
854
|
+
let result = await computeNodeContext(engineContext, engine, graph, node);
|
|
855
|
+
let nodeContext = isError(result)
|
|
856
|
+
? {
|
|
857
|
+
errors: result,
|
|
858
|
+
}
|
|
859
|
+
: result;
|
|
860
|
+
|
|
861
|
+
context[node.id] = {
|
|
862
|
+
...(context[node.id] || {}),
|
|
863
|
+
...nodeContext,
|
|
864
|
+
};
|
|
865
|
+
return context;
|
|
866
|
+
}, Promise.resolve(engineContext.nodes));
|
|
867
|
+
|
|
868
|
+
// Result of a full graph compile: the merged shader text sections for both
// stages plus bookkeeping about which nodes participated.
export type CompileGraphResult = {
  // Merged shader sections for the fragment stage
  fragment: ShaderSections;
  // Merged shader sections for the vertex stage
  vertex: ShaderSections;
  // The graph's fragment output node
  outputFrag: GraphNode;
  // The graph's vertex output node
  outputVert: GraphNode;
  // Vertex nodes with no edges of their own, pulled into the compile because
  // their paired fragment node (nextStageNodeId) is active
  orphanNodes: GraphNode[];
  // Ids of every node that took part in this compile (both stages + orphans)
  activeNodeIds: Set<string>;
};
|
|
876
|
+
|
|
877
|
+
/**
|
|
878
|
+
* Compute the context for every node in the graph, done on initial graph load
|
|
879
|
+
* to compute the inputs/outputs for every node
|
|
880
|
+
*/
|
|
881
|
+
export const computeAllContexts = (
|
|
882
|
+
engineContext: EngineContext,
|
|
883
|
+
engine: Engine,
|
|
884
|
+
graph: Graph
|
|
885
|
+
) => computeContextForNodes(engineContext, engine, graph, graph.nodes);
|
|
886
|
+
|
|
887
|
+
/**
|
|
888
|
+
* Compute the contexts for nodes starting from the outputs, working backwards.
|
|
889
|
+
* Used to only (re)-compute context for any actively used nodes
|
|
890
|
+
*/
|
|
891
|
+
export const computeGraphContext = async (
|
|
892
|
+
engineContext: EngineContext,
|
|
893
|
+
engine: Engine,
|
|
894
|
+
graph: Graph
|
|
895
|
+
) => {
|
|
896
|
+
const outputFrag = graph.nodes.find(
|
|
897
|
+
(node) => node.type === 'output' && node.stage === 'fragment'
|
|
898
|
+
);
|
|
899
|
+
if (!outputFrag) {
|
|
900
|
+
throw new Error('No fragment output in graph');
|
|
901
|
+
}
|
|
902
|
+
const outputVert = graph.nodes.find(
|
|
903
|
+
(node) => node.type === 'output' && node.stage === 'vertex'
|
|
904
|
+
);
|
|
905
|
+
if (!outputVert) {
|
|
906
|
+
throw new Error('No vertex output in graph');
|
|
907
|
+
}
|
|
908
|
+
|
|
909
|
+
const vertexIds = collectConnectedNodes(graph, outputVert);
|
|
910
|
+
const fragmentIds = collectConnectedNodes(graph, outputFrag);
|
|
911
|
+
const additionalIds = graph.nodes.filter(
|
|
912
|
+
(node) =>
|
|
913
|
+
isSourceNode(node) &&
|
|
914
|
+
node.stage === 'vertex' &&
|
|
915
|
+
node.nextStageNodeId &&
|
|
916
|
+
fragmentIds[node.nextStageNodeId] &&
|
|
917
|
+
!vertexIds[node.id]
|
|
918
|
+
);
|
|
919
|
+
|
|
920
|
+
await computeContextForNodes(engineContext, engine, graph, [
|
|
921
|
+
outputVert,
|
|
922
|
+
...Object.values(vertexIds).filter((node) => node.id !== outputVert.id),
|
|
923
|
+
...additionalIds,
|
|
924
|
+
]);
|
|
925
|
+
await computeContextForNodes(engineContext, engine, graph, [
|
|
926
|
+
outputFrag,
|
|
927
|
+
...Object.values(fragmentIds).filter((node) => node.id !== outputFrag.id),
|
|
928
|
+
]);
|
|
929
|
+
};
|
|
930
|
+
|
|
931
|
+
export const compileGraph = (
|
|
932
|
+
engineContext: EngineContext,
|
|
933
|
+
engine: Engine,
|
|
934
|
+
graph: Graph
|
|
935
|
+
): CompileGraphResult => {
|
|
936
|
+
// computeGraphContext(engineContext, engine, graph);
|
|
937
|
+
|
|
938
|
+
const outputFrag = graph.nodes.find(
|
|
939
|
+
(node) => node.type === 'output' && node.stage === 'fragment'
|
|
940
|
+
);
|
|
941
|
+
if (!outputFrag) {
|
|
942
|
+
throw new Error('No fragment output in graph');
|
|
943
|
+
}
|
|
944
|
+
|
|
945
|
+
const [fragment, , fragmentIds] = compileNode(
|
|
946
|
+
engine,
|
|
947
|
+
graph,
|
|
948
|
+
graph.edges,
|
|
949
|
+
engineContext,
|
|
950
|
+
outputFrag
|
|
951
|
+
);
|
|
952
|
+
|
|
953
|
+
const outputVert = graph.nodes.find(
|
|
954
|
+
(node) => node.type === 'output' && node.stage === 'vertex'
|
|
955
|
+
);
|
|
956
|
+
if (!outputVert) {
|
|
957
|
+
throw new Error('No vertex output in graph');
|
|
958
|
+
}
|
|
959
|
+
|
|
960
|
+
const vertexIds = collectConnectedNodes(graph, outputVert);
|
|
961
|
+
|
|
962
|
+
// Some fragment shaders reference vertex shaders which may not have been
|
|
963
|
+
// given edges in the graph. Build invisible edges from these vertex nodes to
|
|
964
|
+
// the hidden "mainStmts" input on the output node, which inlines the function
|
|
965
|
+
// calls to those vertex main() statements and includes them in the output
|
|
966
|
+
const orphanNodes = graph.nodes.filter(
|
|
967
|
+
(node) =>
|
|
968
|
+
isSourceNode(node) &&
|
|
969
|
+
node.stage === 'vertex' &&
|
|
970
|
+
node.nextStageNodeId &&
|
|
971
|
+
fragmentIds[node.nextStageNodeId] &&
|
|
972
|
+
!vertexIds[node.id]
|
|
973
|
+
);
|
|
974
|
+
|
|
975
|
+
const orphanEdges: Edge[] = orphanNodes.map((node) => ({
|
|
976
|
+
id: makeId(),
|
|
977
|
+
from: node.id,
|
|
978
|
+
to: outputVert.id,
|
|
979
|
+
output: 'main',
|
|
980
|
+
input: `filler_${MAGIC_OUTPUT_STMTS}`,
|
|
981
|
+
stage: 'vertex',
|
|
982
|
+
category: 'code',
|
|
983
|
+
}));
|
|
984
|
+
|
|
985
|
+
const [vertex, ,] = compileNode(
|
|
986
|
+
engine,
|
|
987
|
+
graph,
|
|
988
|
+
[...graph.edges, ...orphanEdges],
|
|
989
|
+
engineContext,
|
|
990
|
+
outputVert
|
|
991
|
+
);
|
|
992
|
+
|
|
993
|
+
// Every compileNode returns the AST so far, as well as the filler for the
|
|
994
|
+
// next node with inputs. On the final step, we discard the filler
|
|
995
|
+
return {
|
|
996
|
+
fragment,
|
|
997
|
+
vertex,
|
|
998
|
+
outputFrag,
|
|
999
|
+
outputVert,
|
|
1000
|
+
orphanNodes,
|
|
1001
|
+
activeNodeIds: new Set<string>([
|
|
1002
|
+
...Object.keys(vertexIds),
|
|
1003
|
+
...Object.keys(fragmentIds),
|
|
1004
|
+
...orphanNodes.map((node) => node.id),
|
|
1005
|
+
]),
|
|
1006
|
+
};
|
|
1007
|
+
};
|