vedana-backoffice 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vedana_backoffice/Caddyfile +17 -0
- vedana_backoffice/__init__.py +0 -0
- vedana_backoffice/components/__init__.py +0 -0
- vedana_backoffice/components/etl_graph.py +132 -0
- vedana_backoffice/components/ui_chat.py +236 -0
- vedana_backoffice/graph/__init__.py +0 -0
- vedana_backoffice/graph/build.py +169 -0
- vedana_backoffice/pages/__init__.py +0 -0
- vedana_backoffice/pages/chat.py +204 -0
- vedana_backoffice/pages/etl.py +353 -0
- vedana_backoffice/pages/eval.py +1006 -0
- vedana_backoffice/pages/jims_thread_list_page.py +894 -0
- vedana_backoffice/pages/main_dashboard.py +483 -0
- vedana_backoffice/py.typed +0 -0
- vedana_backoffice/start_services.py +39 -0
- vedana_backoffice/state.py +0 -0
- vedana_backoffice/states/__init__.py +0 -0
- vedana_backoffice/states/chat.py +368 -0
- vedana_backoffice/states/common.py +66 -0
- vedana_backoffice/states/etl.py +1590 -0
- vedana_backoffice/states/eval.py +1940 -0
- vedana_backoffice/states/jims.py +508 -0
- vedana_backoffice/states/main_dashboard.py +757 -0
- vedana_backoffice/ui.py +115 -0
- vedana_backoffice/util.py +71 -0
- vedana_backoffice/vedana_backoffice.py +23 -0
- vedana_backoffice-0.1.0.dist-info/METADATA +10 -0
- vedana_backoffice-0.1.0.dist-info/RECORD +30 -0
- vedana_backoffice-0.1.0.dist-info/WHEEL +4 -0
- vedana_backoffice-0.1.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# Front proxy for the backoffice: listens on :9000, forwards Reflex backend
# traffic to localhost:8000, and serves static files from /srv for everything else.
:9000

encode gzip

# Reflex backend endpoints: event websocket, ping, and upload API.
@backend_routes path /_event/* /ping /_upload /_upload/*
handle @backend_routes {
	reverse_proxy localhost:8000 {
		# Forward the websocket upgrade headers so /_event/* connections
		# can be upgraded by the backend.
		header_up Upgrade {http.request.header.upgrade}
		header_up Connection {http.request.header.connection}
	}
}

# All remaining requests are served from the static root; unknown
# paths fall back to /404.html.
root * /srv
route {
	try_files {path} {path}/ /404.html
	file_server
}
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
import reflex as rx
|
|
2
|
+
|
|
3
|
+
from vedana_backoffice.states.etl import EtlState
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _node_card(node: dict) -> rx.Component:
    """Render one absolutely positioned card for a graph node.

    ``node`` is a pre-serialized dict coming from ``EtlState.graph_nodes``.
    Keys read below: node_type, name, step_type, labels_str, last_run,
    row_count, last_add/last_upd/last_rm (table stats),
    rows_processed/total_success/has_total_failed/total_failed_str (step
    stats), left/top/width/border_css (layout), index_value.
    """
    is_table = node.get("node_type") == "table"  # step for transform, table for table
    return rx.card(
        rx.vstack(
            # Header row: step-type badge (steps only) on the left, node name on the right.
            rx.hstack(
                rx.hstack(
                    rx.cond(
                        is_table,
                        rx.box(),
                        rx.badge(node.get("step_type", ""), color_scheme="indigo", variant="soft"),
                    ),
                    spacing="2",
                ),
                rx.text(node.get("name", ""), weight="medium"),
                justify="between",
                width="100%",
            ),
            rx.text(node.get("labels_str", ""), size="1", color="gray"),
            # Stats row: table view (row counts) vs step view (processed counts).
            rx.cond(
                is_table,
                rx.hstack(
                    rx.tooltip(rx.text(node.get("last_run", "—"), size="1", color="gray"), content="last update time"),
                    # Table counters: total / added / updated / deleted in last run.
                    rx.hstack(
                        rx.tooltip(
                            rx.text(node.get("row_count", "—"), size="1", color="gray", weight="bold"),
                            content="rows total",
                        ),
                        rx.tooltip(
                            rx.text(node.get("last_add", "—"), size="1", color="green"), content="added during last run"
                        ),
                        rx.text("/", size="1", color="gray"),
                        rx.tooltip(
                            rx.text(node.get("last_upd", "—"), size="1", color="gray"),
                            content="updated during last run",
                        ),
                        rx.text("/", size="1", color="gray"),
                        rx.tooltip(
                            rx.text(node.get("last_rm", "—"), size="1", color="red"), content="deleted during last run"
                        ),
                        spacing="1",
                    ),
                    width="100%",
                    justify="between",
                ),
                # Step view: show last run time and rows processed
                rx.cond(
                    node.get("step_type") != "BatchGenerate",  # BatchGenerate has no meta table
                    rx.hstack(
                        rx.tooltip(
                            rx.text(node.get("last_run", "—"), size="1", color="gray"),
                            content="last run time (that produced changes)",
                        ),
                        rx.hstack(
                            rx.tooltip(
                                rx.text(node.get("rows_processed", 0), size="1", color="gray"),
                                content="rows processed in last run",
                            ),
                            rx.text("/", size="1", color="gray"),
                            rx.tooltip(
                                rx.text(node.get("total_success", 0), size="1", color="gray", weight="bold"),
                                content="rows processed total",
                            ),
                            # Failure counter is only shown when there are failures.
                            rx.cond(
                                node.get("has_total_failed", False),
                                rx.tooltip(
                                    rx.text(node.get("total_failed_str", ""), size="1", color="red"),
                                    content="total failed rows (all time)",
                                ),
                                rx.box(),
                            ),
                            spacing="1",
                        ),
                        width="100%",
                        justify="between",
                    ),
                    rx.box(),
                ),
            ),
            spacing="2",
            width="100%",
        ),
        padding="0.75em",
        # Layout is precomputed by the state layer; the card just applies it.
        style={
            "position": "absolute",
            "left": node.get("left", "0px"),
            "top": node.get("top", "0px"),
            "width": node.get("width", "420px"),
            "height": "auto",
            "border": node.get("border_css", "1px solid #e5e7eb"),
            "overflow": "visible",
            "boxSizing": "border-box",
        },
        variant="surface",
        # Click: tables open a data preview, steps toggle selection.
        on_click=rx.cond(
            is_table,
            # no direct return values here and that's ok, handled in state
            EtlState.preview_table(table_name=node.get("name", "")),  # type: ignore
            EtlState.toggle_node_selection(index=node.get("index_value")),  # type: ignore
        ),
    )
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def etl_graph() -> rx.Component:
    """Compose the ETL graph view.

    Two stacked absolutely positioned layers inside one relative container:
    a server-rendered SVG edge layer underneath, and the node cards on top.
    """
    # Both layers share the same full-size absolute placement.
    full_overlay = {
        "position": "absolute",
        "left": 0,
        "top": 0,
        "width": "100%",
        "height": "100%",
    }

    # Edges come pre-rendered as raw SVG markup; pointer events are disabled
    # so clicks fall through to the node cards above.
    edge_layer = rx.box(
        rx.html(EtlState.graph_svg),
        style={**full_overlay, "pointerEvents": "none"},
    )

    card_layer = rx.box(
        rx.foreach(EtlState.graph_nodes, _node_card),
        style=dict(full_overlay),
    )

    return rx.box(edge_layer, card_layer, style={"position": "relative", "width": "100%", "height": "100%"})
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
import reflex as rx
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def render_message_bubble(
    msg: dict,
    on_toggle_details,
    extras: rx.Component | None = None,
    corner_tags_component: rx.Component | None = None,
) -> rx.Component:  # type: ignore[valid-type]
    """Render a chat-style message bubble.

    Expects msg dict with keys:
    - content, is_assistant (bool-like), created_at_fmt or created_at
    - has_tech, has_logs, show_details
    - has_models, has_vts, has_cypher, models_str, vts_str, cypher_str (optional)
    - logs_str (optional)

    ``on_toggle_details`` is wired to the "Details" button; ``extras`` and
    ``corner_tags_component`` are optional extra components injected by the
    caller (body footer and header corner respectively).
    """

    # Collapsible technical metadata: models / VTS queries / cypher queries.
    tech_block = rx.cond(
        msg.get("has_tech"),
        rx.card(
            rx.vstack(
                rx.cond(
                    msg.get("has_models"),
                    rx.vstack(
                        rx.text("Models", weight="medium"),
                        rx.code_block(
                            msg.get("models_str", ""),
                            font_size="11px",
                            language="json",
                            # (bug in reflex?) code_block does not pass some custom styling (wordBreak, whiteSpace)
                            # https://github.com/reflex-dev/reflex/issues/6051
                            code_tag_props={"style": {"whiteSpace": "pre-wrap"}},
                            style={
                                "display": "block",
                                "maxWidth": "100%",
                                "boxSizing": "border-box",
                            },
                        ),
                        spacing="1",
                        width="100%",
                    ),
                ),
                rx.cond(
                    msg.get("has_vts"),
                    rx.vstack(
                        rx.text("VTS Queries", weight="medium"),
                        rx.code_block(
                            msg.get("vts_str", ""),
                            font_size="11px",
                            language="python",
                            code_tag_props={"style": {"whiteSpace": "pre-wrap"}},
                            style={
                                "display": "block",
                                "maxWidth": "100%",
                                "boxSizing": "border-box",
                            },
                        ),
                        spacing="1",
                        width="100%",
                    ),
                ),
                rx.cond(
                    msg.get("has_cypher"),
                    rx.vstack(
                        rx.text("Cypher Queries", weight="medium"),
                        rx.code_block(
                            msg.get("cypher_str", ""),
                            font_size="11px",
                            language="cypher",
                            code_tag_props={"style": {"whiteSpace": "pre-wrap"}},
                            style={
                                "display": "block",
                                "maxWidth": "100%",
                                "boxSizing": "border-box",
                            },
                        ),
                        spacing="1",
                        width="100%",
                    ),
                ),
                spacing="2",
                width="100%",
            ),
            padding="0.75em",
            variant="surface",
        ),
        rx.box(),
    )

    # Raw event payload dump for messages flagged as generic metadata.
    generic_details_block = rx.cond(
        msg.get("generic_meta"),
        rx.code_block(
            msg.get("event_data_str", ""),
            font_size="11px",
            language="json",
            code_tag_props={"style": {"whiteSpace": "pre-wrap"}},
            style={
                "display": "block",
                "maxWidth": "100%",
                "boxSizing": "border-box",
            },
        ),
        rx.box(),
    )

    # Scrollable log output (capped at 25vh).
    logs_block = rx.cond(
        msg.get("has_logs"),
        rx.card(
            rx.vstack(
                rx.text("Logs", weight="medium"),
                rx.scroll_area(
                    rx.code_block(
                        msg.get("logs_str", ""),
                        font_size="11px",
                        language="log",
                        wrap_long_lines=True,
                        style={
                            "display": "block",
                            "maxWidth": "100%",
                            "boxSizing": "border-box",
                        },
                        code_tag_props={"style": {"whiteSpace": "pre-wrap"}},  # styling workaround
                    ),
                    type="always",
                    scrollbars="vertical",
                    style={
                        "maxHeight": "25vh",
                        "width": "100%",
                    },
                ),
                spacing="1",
                width="100%",
            ),
            padding="0.75em",
            width="100%",
            variant="surface",
        ),
        rx.box(),
    )

    details_block = rx.vstack(
        tech_block,
        generic_details_block,
        logs_block,
        spacing="2",
        width="100%",
    )

    # Tag badges for feedback
    tags_box = corner_tags_component or rx.box()

    header_left = rx.hstack(
        # event type label at the very left
        rx.cond(
            msg.get("tag_label", "") != "",
            rx.badge(msg.get("tag_label", ""), variant="soft", size="1", color_scheme="gray"),
            rx.box(),
        ),
        # "Details" toggle only appears when there is something to expand.
        rx.cond(
            msg.get("has_tech") | msg.get("has_logs") | msg.get("generic_meta"),  # type: ignore[operator]
            rx.button(
                "Details",
                variant="ghost",
                color_scheme="gray",
                size="1",
                on_click=on_toggle_details,  # type: ignore[arg-type]
            ),
        ),
        rx.text(msg.get("created_at", ""), size="1", color="gray"),
        spacing="2",
        align="center",
    )

    body = rx.vstack(
        rx.hstack(header_left, tags_box, justify="between", width="100%", align="center"),  # header
        rx.text(
            msg.get("content", ""),
            style={
                "whiteSpace": "pre-wrap",
                "wordBreak": "break-word",
            },
        ),
        rx.cond(msg.get("show_details"), details_block),
        # NOTE(review): `extras is not None` is a plain Python expression, so this
        # cond is resolved once at component build time, not reactively.
        rx.cond(extras is not None, extras or rx.box()),
        spacing="2",
        width="100%",
        style={
            "maxWidth": "100%",
        },
    )

    # Same body in two shells: gray card for assistant, blue-tinted for user.
    assistant_bubble = rx.card(
        body,
        padding="0.75em",
        style={
            "maxWidth": "70%",  # 35 vw is 70% of 50% parent card width in vw terms
            "backgroundColor": "#11182714",
            "border": "1px solid #e5e7eb",
            "borderRadius": "12px",
            "wordBreak": "break-word",
            "overflowX": "hidden",
        },
    )
    user_bubble = rx.card(
        body,
        padding="0.75em",
        style={
            "maxWidth": "70%",
            "backgroundColor": "#3b82f614",
            "border": "1px solid #e5e7eb",
            "borderRadius": "12px",
            "wordBreak": "break-word",
            "overflowX": "hidden",
        },
    )

    # Assistant messages align left with an "A" avatar; user messages align
    # right with a "U" avatar.
    return rx.cond(
        msg.get("is_assistant"),
        rx.hstack(
            rx.avatar(fallback="A", size="2", radius="full"),
            assistant_bubble,
            spacing="2",
            width="100%",
            justify="start",
            align="start",
        ),
        rx.hstack(
            user_bubble,
            rx.avatar(fallback="U", size="2", radius="full"),
            spacing="2",
            width="100%",
            justify="end",
            align="start",
        ),
    )
|
|
File without changes
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Any, Dict, Iterable, List, Set, Tuple
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
@dataclass(frozen=True)
class StepMeta:
    """Normalized, immutable metadata for one pipeline step."""

    index: int  # step position; unique key used by the per-index maps
    name: str
    step_type: str
    inputs: List[str]  # names of tables the step reads
    outputs: List[str]  # names of tables the step writes
    labels: List[Tuple[str, str]]  # (key, value) label pairs for display
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Loose dict payloads for graph nodes/edges; concrete keys depend on the
# producing code (see graph builders) — not a fixed schema.
NodeDict = Dict[str, Any]
EdgeDict = Dict[str, Any]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class CanonicalGraph:
    """Normalized pipeline graph plus lookup indexes for fast joins."""

    steps: List[StepMeta]
    # Indexes for fast joins
    table_to_producers: Dict[str, Set[int]]  # table name -> indexes of steps that output it
    table_to_consumers: Dict[str, Set[int]]  # table name -> indexes of steps that read it
    step_name_by_index: Dict[int, str]
    step_type_by_index: Dict[int, str]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def build_canonical(steps_meta: Iterable[dict]) -> CanonicalGraph:
    """Build a pipeline representation from step metadata.

    - All steps with their metadata (name, type, inputs, outputs, labels)
    - Indexed mappings for fast lookups (table -> producers/consumers)
    - Pre-computed relationships for efficient edge derivation

    Entries whose ``index`` is missing, negative, or not coercible to ``int``
    are skipped; malformed label entries are dropped instead of raising.

    Args:
        steps_meta: Iterable of step metadata dictionaries from pipeline introspection

    Returns:
        CanonicalGraph: Normalized pipeline representation with indexed relationships
    """
    steps: List[StepMeta] = []
    table_to_producers: Dict[str, Set[int]] = {}
    table_to_consumers: Dict[str, Set[int]] = {}
    step_name_by_index: Dict[int, str] = {}
    step_type_by_index: Dict[int, str] = {}

    for m in steps_meta:
        # Narrowed from a blanket ``except Exception``: only conversion
        # failures mean "no usable index"; any other error should surface.
        try:
            idx = int(m.get("index", -1))
        except (TypeError, ValueError):
            idx = -1
        if idx < 0:
            continue

        name = str(m.get("name", f"step_{idx}"))
        step_type = str(m.get("step_type", ""))
        inputs = [str(x) for x in (m.get("inputs") or [])]
        outputs = [str(x) for x in (m.get("outputs") or [])]

        # Keep only well-formed 2-item label pairs; previously a single
        # malformed entry aborted the whole graph build with an unpack error.
        labels: List[Tuple[str, str]] = []
        for pair in m.get("labels") or []:
            if isinstance(pair, (list, tuple)) and len(pair) == 2:
                labels.append((str(pair[0]), str(pair[1])))

        sm = StepMeta(index=idx, name=name, step_type=step_type, inputs=inputs, outputs=outputs, labels=labels)
        steps.append(sm)
        step_name_by_index[idx] = name
        step_type_by_index[idx] = step_type

        # Index the step under every table it touches, for O(1) joins later.
        for t in outputs:
            table_to_producers.setdefault(t, set()).add(idx)
        for t in inputs:
            table_to_consumers.setdefault(t, set()).add(idx)

    return CanonicalGraph(
        steps=steps,
        table_to_producers=table_to_producers,
        table_to_consumers=table_to_consumers,
        step_name_by_index=step_name_by_index,
        step_type_by_index=step_type_by_index,
    )
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def derive_step_edges(cg: CanonicalGraph) -> List[Tuple[int, int, List[str]]]:
    """Derive step->step edges, each labeled by the tables the two steps share.

    Every producer of a table is connected to every distinct consumer of that
    table, using the precomputed table indexes on ``cg``.
    """
    # (producer, consumer) -> set of table names flowing between them
    shared_tables: Dict[Tuple[int, int], Set[str]] = {}
    for table, producer_ids in cg.table_to_producers.items():
        consumer_ids = cg.table_to_consumers.get(table, set())
        for src in producer_ids:
            for dst in consumer_ids:
                if src != dst:  # no self-loops
                    shared_tables.setdefault((src, dst), set()).add(table)

    # Emit labels in deterministic (sorted) order per edge.
    return [(src, dst, sorted(tables)) for (src, dst), tables in shared_tables.items()]
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def derive_table_edges(cg: CanonicalGraph) -> List[Tuple[int, int, str]]:
    """Derive table->table edges labeled by the name of the linking step.

    Table ids are assigned by sorted name. A step with no inputs (e.g. a
    BatchGenerate source) contributes edges from the virtual source id -1.
    """
    all_tables = {name for step in cg.steps for name in step.inputs + step.outputs}
    table_id: Dict[str, int] = {name: pos for pos, name in enumerate(sorted(all_tables))}

    result: List[Tuple[int, int, str]] = []
    for step in cg.steps:
        # Fall back to the virtual source id when a step has no inputs.
        source_ids = [table_id[name] for name in step.inputs if name in table_id] or [-1]
        target_ids = [table_id[name] for name in step.outputs if name in table_id]
        result.extend((src, dst, step.name) for src in source_ids for dst in target_ids)
    return result
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def refine_layer_orders(
    layers_dict: dict[int, list[int]],
    parent_map: dict[int, list[int]],
    child_map: dict[int, list[int]],
    max_layer_idx: int,
    passes: int = 2,
):
    """Reorder nodes within each layer (in place) using barycentric sorting.

    Alternates a top-down sweep (ordering each layer by the average position
    of its parents) with a bottom-up sweep (by children) to shorten edges.
    Mutates the lists in ``layers_dict``; returns nothing.
    """
    if not layers_dict:
        return

    # Current fractional position of each node within its layer.
    positions: dict[int, float] = {}

    def _record(nodes: list[int]) -> None:
        for slot, node in enumerate(nodes):
            positions[node] = float(slot)

    def _reorder(layer: int, neighbors: dict[int, list[int]]) -> None:
        nodes = layers_dict.get(layer)
        if not nodes:
            return

        def _sort_key(node: int) -> tuple[float, float]:
            prior = positions.get(node, 0.0)
            known = [positions[n] for n in neighbors.get(node, []) if n in positions]
            # Nodes with no placed neighbors keep their current position.
            barycenter = sum(known) / len(known) if known else prior
            return barycenter, prior

        nodes.sort(key=_sort_key)
        _record(nodes)

    # Seed positions from the incoming ordering.
    for layer in range(max_layer_idx + 1):
        _record(layers_dict.get(layer, []))

    for _ in range(max(1, passes)):
        # Top-down: order by parents; bottom-up: order by children.
        for layer in range(1, max_layer_idx + 1):
            _reorder(layer, parent_map)
        for layer in range(max_layer_idx - 1, -1, -1):
            _reorder(layer, child_map)
|
|
File without changes
|