patchvec 0.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,445 @@
1
+ # (C) 2025 Rodrigo Rodrigues da Silva <rodrigopitanga@posteo.net>
2
+ # SPDX-License-Identifier: GPL-3.0-or-later
3
+
4
+ from __future__ import annotations
5
+ import os, json, operator
6
+ from datetime import datetime
7
+ from typing import Dict, Iterable, List, Any
8
+ from threading import Lock
9
+ from contextlib import contextmanager
10
+ from txtai.embeddings import Embeddings
11
+ from pave.stores.base import BaseStore, Record
12
+ from pave.config import CFG as c, LOG as log
13
+
14
# Registry of per-collection locks, keyed by "t_<tenant>:c_<collection>".
_LOCKS: dict[str, Lock] = {}


def get_lock(key: str) -> Lock:
    """Return the process-wide Lock for *key*, creating it on first use.

    Fix: uses dict.setdefault, which is atomic in CPython, so two threads
    racing on the same key always receive the same Lock instance. The
    original check-then-insert could hand out two different locks and
    defeat the mutual exclusion entirely.
    """
    return _LOCKS.setdefault(key, Lock())
19
+
20
@contextmanager
def collection_lock(tenant: str, collection: str):
    """Serialize mutating operations on one (tenant, collection) pair."""
    with get_lock(f"t_{tenant}:c_{collection}"):
        yield
28
+
29
class TxtaiStore(BaseStore):
    """txtai-backed vector store with JSON sidecar catalog/metadata files."""

    def __init__(self):
        # One cached Embeddings index per (tenant, collection) pair.
        self._emb: Dict[tuple[str, str], Embeddings] = {}
32
+
33
+ def _base_path(self, tenant: str, collection: str) -> str:
34
+ return os.path.join(c.get("data_dir"), f"t_{tenant}", f"c_{collection}")
35
+
36
+ def _catalog_path(self, tenant: str, collection: str) -> str:
37
+ return os.path.join(self._base_path(tenant, collection), "catalog.json")
38
+
39
+ def _meta_path(self, tenant: str, collection: str) -> str:
40
+ return os.path.join(self._base_path(tenant, collection), "meta.json")
41
+
42
+ def _load_json(self, path: str):
43
+ if os.path.isfile(path):
44
+ try:
45
+ with open(path, "r", encoding="utf-8") as f:
46
+ return json.load(f) or {}
47
+ except Exception:
48
+ return {}
49
+ return {}
50
+
51
+ def _save_json(self, path: str, data):
52
+ os.makedirs(os.path.dirname(path), exist_ok=True)
53
+ with open(path, "w", encoding="utf-8") as f:
54
+ json.dump(data, f, ensure_ascii=False)
55
+
56
+ def _load_catalog(self, tenant: str, collection: str) -> Dict[str, List[str]]:
57
+ return self._load_json(self._catalog_path(tenant, collection))
58
+
59
+ def _save_catalog(self, tenant: str, collection: str,
60
+ cat: Dict[str, List[str]]) -> None:
61
+ self._save_json(self._catalog_path(tenant, collection), cat)
62
+
63
+ def _load_meta(self, tenant: str, collection: str) -> Dict[str, Dict[str, Any]]:
64
+ return self._load_json(self._meta_path(tenant, collection))
65
+
66
+ def _save_meta(self, tenant: str, collection: str,
67
+ meta: Dict[str, Dict[str, Any]]) -> None:
68
+ self._save_json(self._meta_path(tenant, collection), meta)
69
+
70
+ @staticmethod
71
+ def _config():
72
+ model = c.get(
73
+ "vector_store.txtai.embed_model",
74
+ "sentence-transformers/paraphrase-MiniLM-L3-v2"
75
+ )
76
+ backend = c.get("vector_store.txtai.backend", "faiss")
77
+ return {
78
+ "path": model,
79
+ "backend": backend,
80
+ "content": True,
81
+ "store": True,
82
+ "dynamic": True
83
+ }
84
+
85
+ def load_or_init(self, tenant: str, collection: str) -> None:
86
+ key = (tenant, collection)
87
+ if key in self._emb:
88
+ return
89
+
90
+ base = self._base_path(tenant, collection)
91
+ os.makedirs(base, exist_ok=True)
92
+
93
+ em = Embeddings(self._config())
94
+ idxpath = os.path.join(base, "index")
95
+ # consider (existing) index valid only if embeddings file exists
96
+ embeddings_file = os.path.join(idxpath, "embeddings")
97
+
98
+ if os.path.isfile(embeddings_file):
99
+ try:
100
+ em.load(idxpath)
101
+ except Exception:
102
+ # broken index -> start clean
103
+ em = Embeddings(self._config())
104
+
105
+ self._emb[key] = em
106
+
107
+ def save(self, tenant: str, collection: str) -> None:
108
+ key = (tenant, collection)
109
+ em = self._emb.get(key)
110
+ if not em:
111
+ return
112
+ idxpath = os.path.join(self._base_path(tenant, collection), "index")
113
+ os.makedirs(idxpath, exist_ok=True) # ensure target dir
114
+ em.save(idxpath)
115
+
116
+ def delete_collection(self, tenant: str, collection: str) -> None:
117
+ import shutil
118
+ key = (tenant, collection)
119
+ if key in self._emb:
120
+ del self._emb[key]
121
+ p = self._base_path(tenant, collection)
122
+ if os.path.isdir(p):
123
+ shutil.rmtree(p)
124
+
125
+ def has_doc(self, tenant: str, collection: str, docid: str) -> bool:
126
+ cat = self._load_catalog(tenant, collection)
127
+ ids = cat.get(docid)
128
+ return bool(ids)
129
+
130
+ def purge_doc(self, tenant: str, collection: str, docid: str) -> int:
131
+ cat = self._load_catalog(tenant, collection)
132
+ meta = self._load_meta(tenant, collection)
133
+ ids = cat.get(docid, [])
134
+ if not ids:
135
+ return 0
136
+
137
+ with collection_lock(tenant, collection):
138
+ # remove only this docid's metadata and sidecars
139
+ for urid in ids:
140
+ meta.pop(urid, None)
141
+ p = os.path.join(
142
+ self._chunks_dir(tenant, collection),
143
+ self._urid_to_fname(urid)
144
+ )
145
+ if os.path.isfile(p):
146
+ try:
147
+ os.remove(p)
148
+ except Exception:
149
+ pass
150
+ # remove docid from catalog.json
151
+ del cat[docid]
152
+
153
+ self._save_meta(tenant, collection, meta)
154
+ self._save_catalog(tenant, collection, cat)
155
+
156
+ # delete vectors for these chunk ids
157
+ self.load_or_init(tenant, collection)
158
+ em = self._emb.get((tenant, collection))
159
+ if em and ids:
160
+ try:
161
+ em.delete(ids) # txtai embeddings supports deleting by ids
162
+ except Exception:
163
+ # if the installed txtai doesn't expose delete(ids),
164
+ # skip silently. index still consistent via sidecars;
165
+ # searches hydrate from saved text
166
+ pass
167
+
168
+ self.save(tenant, collection)
169
+ return len(ids)
170
+
171
+ def _chunks_dir(self, tenant: str, collection: str) -> str:
172
+ return os.path.join(self._base_path(tenant, collection), "chunks")
173
+
174
+ def _urid_to_fname(self, urid: str) -> str:
175
+ return urid.replace("/", "_").replace("\\", "_").replace(":", "_") + ".txt"
176
+
177
+ def _save_chunk_text(self, tenant: str, collection: str,
178
+ urid: str, t: str) -> None:
179
+ p = os.path.join(self._chunks_dir(tenant, collection),
180
+ self._urid_to_fname(urid))
181
+ os.makedirs(os.path.dirname(p), exist_ok=True)
182
+ with open(p, "w", encoding="utf-8") as f:
183
+ f.write(t or "")
184
+ f.flush()
185
+
186
+ def _load_chunk_text(self, tenant: str, collection: str, urid: str) -> str | None:
187
+ p = os.path.join(self._chunks_dir(tenant, collection),
188
+ self._urid_to_fname(urid))
189
+ if os.path.isfile(p):
190
+ with open(p, "r", encoding="utf-8") as f:
191
+ return f.read()
192
+ return None
193
+
194
+ def index_records(self, tenant: str, collection: str, docid: str,
195
+ records: Iterable[Record]) -> int:
196
+ """
197
+ Ingests records as (rid, text, meta). Guarantees non-null text, coerces
198
+ dict-records, updates catalog/meta, saves index, and verifies content
199
+ storage via a quick lookup. Thread critical.
200
+ """
201
+ self.load_or_init(tenant, collection)
202
+ catalog = self._load_catalog(tenant, collection)
203
+ meta_side = self._load_meta(tenant, collection)
204
+ em = self._emb[(tenant, collection)]
205
+ prepared: list[tuple[str, Any, str]] = []
206
+ record_ids: list[str] = []
207
+
208
+ with collection_lock(tenant, collection):
209
+
210
+ for r in records:
211
+ if isinstance(r, dict):
212
+ rid = r.get("rid") or r.get("id") or r.get("uid")
213
+ txt = r.get("text") or r.get("content")
214
+ md = r.get("meta") or r.get("metadata") or r.get("tags") or {}
215
+ else:
216
+ try:
217
+ rid, txt, md = r
218
+ except Exception:
219
+ continue
220
+
221
+ if not rid or txt is None:
222
+ continue
223
+
224
+ if not isinstance(md, dict):
225
+ if isinstance(md, str):
226
+ try:
227
+ md = json.loads(md)
228
+ except:
229
+ md = {}
230
+ else:
231
+ try:
232
+ md = dict(md)
233
+ except:
234
+ md = {}
235
+
236
+ md["docid"] = docid
237
+ try:
238
+ meta_json = json.dumps(md, ensure_ascii=False)
239
+ md = json.loads(meta_json)
240
+ except:
241
+ md = {}
242
+ meta_json = ""
243
+
244
+ rid = str(rid)
245
+ txt = str(txt)
246
+ if not rid.startswith(f"{docid}::"):
247
+ rid = f"{docid}::{rid}"
248
+
249
+ meta_side[rid] = md
250
+ record_ids.append(rid)
251
+ prepared.append((rid, {"text":txt, **md}, meta_json))
252
+
253
+ self._save_chunk_text(tenant, collection, rid, txt)
254
+ assert txt == (self._load_chunk_text(tenant, collection, rid) or "")
255
+
256
+ if not prepared:
257
+ return 0
258
+
259
+ catalog[docid] = record_ids
260
+ self._save_catalog(tenant, collection, catalog)
261
+ self._save_meta(tenant, collection, meta_side)
262
+ em.upsert(prepared)
263
+ self.save(tenant, collection)
264
+ log.debug(f"PREPARED {len(prepared)} upserts: {prepared}")
265
+ return len(prepared)
266
+
267
+ @staticmethod
268
+ def _matches_filters(m: Dict[str, Any],
269
+ filters: Dict[str, Any] | None) -> bool:
270
+ """
271
+ Evaluates whether metadata `m` satisfies all filter conditions.
272
+ Supports:
273
+ - wildcards (*xyz / xyz*)
274
+ - numeric comparisons (>, <, >=, <=, !=)
275
+ - datetime comparisons (ISO 8601)
276
+ Multiple values in the same key act as OR; multiple keys act as AND.
277
+ """
278
+ log.debug(f"POS FILTERS: {filters}")
279
+ if not filters:
280
+ return True
281
+
282
+ def match(have: Any, cond: str) -> bool:
283
+ if have is None:
284
+ return False
285
+ s = str(cond)
286
+ hv = str(have)
287
+ # Numeric/date ops
288
+ for op in (">=", "<=", "!=", ">", "<"):
289
+ if s.startswith(op):
290
+ val = s[len(op):].strip()
291
+ try:
292
+ hvn, vvn = float(have), float(val)
293
+ return eval(f"hvn {op} vvn")
294
+ except Exception:
295
+ try:
296
+ hd = datetime.fromisoformat(str(have))
297
+ vd = datetime.fromisoformat(val)
298
+ return eval(f"hd {op} vd")
299
+ except Exception:
300
+ return False
301
+ # Wildcards
302
+ if s == "*":
303
+ return True
304
+ if s.startswith("*") and s.endswith("*") and s[1:-1] in hv:
305
+ return True
306
+ if s.startswith("*") and hv.endswith(s[1:]):
307
+ return True
308
+ if s.endswith("*") and hv.startswith(s[:-1]):
309
+ return True
310
+ if s.startswith("!") and len(s)>1:
311
+ return hv != s[1:]
312
+ return hv == s
313
+
314
+ for k, vals in filters.items():
315
+ if not any(match(m.get(k), v) for v in vals):
316
+ return False
317
+ return True
318
+
319
+ @staticmethod
320
+ def _split_filters(filters: dict[str, Any] | None) -> tuple[dict, dict]:
321
+ """Split filters into pre (handled by txtai) and post (handled in Python)."""
322
+ if not filters:
323
+ return {}, {}
324
+
325
+ pre_f, pos_f = {}, {}
326
+ for key, vals in (filters or {}).items():
327
+ if not isinstance(vals, list):
328
+ vals = [vals]
329
+ exacts, extended = [], []
330
+ for v in vals:
331
+ # Anything starting/ending with * or using comparison ops => post
332
+ if isinstance(v, str) and (
333
+ v.startswith("*") or v.endswith("*") or v.startswith("!") or
334
+ any(v.startswith(op) for op in (">=", "<=", ">", "<", "!="))
335
+ ):
336
+ extended.append(v)
337
+ else:
338
+ exacts.append(v)
339
+ if exacts:
340
+ pre_f[key] = exacts
341
+ if extended:
342
+ pos_f[key] = extended
343
+ log.debug(f"after split: PRE {pre_f} POS {pos_f}")
344
+ return pre_f, pos_f
345
+
346
+ @staticmethod
347
+ def _build_sql(query: str, k: int, filters: dict[str, Any], columns: list[str],
348
+ with_similarity: bool = True, avoid_duplicates = True) -> str:
349
+ """
350
+ Builds a generic txtai >=8 query
351
+ Eg SELECT id, text, score FROM txtai WHERE similar('foo') AND (t1='x' OR t1='y')
352
+ """
353
+ cols = ", ".join(columns or ["id", "docid", "text", "score"])
354
+ sql = f"SELECT {cols} FROM txtai"
355
+
356
+ wheres = []
357
+ if with_similarity and query:
358
+ q_safe = query.replace("'", "''")
359
+ wheres.append(f"similar('{q_safe}')")
360
+
361
+ for key, vals in filters.items():
362
+ ors = []
363
+ for v in vals:
364
+ safe_v = str(v).replace("'", "''")
365
+ ors.append(f"[{key}] = '{safe_v}'")
366
+ or_safe = " OR ".join(ors)
367
+ wheres.append(f"({or_safe})")
368
+
369
+ if wheres:
370
+ sql += " WHERE " + " AND ".join(wheres) + " AND id <> '' "
371
+ else:
372
+ sql += " WHERE id <> '' "
373
+
374
+ if avoid_duplicates and cols:
375
+ sql += " GROUP by " + cols
376
+
377
+ if k is not None:
378
+ sql += f" LIMIT {int(k)}"
379
+
380
+ log.debug(f"debug:: QUERY: {query} SQL: {sql}")
381
+ return sql
382
+
383
+ def search(self, tenant: str, collection: str, query: str, k: int = 5,
384
+ filters: Dict[str, Any] | None = None) -> List[Dict[str, Any]]:
385
+ """
386
+ Queries txtai for top-k, keeps overfetch inside the store, preserves text
387
+ from em.search when present, and falls back to lookup if missing.
388
+ """
389
+ kk = max(1, int(k))
390
+ self.load_or_init(tenant, collection)
391
+ em = self._emb[(tenant, collection)]
392
+
393
+ fetch_k = max(50, kk * 5)
394
+ pre_f, pos_f = self._split_filters(filters)
395
+ cols = ["id", "text", "score", "docid"]
396
+ sql = self._build_sql(query, fetch_k, pre_f, cols)
397
+ raw = em.search(sql)
398
+
399
+ # Normalize to (id, score, maybe_text)
400
+ if raw and isinstance(raw[0], dict):
401
+ triples = [
402
+ (r.get("id"), float(r.get("score", 0.0)), r.get("text"))
403
+ for r in raw
404
+ ]
405
+ else: # if raw is a tuple:
406
+ triples = [
407
+ (rid, float(score), None)
408
+ for rid, score in (raw or [])
409
+ ]
410
+
411
+ meta = self._load_meta(tenant, collection)
412
+
413
+ kept: list[tuple[str, float, Any]] = []
414
+ need_lookup_ids: list[str] = []
415
+
416
+ for rid, score, txt in triples:
417
+ if not rid:
418
+ continue
419
+ if self._matches_filters(meta.get(rid, {}), pos_f):
420
+ kept.append((rid, score, txt))
421
+ if txt is None:
422
+ need_lookup_ids.append(rid)
423
+ if len(kept) >= kk:
424
+ break
425
+
426
+ lookup: dict[str, Any] = {}
427
+ if need_lookup_ids and hasattr(em, "lookup"):
428
+ lookup = em.lookup(need_lookup_ids) or {}
429
+
430
+ out: List[Dict[str, Any]] = []
431
+ for rid, score, txt in kept:
432
+ if txt is None:
433
+ txt = lookup.get(rid)
434
+ if txt is None:
435
+ txt = self._load_chunk_text(tenant, collection, rid)
436
+ out.append({
437
+ "id": rid,
438
+ "score": score,
439
+ "text": txt.get("text") if isinstance (txt, dict) else txt,
440
+ "tenant": tenant,
441
+ "collection": collection,
442
+ "meta": meta.get(rid) or {},
443
+ })
444
+ log.info(f"SEARCH-OUT: {out}")
445
+ return out
pave/ui.py ADDED
@@ -0,0 +1,175 @@
1
+ # (C) 2025 Rodrigo Rodrigues da Silva <rodrigopitanga@posteo.net>
2
+ # SPDX-License-Identifier: GPL-3.0-or-later
3
+
4
+ # pave/ui.py — minimal, crash-proof UI wiring
5
+ from fastapi import FastAPI
6
+ from fastapi.openapi.docs import get_swagger_ui_html
7
+ from fastapi.openapi.utils import get_openapi
8
+ from fastapi.responses import HTMLResponse, RedirectResponse, FileResponse
9
+ from starlette.staticfiles import StaticFiles
10
+ from pathlib import Path
11
+ import copy
12
+
13
# ultra-simple fallback template (no f-string; plain string -> safe braces)
# Served by /ui when assets/ui.html cannot be read. The __INST_NAME__,
# __INST_DESC__ and __VERSION__ placeholders are substituted via
# str.replace in ui_home (see attach_ui below).
_FALLBACK_TMPL = """<!doctype html>
<html lang="en"><head>
<meta charset="utf-8"><meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="icon" href="/favicon.ico" />
<title>__INST_NAME__ • Search</title>
</head>
<body>
<div class="tabs">
<button class="tab active" data-target="search" data-title="__INST_NAME__ • Search">Search</button>
<button class="tab" data-target="ingest" data-title="__INST_NAME__ • Ingest">Ingest</button>
<div class="desc">__INST_DESC__</div>
</div>
<iframe id="search" class="frame active" src="/ui/search" title="Search"></iframe>
<iframe id="ingest" class="frame" src="/ui/ingest" title="Ingest"></iframe>
<div class="footer">
<span>patchvec v__VERSION__</span>
</div>
<script>
const tabs = document.querySelectorAll('.tab');
const frames = document.querySelectorAll('.frame');
tabs.forEach(function(tab){
tab.addEventListener('click', function(){
tabs.forEach(function(t){ t.classList.remove('active'); });
frames.forEach(function(f){ f.classList.remove('active'); });
tab.classList.add('active');
document.getElementById(tab.dataset.target).classList.add('active');
document.title = tab.dataset.title || document.title;
});
});
</script>
</body></html>
"""
46
+
47
def attach_ui(app: FastAPI):
    """Wire static assets, favicon, filtered OpenAPI schemas and /ui pages.

    Per the module header this wiring is meant to be crash-proof: a missing
    assets dir, a missing ui.html template or unset instance.* config must
    never break startup or a request.

    Fix: cfg.get("instance.name") / cfg.get("instance.desc") can return
    None, which previously raised TypeError inside str.replace() on /ui
    and rendered "None" in the Swagger page titles. Both are now coerced
    to safe strings.
    """
    cfg = app.state.cfg
    version = app.state.version

    # footer links
    repo_url = "https://gitlab.com/flowlexi/patchvec"
    license_name = "GPL-3.0-or-later"
    license_url = "https://www.gnu.org/licenses/gpl-3.0-standalone.html"

    def _inst_name() -> str:
        # None-safe instance display name.
        return str(cfg.get("instance.name") or "patchvec")

    def _inst_desc() -> str:
        # None-safe instance description.
        return str(cfg.get("instance.desc") or "")

    # static + favicon (hardcoded path relative to this file);
    # check_dir=False -> never crash if the directory is missing.
    assets_dir = (Path(__file__).parent / "assets").resolve()
    app.mount(
        "/assets",
        StaticFiles(directory=str(assets_dir), check_dir=False),
        name="assets",
    )

    @app.get("/favicon.ico", include_in_schema=False)
    def favicon():
        return FileResponse(
            str(assets_dir / "patchvec_icon_192.png"),
            media_type="image/png",
        )

    # openapi (bearer + repo/license), built once and cached
    _openapi_cache = {"doc": None}

    def _openapi_full():
        """Full OpenAPI schema with bearer auth and repo/license info."""
        if _openapi_cache["doc"] is None:
            schema = get_openapi(
                title=app.title,
                version=version,
                description=app.description,
                routes=app.routes,
            )
            comps = schema.setdefault("components", {}) \
                          .setdefault("securitySchemes", {})
            comps["bearerAuth"] = {
                "type": "http",
                "scheme": "bearer",  # plain bearer (no JWT - yet)
                "description": "Send Authorization: Bearer <token>",
            }
            schema["security"] = [{"bearerAuth": []}]
            info = schema.setdefault("info", {})
            info["x-repository"] = repo_url
            info["license"] = {"name": license_name, "url": license_url}
            _openapi_cache["doc"] = schema
        return _openapi_cache["doc"]

    def _filter(schema: dict, pred):
        """Deep-copy *schema*, keeping only operations where pred(path, op)."""
        s = copy.deepcopy(schema)
        for path in list(s.get("paths", {}).keys()):
            methods = s["paths"][path]
            for m in list(methods.keys()):
                if not pred(path, methods[m]):
                    methods.pop(m, None)
            if not methods:
                s["paths"].pop(path, None)
        s.pop("tags", None)
        return s

    def _is_search(path: str, _op: dict) -> bool:
        # Search tab: any route containing /search.
        return "/search" in path

    def _is_ingest(path: str, _op: dict) -> bool:
        # Ingest tab: document/collection management, minus search routes.
        p = path.lower()
        return ("/documents" in p) or \
               ("/collections" in p and "/search" not in p) or \
               p.endswith("/collections")

    @app.get("/openapi-search.json", include_in_schema=False)
    def openapi_search_only():
        return _filter(_openapi_full(), _is_search)

    @app.get("/openapi-ingest.json", include_in_schema=False)
    def openapi_ingest_only():
        return _filter(_openapi_full(), _is_ingest)

    _swui_params = {
        "defaultModelsExpandDepth": -1,
        "displayRequestDuration": True,
        "docExpansion": "list",
        "tryItOutEnabled": True,
    }

    @app.get("/ui/search", include_in_schema=False)
    def ui_search():
        return get_swagger_ui_html(
            openapi_url="/openapi-search.json",
            title=f"{_inst_name()} • Search",
            swagger_ui_parameters=_swui_params,
        )

    @app.get("/ui/ingest", include_in_schema=False)
    def ui_ingest():
        return get_swagger_ui_html(
            openapi_url="/openapi-ingest.json",
            title=f"{_inst_name()} • Ingest",
            swagger_ui_parameters=_swui_params,
        )

    # lazy-read template on request (so missing file never kills startup)
    tmpl_path = assets_dir / "ui.html"

    @app.get("/ui", include_in_schema=False)
    def ui_home():
        try:
            html = tmpl_path.read_text(encoding="utf-8")
        except (OSError, UnicodeDecodeError):
            # Missing/unreadable template -> built-in fallback page.
            html = _FALLBACK_TMPL
        html = (
            html.replace("__INST_NAME__", _inst_name())
                .replace("__INST_DESC__", _inst_desc())
                .replace("__VERSION__", str(version))
                .replace("__REPO_URL__", repo_url)
                .replace("__LICENSE_NAME__", license_name)
                .replace("__LICENSE_URL__", license_url)
        )
        return HTMLResponse(html)

    @app.get("/", include_in_schema=False)
    def root_redirect():
        # 308 preserves the request method on redirect.
        return RedirectResponse("/ui", status_code=308)