abstractgateway 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractgateway/__init__.py +1 -2
- abstractgateway/__main__.py +7 -0
- abstractgateway/app.py +4 -4
- abstractgateway/cli.py +568 -8
- abstractgateway/config.py +15 -5
- abstractgateway/embeddings_config.py +45 -0
- abstractgateway/host_metrics.py +274 -0
- abstractgateway/hosts/bundle_host.py +528 -55
- abstractgateway/hosts/visualflow_host.py +30 -3
- abstractgateway/integrations/__init__.py +2 -0
- abstractgateway/integrations/email_bridge.py +782 -0
- abstractgateway/integrations/telegram_bridge.py +534 -0
- abstractgateway/maintenance/__init__.py +5 -0
- abstractgateway/maintenance/action_tokens.py +100 -0
- abstractgateway/maintenance/backlog_exec_runner.py +1592 -0
- abstractgateway/maintenance/backlog_parser.py +184 -0
- abstractgateway/maintenance/draft_generator.py +451 -0
- abstractgateway/maintenance/llm_assist.py +212 -0
- abstractgateway/maintenance/notifier.py +109 -0
- abstractgateway/maintenance/process_manager.py +1064 -0
- abstractgateway/maintenance/report_models.py +81 -0
- abstractgateway/maintenance/report_parser.py +219 -0
- abstractgateway/maintenance/text_similarity.py +123 -0
- abstractgateway/maintenance/triage.py +507 -0
- abstractgateway/maintenance/triage_queue.py +142 -0
- abstractgateway/migrate.py +155 -0
- abstractgateway/routes/__init__.py +2 -2
- abstractgateway/routes/gateway.py +10817 -179
- abstractgateway/routes/triage.py +118 -0
- abstractgateway/runner.py +689 -14
- abstractgateway/security/gateway_security.py +425 -110
- abstractgateway/service.py +213 -6
- abstractgateway/stores.py +64 -4
- abstractgateway/workflow_deprecations.py +225 -0
- abstractgateway-0.1.1.dist-info/METADATA +135 -0
- abstractgateway-0.1.1.dist-info/RECORD +40 -0
- abstractgateway-0.1.0.dist-info/METADATA +0 -101
- abstractgateway-0.1.0.dist-info/RECORD +0 -18
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/WHEEL +0 -0
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/entry_points.txt +0 -0
abstractgateway/config.py
CHANGED
|
@@ -56,12 +56,14 @@ class GatewayHostConfig:
|
|
|
56
56
|
|
|
57
57
|
data_dir: Path
|
|
58
58
|
flows_dir: Path
|
|
59
|
+
store_backend: str = "file" # file|sqlite
|
|
60
|
+
db_path: Optional[Path] = None
|
|
59
61
|
|
|
60
62
|
runner_enabled: bool = True
|
|
61
63
|
poll_interval_s: float = 0.25
|
|
62
64
|
command_batch_limit: int = 200
|
|
63
65
|
tick_max_steps: int = 100
|
|
64
|
-
tick_workers: int =
|
|
66
|
+
tick_workers: int = 4
|
|
65
67
|
run_scan_limit: int = 200
|
|
66
68
|
|
|
67
69
|
@staticmethod
|
|
@@ -69,13 +71,21 @@ class GatewayHostConfig:
|
|
|
69
71
|
# NOTE: We intentionally use ABSTRACTGATEWAY_* as the canonical namespace.
|
|
70
72
|
# For a transition period, we accept legacy ABSTRACTFLOW_* names as fallbacks.
|
|
71
73
|
data_dir_raw = _env("ABSTRACTGATEWAY_DATA_DIR", "ABSTRACTFLOW_RUNTIME_DIR") or "./runtime"
|
|
72
|
-
flows_dir_raw =
|
|
74
|
+
flows_dir_raw = (
|
|
75
|
+
_env("ABSTRACTGATEWAY_FLOWS_DIR")
|
|
76
|
+
or _env("ABSTRACTFRAMEWORK_WORKFLOWS_DIR")
|
|
77
|
+
or _env("ABSTRACTFLOW_FLOWS_DIR")
|
|
78
|
+
or "./flows"
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
store_backend = str(_env("ABSTRACTGATEWAY_STORE_BACKEND") or "file").strip().lower() or "file"
|
|
82
|
+
db_path_raw = _env("ABSTRACTGATEWAY_DB_PATH")
|
|
73
83
|
|
|
74
84
|
enabled_raw = _env("ABSTRACTGATEWAY_RUNNER", "ABSTRACTFLOW_GATEWAY_RUNNER") or "1"
|
|
75
85
|
runner_enabled = _as_bool(enabled_raw, True)
|
|
76
86
|
|
|
77
87
|
poll_s = _as_float(_env("ABSTRACTGATEWAY_POLL_S", "ABSTRACTFLOW_GATEWAY_POLL_S"), 0.25)
|
|
78
|
-
tick_workers = _as_int(_env("ABSTRACTGATEWAY_TICK_WORKERS", "ABSTRACTFLOW_GATEWAY_TICK_WORKERS"),
|
|
88
|
+
tick_workers = _as_int(_env("ABSTRACTGATEWAY_TICK_WORKERS", "ABSTRACTFLOW_GATEWAY_TICK_WORKERS"), 4)
|
|
79
89
|
tick_steps = _as_int(_env("ABSTRACTGATEWAY_TICK_MAX_STEPS", "ABSTRACTFLOW_GATEWAY_TICK_MAX_STEPS"), 100)
|
|
80
90
|
batch = _as_int(_env("ABSTRACTGATEWAY_COMMAND_BATCH_LIMIT", "ABSTRACTFLOW_GATEWAY_COMMAND_BATCH_LIMIT"), 200)
|
|
81
91
|
scan = _as_int(_env("ABSTRACTGATEWAY_RUN_SCAN_LIMIT", "ABSTRACTFLOW_GATEWAY_RUN_SCAN_LIMIT"), 200)
|
|
@@ -83,6 +93,8 @@ class GatewayHostConfig:
|
|
|
83
93
|
return GatewayHostConfig(
|
|
84
94
|
data_dir=Path(data_dir_raw).expanduser().resolve(),
|
|
85
95
|
flows_dir=Path(flows_dir_raw).expanduser().resolve(),
|
|
96
|
+
store_backend=store_backend,
|
|
97
|
+
db_path=Path(db_path_raw).expanduser().resolve() if db_path_raw else None,
|
|
86
98
|
runner_enabled=bool(runner_enabled),
|
|
87
99
|
poll_interval_s=float(poll_s),
|
|
88
100
|
command_batch_limit=max(1, int(batch)),
|
|
@@ -90,5 +102,3 @@ class GatewayHostConfig:
|
|
|
90
102
|
tick_workers=max(1, int(tick_workers)),
|
|
91
103
|
run_scan_limit=max(1, int(scan)),
|
|
92
104
|
)
|
|
93
|
-
|
|
94
|
-
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, Dict, Tuple
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _load_json_file(path: Path) -> Dict[str, Any]:
|
|
10
|
+
try:
|
|
11
|
+
raw = path.read_text(encoding="utf-8")
|
|
12
|
+
data = json.loads(raw)
|
|
13
|
+
return data if isinstance(data, dict) else {}
|
|
14
|
+
except Exception:
|
|
15
|
+
return {}
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _save_json_file(path: Path, data: Dict[str, Any]) -> None:
|
|
19
|
+
try:
|
|
20
|
+
path.write_text(json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True), encoding="utf-8")
|
|
21
|
+
except Exception:
|
|
22
|
+
return
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def resolve_embedding_config(*, base_dir: Path) -> Tuple[str, str]:
    """Resolve the gateway-wide embedding provider/model.

    Contract:
    - Stable within a single gateway instance (singleton embedding space).
    - Persisted under the gateway data dir so restarts keep the same embedding space by default.
    - Env vars can override (and will re-write the persisted value).

    Returns a ``(provider, model)`` tuple; provider is lowercased.
    """
    provider_env = os.getenv("ABSTRACTGATEWAY_EMBEDDING_PROVIDER") or os.getenv("ABSTRACTFLOW_EMBEDDING_PROVIDER")
    model_env = os.getenv("ABSTRACTGATEWAY_EMBEDDING_MODEL") or os.getenv("ABSTRACTFLOW_EMBEDDING_MODEL")

    cfg_path = Path(base_dir) / "gateway_embeddings.json"
    cfg = _load_json_file(cfg_path) if cfg_path.exists() else {}

    # Precedence: explicit env var > persisted config > built-in default.
    provider = str(provider_env or cfg.get("provider") or "lmstudio").strip().lower()
    model = str(model_env or cfg.get("model") or "text-embedding-nomic-embed-text-v1.5@q6_k").strip()

    # Persist for stability across restart (best-effort). Only write when the
    # resolved values differ from what is already on disk, so repeated resolves
    # do not rewrite the file on every call.
    resolved = {"provider": provider, "model": model}
    if cfg.get("provider") != provider or cfg.get("model") != model:
        _save_json_file(cfg_path, resolved)
    return provider, model
|
45
|
+
|
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import os
|
|
5
|
+
import re
|
|
6
|
+
import shutil
|
|
7
|
+
import subprocess
|
|
8
|
+
import sys
|
|
9
|
+
import threading
|
|
10
|
+
import time
|
|
11
|
+
from typing import Any, Dict, List, Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Process-wide cache protecting callers from re-running the (subprocess-based)
# GPU metric readers on every call.
_CACHE_LOCK = threading.Lock()
# time.time() timestamp of the last cached payload (0.0 = never populated).
_CACHE_AT_S: float = 0.0
# Last metrics payload, or None before the first read.
_CACHE_VALUE: Optional[Dict[str, Any]] = None
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _utc_now_iso() -> str:
|
|
20
|
+
return datetime.datetime.now(datetime.timezone.utc).isoformat()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _parse_nvidia_smi_csv(text: str) -> List[Dict[str, Any]]:
|
|
24
|
+
out: List[Dict[str, Any]] = []
|
|
25
|
+
for raw_line in str(text or "").splitlines():
|
|
26
|
+
line = raw_line.strip()
|
|
27
|
+
if not line:
|
|
28
|
+
continue
|
|
29
|
+
parts = [p.strip() for p in line.split(",")]
|
|
30
|
+
if len(parts) < 3:
|
|
31
|
+
continue
|
|
32
|
+
try:
|
|
33
|
+
idx = int(parts[0])
|
|
34
|
+
except Exception:
|
|
35
|
+
continue
|
|
36
|
+
name = parts[1]
|
|
37
|
+
try:
|
|
38
|
+
util = float(parts[2])
|
|
39
|
+
except Exception:
|
|
40
|
+
util = None
|
|
41
|
+
out.append({"index": idx, "name": name, "utilization_gpu_pct": util})
|
|
42
|
+
return out
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _read_nvidia_smi_gpu_metrics(*, timeout_s: float = 1.0) -> Dict[str, Any]:
    """Query GPU utilization via `nvidia-smi`; best-effort, never raises.

    Returns {"supported": True, "source", "utilization_gpu_pct", "gpus"} on
    success, else {"supported": False, "reason": ...}.
    """
    binary = shutil.which("nvidia-smi")
    if not binary:
        return {"supported": False, "reason": "nvidia-smi not available"}

    args = [
        binary,
        "--query-gpu=index,name,utilization.gpu",
        "--format=csv,noheader,nounits",
    ]

    try:
        proc = subprocess.run(
            args,
            capture_output=True,
            text=True,
            timeout=float(timeout_s),
            check=False,
        )
    except Exception as e:
        return {"supported": False, "reason": f"nvidia-smi failed: {e}"}

    if proc.returncode != 0:
        err_text = (proc.stderr or "").strip()
        detail = f" (exit={proc.returncode})"
        if err_text:
            detail = f"{detail}: {err_text}"
        return {"supported": False, "reason": f"nvidia-smi error{detail}"}

    gpus = _parse_nvidia_smi_csv(proc.stdout or "")
    # Aggregate per-GPU utilization into a single mean; None when no GPU
    # reported a numeric value.
    samples = [g.get("utilization_gpu_pct") for g in gpus if isinstance(g.get("utilization_gpu_pct"), (int, float))]
    utilization: Optional[float] = float(sum(samples) / len(samples)) if samples else None

    return {
        "supported": True,
        "source": "nvidia-smi",
        "utilization_gpu_pct": utilization,
        "gpus": gpus,
    }
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# Matches an IORegistry entry header line, capturing the object name and the
# IOKit class name, e.g. `+-o SomeAccel  <class SomeAccelClass, ...`.
_IOREG_ENTRY_HEADER_RE = re.compile(r"^\+\-o\s+([^\s<]+)\s+<class\s+([^,>]+)")
# Captures the quoted value of an entry's "model" property.
_IOREG_MODEL_RE = re.compile(r'"model"\s*=\s*"([^"]+)"')
# Captures the body of the PerformanceStatistics dictionary (non-greedy so it
# stops at the first closing brace; DOTALL because the dict may span lines).
_IOREG_PERF_STATS_RE = re.compile(r'"PerformanceStatistics"\s*=\s*\{(.*?)\}', re.DOTALL)
# Splits a dictionary body into quoted-key / raw-value pairs.
_IOREG_KV_RE = re.compile(r'"([^"]+)"\s*=\s*([^,}]+)')
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _coerce_number(value: str) -> Optional[float]:
|
|
95
|
+
s = str(value or "").strip()
|
|
96
|
+
if not s:
|
|
97
|
+
return None
|
|
98
|
+
if s.startswith("<") or s.startswith("("):
|
|
99
|
+
return None
|
|
100
|
+
try:
|
|
101
|
+
if "." in s:
|
|
102
|
+
return float(s)
|
|
103
|
+
return float(int(s))
|
|
104
|
+
except Exception:
|
|
105
|
+
return None
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _parse_ioreg_ioaccelerator_output(text: str) -> List[Dict[str, Any]]:
    """Parse `ioreg -r -c IOAccelerator -l` output into GPU utilization records (macOS).

    macOS exposes GPU activity in IORegistry `PerformanceStatistics` for IOAccelerator entries.
    Example keys:
    - "Device Utilization %"
    - "Renderer Utilization %"
    - "Tiler Utilization %"
    """

    # Split the flat text dump into per-entry chunks: each registry entry
    # starts with a "+-o " header line and runs until the next header.
    entries: list[str] = []
    cur: list[str] = []
    for line in str(text or "").splitlines():
        if line.startswith("+-o "):
            if cur:
                entries.append("\n".join(cur))
            cur = [line]
        else:
            # Lines before the first header are dropped (cur is still empty).
            if cur:
                cur.append(line)
    if cur:
        entries.append("\n".join(cur))

    gpus: List[Dict[str, Any]] = []
    for entry in entries:
        # The header line carries the registry object name and IOKit class.
        header = entry.splitlines()[0] if entry else ""
        m = _IOREG_ENTRY_HEADER_RE.match(header)
        if m:
            obj_name = m.group(1)
            cls_name = m.group(2)
        else:
            obj_name = ""
            cls_name = ""

        model_m = _IOREG_MODEL_RE.search(entry)
        model = model_m.group(1) if model_m else ""

        # Entries without a PerformanceStatistics dict cannot report utilization.
        perf_m = _IOREG_PERF_STATS_RE.search(entry)
        if not perf_m:
            continue
        body = perf_m.group(1)

        # Coerce each stat to a number where possible; keep raw text otherwise.
        stats: Dict[str, Any] = {}
        for k, v_raw in _IOREG_KV_RE.findall(body):
            num = _coerce_number(v_raw)
            stats[k] = num if num is not None else str(v_raw).strip()

        # Filter to entries that look like GPU perf stats.
        if not any(k in stats for k in ("Device Utilization %", "Renderer Utilization %", "Tiler Utilization %")):
            continue

        dev = stats.get("Device Utilization %")
        rend = stats.get("Renderer Utilization %")
        tiler = stats.get("Tiler Utilization %")

        gpus.append(
            {
                # Index is positional (order of appearance in the ioreg dump).
                "index": len(gpus),
                # Prefer the human-readable model; fall back to registry names.
                "name": (model or obj_name or cls_name or "GPU").strip() or "GPU",
                # Non-numeric stats are normalized to None.
                "utilization_gpu_pct": dev if isinstance(dev, (int, float)) else None,
                "renderer_utilization_pct": rend if isinstance(rend, (int, float)) else None,
                "tiler_utilization_pct": tiler if isinstance(tiler, (int, float)) else None,
            }
        )

    return gpus
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def _read_ioreg_gpu_metrics(*, timeout_s: float = 1.0) -> Dict[str, Any]:
    """Query macOS GPU utilization via `ioreg`; best-effort, never raises.

    Returns {"supported": True, "source", "utilization_gpu_pct", "gpus"} on
    success, else {"supported": False, "reason": ...}.
    """
    # `which` may miss /usr/sbin depending on PATH; fall back to the fixed path.
    binary = shutil.which("ioreg") or "/usr/sbin/ioreg"
    if not binary:
        return {"supported": False, "reason": "ioreg not available"}

    args = [binary, "-r", "-c", "IOAccelerator", "-l"]
    try:
        proc = subprocess.run(
            args,
            capture_output=True,
            text=True,
            timeout=float(timeout_s),
            check=False,
        )
    except Exception as e:
        return {"supported": False, "reason": f"ioreg failed: {e}"}

    if proc.returncode != 0:
        err_text = (proc.stderr or "").strip()
        detail = f" (exit={proc.returncode})"
        if err_text:
            detail = f"{detail}: {err_text}"
        return {"supported": False, "reason": f"ioreg error{detail}"}

    gpus = _parse_ioreg_ioaccelerator_output(proc.stdout or "")
    if not gpus:
        return {"supported": False, "reason": "IOAccelerator performance stats not found"}

    # Aggregate per-GPU utilization into a single mean; None when no GPU
    # reported a numeric value.
    samples = [g.get("utilization_gpu_pct") for g in gpus if isinstance(g.get("utilization_gpu_pct"), (int, float))]
    utilization: Optional[float] = float(sum(samples) / len(samples)) if samples else None

    return {
        "supported": True,
        "source": "ioreg",
        "utilization_gpu_pct": utilization,
        "gpus": gpus,
    }
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def _read_host_gpu_metrics_best_effort(*, timeout_s: float = 1.0) -> Dict[str, Any]:
    """Select a GPU metrics provider and return its payload (never raises).

    ABSTRACTGATEWAY_GPU_METRICS_PROVIDER selects the source:
    - "auto" (default): try the platform-native provider first, then the other
    - "nvidia"/"nvidia-smi" or "ioreg"/"macos": force a single provider
    - "off"/"0"/"false"/"none"/"disabled": disable entirely
    """
    provider_raw = str(os.getenv("ABSTRACTGATEWAY_GPU_METRICS_PROVIDER", "auto") or "auto").strip().lower()
    if provider_raw in {"0", "off", "false", "none", "disabled"}:
        return {"supported": False, "reason": "disabled"}

    if provider_raw not in {"auto", "nvidia-smi", "nvidia", "ioreg", "macos"}:
        return {"supported": False, "reason": f"unknown provider '{provider_raw}'"}

    if provider_raw == "auto":
        # Native provider first: ioreg on macOS, nvidia-smi elsewhere.
        providers = ["ioreg", "nvidia-smi"] if sys.platform == "darwin" else ["nvidia-smi", "ioreg"]
    elif provider_raw in {"nvidia", "nvidia-smi"}:
        providers = ["nvidia-smi"]
    else:
        providers = ["ioreg"]

    readers = {
        "ioreg": _read_ioreg_gpu_metrics,
        "nvidia-smi": _read_nvidia_smi_gpu_metrics,
    }

    reasons: list[str] = []
    for name in providers:
        payload = readers[name](timeout_s=timeout_s)
        if payload.get("supported") is True:
            return payload
        r = payload.get("reason")
        if isinstance(r, str) and r.strip():
            reasons.append(r.strip())

    # Every provider declined; surface their reasons for diagnostics.
    return {"supported": False, "reason": "; ".join(reasons) if reasons else "unsupported"}
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def get_host_gpu_metrics(*, cache_ttl_s: float = 0.5) -> Dict[str, Any]:
    """Return best-effort host GPU utilization metrics.

    This is intentionally dependency-light:
    - macOS: reads IORegistry IOAccelerator PerformanceStatistics via `ioreg`
    - NVIDIA: reads utilization via `nvidia-smi`

    Results are cached process-wide for *cache_ttl_s* seconds (<= 0 disables
    caching). A defensive copy is returned so callers cannot mutate the cache.
    """

    global _CACHE_AT_S, _CACHE_VALUE

    ttl = max(0.0, float(cache_ttl_s))

    if ttl > 0:
        with _CACHE_LOCK:
            if _CACHE_VALUE is not None and (time.time() - _CACHE_AT_S) <= ttl:
                return dict(_CACHE_VALUE)

    payload = _read_host_gpu_metrics_best_effort(timeout_s=1.0)
    out: Dict[str, Any] = {"ts": _utc_now_iso(), **payload}

    if ttl > 0:
        with _CACHE_LOCK:
            # Stamp with the time the value was stored, not a timestamp taken
            # before the read: the subprocess read above can take up to ~1s,
            # which would otherwise make the entry expire early (or instantly
            # for the default 0.5s TTL).
            _CACHE_AT_S = time.time()
            _CACHE_VALUE = dict(out)

    return out