@respan/cli 0.5.3 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/auth/login.js +2 -2
- package/dist/commands/integrate/claude-code.js +9 -28
- package/dist/commands/integrate/codex-cli.js +7 -27
- package/dist/commands/integrate/gemini-cli.js +99 -53
- package/dist/hooks/claude-code.cjs +951 -0
- package/dist/hooks/claude-code.d.ts +1 -0
- package/dist/hooks/claude-code.js +641 -0
- package/dist/hooks/codex-cli.cjs +793 -0
- package/dist/hooks/codex-cli.d.ts +1 -0
- package/dist/hooks/codex-cli.js +469 -0
- package/dist/hooks/gemini-cli.cjs +826 -0
- package/dist/hooks/gemini-cli.d.ts +1 -0
- package/dist/hooks/gemini-cli.js +563 -0
- package/dist/hooks/shared.d.ts +82 -0
- package/dist/hooks/shared.js +461 -0
- package/dist/lib/integrate.d.ts +3 -3
- package/dist/lib/integrate.js +4 -8
- package/oclif.manifest.json +466 -466
- package/package.json +6 -3
- package/dist/assets/codex_hook.py +0 -897
- package/dist/assets/hook.py +0 -1052
|
@@ -1,897 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Respan Hook for Codex CLI
|
|
4
|
-
|
|
5
|
-
Sends Codex CLI conversation traces to Respan after each agent turn.
|
|
6
|
-
Uses Codex CLI's notify hook to capture session JSONL files and convert
|
|
7
|
-
them to Respan spans.
|
|
8
|
-
|
|
9
|
-
Usage:
|
|
10
|
-
Add to ~/.codex/config.toml:
|
|
11
|
-
notify = ["python3", "~/.respan/codex_hook.py"]
|
|
12
|
-
Run: respan integrate codex-cli
|
|
13
|
-
"""
|
|
14
|
-
|
|
15
|
-
import contextlib
|
|
16
|
-
import json
|
|
17
|
-
import os
|
|
18
|
-
import sys
|
|
19
|
-
import tempfile
|
|
20
|
-
import time
|
|
21
|
-
from datetime import datetime, timezone
|
|
22
|
-
from pathlib import Path
|
|
23
|
-
from typing import Any, Dict, List, Optional, Tuple
|
|
24
|
-
|
|
25
|
-
try:
|
|
26
|
-
import requests
|
|
27
|
-
except ImportError:
|
|
28
|
-
print("Error: Python 'requests' package is required. Install: pip3 install requests", file=sys.stderr)
|
|
29
|
-
sys.exit(1)
|
|
30
|
-
|
|
31
|
-
try:
|
|
32
|
-
import fcntl
|
|
33
|
-
except ImportError:
|
|
34
|
-
fcntl = None # Not available on Windows
|
|
35
|
-
|
|
36
|
-
# Configuration
|
|
37
|
-
LOG_FILE = Path.home() / ".codex" / "state" / "respan_hook.log"
|
|
38
|
-
STATE_FILE = Path.home() / ".codex" / "state" / "respan_state.json"
|
|
39
|
-
LOCK_FILE = Path.home() / ".codex" / "state" / "respan_hook.lock"
|
|
40
|
-
DEBUG = os.environ.get("CODEX_RESPAN_DEBUG", "").lower() == "true"
|
|
41
|
-
|
|
42
|
-
try:
|
|
43
|
-
MAX_CHARS = int(os.environ.get("CODEX_RESPAN_MAX_CHARS", "4000"))
|
|
44
|
-
except (ValueError, TypeError):
|
|
45
|
-
MAX_CHARS = 4000
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
def log(level: str, message: str) -> None:
    """Append a timestamped entry to the hook log file."""
    LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"{stamp} [{level}] {message}\n"
    with open(LOG_FILE, "a", encoding="utf-8") as handle:
        handle.write(entry)
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
def debug(message: str) -> None:
    """Forward *message* to the log at DEBUG level when debugging is enabled."""
    if not DEBUG:
        return
    log("DEBUG", message)
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
def load_state() -> Dict[str, Any]:
    """Read session-tracking state from disk; empty dict when absent or corrupt."""
    try:
        if STATE_FILE.exists():
            return json.loads(STATE_FILE.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, IOError):
        pass
    return {}
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
def save_state(state: Dict[str, Any]) -> None:
    """Save the state file atomically via write-to-temp + replace.

    Uses ``os.replace`` rather than ``os.rename``: both are atomic on
    POSIX, but ``os.rename`` raises on Windows when the destination
    already exists — and this script otherwise supports Windows (the
    fcntl import is optional). Falls back to a plain, non-atomic write
    when the temp-file path fails for any OS-level reason.
    """
    STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
    try:
        fd, tmp_path = tempfile.mkstemp(dir=STATE_FILE.parent, suffix=".tmp")
        try:
            with os.fdopen(fd, "w", encoding="utf-8") as f:
                json.dump(state, f, indent=2)
            os.replace(tmp_path, STATE_FILE)
        except BaseException:
            # Clean up the orphaned temp file before re-raising.
            with contextlib.suppress(OSError):
                os.unlink(tmp_path)
            raise
    except OSError as e:
        log("ERROR", f"Failed to save state atomically, falling back: {e}")
        STATE_FILE.write_text(json.dumps(state, indent=2), encoding="utf-8")
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
@contextlib.contextmanager
def state_lock(timeout: float = 5.0):
    """Acquire an advisory file lock around state operations.

    Best-effort: when fcntl is unavailable (Windows), when the lock
    cannot be acquired within *timeout* seconds, or when any other lock
    error occurs, the caller's body still runs — just without the lock.
    """
    # No fcntl (e.g. Windows): run unlocked.
    if fcntl is None:
        yield
        return

    LOCK_FILE.parent.mkdir(parents=True, exist_ok=True)
    lock_fd = None
    try:
        lock_fd = open(LOCK_FILE, "w")
        deadline = time.monotonic() + timeout
        while True:
            try:
                # Non-blocking attempt; poll every 100ms until the deadline.
                fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except (IOError, OSError):
                if time.monotonic() >= deadline:
                    debug("Could not acquire state lock within timeout, proceeding without lock")
                    lock_fd.close()
                    lock_fd = None
                    yield
                    return
                time.sleep(0.1)
        try:
            yield
        finally:
            # Always release the lock, even if the body raised.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            lock_fd.close()
    except Exception as e:
        # NOTE(review): this handler also catches exceptions raised by the
        # caller's body (thrown into the generator at the `yield` above);
        # the trailing `yield` would then make @contextmanager raise
        # RuntimeError("generator didn't stop after throw()") — confirm
        # whether body exceptions should instead propagate untouched.
        debug(f"Lock error, proceeding without lock: {e}")
        if lock_fd is not None:
            with contextlib.suppress(Exception):
                lock_fd.close()
        yield
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
def parse_timestamp(ts_str: str) -> Optional[datetime]:
    """Parse an ISO-8601 timestamp string, mapping a trailing 'Z' to UTC.

    Returns None for malformed input or non-string values (the
    AttributeError branch covers None / non-str arguments).

    Note: the original handled the 'Z' suffix twice (an ``endswith``
    rewrite *and* a blanket ``.replace("Z", ...)``); the blanket replace
    was redundant after the suffix rewrite and could corrupt timestamps
    containing 'Z' elsewhere, so it is removed.
    """
    try:
        if ts_str.endswith("Z"):
            ts_str = ts_str[:-1] + "+00:00"
        return datetime.fromisoformat(ts_str)
    except (ValueError, AttributeError):
        return None
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
# Known config keys in respan.json that map to span fields.
|
|
138
|
-
KNOWN_CONFIG_KEYS = {"customer_id", "span_name", "workflow_name"}
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
def load_respan_config(cwd: str) -> Dict[str, Any]:
    """Load .codex/respan.json from the project directory.

    Returns a dict with two keys:
    - "fields": known span fields (customer_id, span_name, workflow_name)
    - "properties": everything else (custom properties -> metadata)
    """
    config_path = Path(cwd) / ".codex" / "respan.json"
    if not config_path.exists():
        return {"fields": {}, "properties": {}}
    try:
        raw = json.loads(config_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, IOError) as e:
        debug(f"Failed to load respan.json from {config_path}: {e}")
        return {"fields": {}, "properties": {}}
    if not isinstance(raw, dict):
        return {"fields": {}, "properties": {}}
    return {
        "fields": {k: v for k, v in raw.items() if k in KNOWN_CONFIG_KEYS},
        "properties": {k: v for k, v in raw.items() if k not in KNOWN_CONFIG_KEYS},
    }
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
def truncate(text: str, max_length: int = MAX_CHARS) -> str:
    """Clip *text* to at most *max_length* characters, marking any truncation."""
    if len(text) <= max_length:
        return text
    return text[:max_length] + "\n... (truncated)"
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
def find_session_file(session_id: str) -> Optional[Path]:
    """Find the session JSONL file for a given session ID.

    Codex CLI stores sessions at:
        ~/.codex/sessions/YYYY/MM/DD/rollout-<timestamp>-<session-id>.jsonl

    The session ID appears in the filename after the timestamp prefix;
    date directories are walked newest-first.
    """
    sessions_dir = Path.home() / ".codex" / "sessions"
    if not sessions_dir.exists():
        debug(f"Sessions directory not found: {sessions_dir}")
        return None

    def newest_first(parent: Path):
        # Lexicographic reverse sort on zero-padded date names == newest first.
        return [d for d in sorted(parent.iterdir(), reverse=True) if d.is_dir()]

    for year_dir in newest_first(sessions_dir):
        for month_dir in newest_first(year_dir):
            for day_dir in newest_first(month_dir):
                for candidate in day_dir.glob("*.jsonl"):
                    if session_id in candidate.name:
                        debug(f"Found session file: {candidate}")
                        return candidate

    debug(f"No session file found for session ID: {session_id}")
    return None
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
def find_latest_session_file() -> Optional[Tuple[str, Path]]:
    """Find the most recently modified session JSONL file.

    Returns (session_id, path) or None. The session ID is taken from the
    "payload.id" field of the file's first JSON line, falling back to
    the file stem.

    Improvements over the original: only the first line of the winning
    file is read (the original loaded the entire — potentially very
    large — session transcript into memory just to split off line one),
    and the three-level date-directory walk is expressed as one glob.
    """
    sessions_dir = Path.home() / ".codex" / "sessions"
    if not sessions_dir.exists():
        return None

    latest_file: Optional[Path] = None
    latest_mtime = 0.0
    # Sessions live exactly three directory levels deep: YYYY/MM/DD/*.jsonl.
    for f in sessions_dir.glob("*/*/*/*.jsonl"):
        mtime = f.stat().st_mtime
        if mtime > latest_mtime:
            latest_mtime = mtime
            latest_file = f

    if latest_file:
        try:
            # Read only the first line; session files can be large.
            with open(latest_file, encoding="utf-8") as fh:
                first_line = fh.readline().strip()
            if first_line:
                first_msg = json.loads(first_line)
                payload = first_msg.get("payload", {})
                session_id = payload.get("id", latest_file.stem)
                return (session_id, latest_file)
        except (json.JSONDecodeError, IOError, IndexError) as e:
            debug(f"Error reading session file {latest_file}: {e}")

    return None
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
def parse_session(lines: List[str]) -> List[Dict[str, Any]]:
    """Decode JSONL lines into events, silently skipping blanks and bad rows."""
    events: List[Dict[str, Any]] = []
    for raw in lines:
        stripped = raw.strip()
        if not stripped:
            continue
        try:
            parsed = json.loads(stripped)
        except json.JSONDecodeError:
            continue
        events.append(parsed)
    return events
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
def extract_turns(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Extract turns from session events.

    A turn is bounded by task_started and task_complete events.
    Returns a list of turn dicts, each containing:
    - turn_id: str
    - start_time: str (ISO timestamp)
    - end_time: str (ISO timestamp)
    - model: str
    - cwd: str
    - user_message: str
    - assistant_message: str
    - commentary: List[str]
    - tool_calls: List[dict]
    - tool_outputs: List[dict]
    - reasoning: bool (whether reasoning was present)
    - token_usage: dict (from last_token_usage)
    - events: List[dict] (raw events in this turn)
    """
    turns = []
    # State machine: `current_turn` is non-None between a task_started and
    # its matching task_complete; events outside a turn are dropped.
    current_turn = None

    for event in events:
        evt_type = event.get("type")
        payload = event.get("payload", {})
        timestamp = event.get("timestamp", "")

        if evt_type == "event_msg":
            msg_type = payload.get("type", "")

            if msg_type == "task_started":
                # A new task_started without a preceding task_complete
                # silently discards the previous (unfinished) turn.
                current_turn = {
                    "turn_id": payload.get("turn_id", ""),
                    "start_time": timestamp,
                    "end_time": "",
                    "model": "",
                    "cwd": "",
                    "user_message": "",
                    "assistant_message": "",
                    "commentary": [],
                    "tool_calls": [],
                    "tool_outputs": [],
                    "reasoning": False,
                    "token_usage": {},
                    "events": [],
                }

            elif msg_type == "task_complete" and current_turn:
                current_turn["end_time"] = timestamp
                # Keep the raw completion payload for downstream consumers.
                current_turn["_complete_payload"] = payload
                turns.append(current_turn)
                current_turn = None

            elif msg_type == "user_message" and current_turn:
                # Last user_message in the turn wins (overwrites).
                current_turn["user_message"] = payload.get("message", "")

            elif msg_type == "agent_message" and current_turn:
                phase = payload.get("phase", "")
                message = payload.get("message", "")
                if phase == "final_answer":
                    current_turn["assistant_message"] = message
                elif phase == "commentary":
                    current_turn["commentary"].append(message)

            elif msg_type == "token_count" and current_turn:
                info = payload.get("info", {})
                # last_token_usage reflects the most recent API call only.
                last_usage = info.get("last_token_usage", {})
                if last_usage:
                    current_turn["token_usage"] = last_usage

        elif evt_type == "turn_context" and current_turn:
            current_turn["model"] = payload.get("model", "")
            current_turn["cwd"] = payload.get("cwd", "")

        elif evt_type == "response_item" and current_turn:
            item_type = payload.get("type", "")

            if item_type == "function_call":
                current_turn["tool_calls"].append({
                    "name": payload.get("name", "unknown"),
                    "arguments": payload.get("arguments", ""),
                    "call_id": payload.get("call_id", ""),
                    "timestamp": timestamp,
                })

            elif item_type == "custom_tool_call":
                # Custom tool calls carry their arguments under "input".
                current_turn["tool_calls"].append({
                    "name": payload.get("name", "unknown"),
                    "arguments": payload.get("input", ""),
                    "call_id": payload.get("call_id", ""),
                    "timestamp": timestamp,
                })

            elif item_type == "function_call_output":
                current_turn["tool_outputs"].append({
                    "call_id": payload.get("call_id", ""),
                    "output": payload.get("output", ""),
                    "timestamp": timestamp,
                })

            elif item_type == "custom_tool_call_output":
                current_turn["tool_outputs"].append({
                    "call_id": payload.get("call_id", ""),
                    "output": payload.get("output", ""),
                    "timestamp": timestamp,
                })

            elif item_type == "reasoning":
                current_turn["reasoning"] = True

            elif item_type == "web_search_call":
                # Web searches have no call_id; synthesize one from the timestamp.
                action = payload.get("action", {})
                query = action.get("query", "")
                current_turn["tool_calls"].append({
                    "name": "web_search",
                    "arguments": json.dumps({"query": query}),
                    "call_id": f"web_search_{timestamp}",
                    "timestamp": timestamp,
                })

        # Every event seen while a turn is open is also kept raw.
        if current_turn is not None:
            current_turn["events"].append(event)

    return turns
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
def create_respan_spans(
    session_id: str,
    turn_num: int,
    turn: Dict[str, Any],
    config: Optional[Dict[str, Any]] = None,
) -> List[Dict[str, Any]]:
    """Create Respan span logs for a single Codex CLI turn.

    Produces a span tree:
    Root: codex-cli (agent container, latency, metadata)
    +-- openai.chat (generation - model, tokens, messages)
    +-- Reasoning (if reasoning_output_tokens > 0)
    +-- Tool: Shell (if exec_command)
    +-- Tool: File Edit (if apply_patch)
    +-- Tool: Web Search (if web_search_call)
    """
    spans = []

    # Extract data from the turn
    user_text = turn.get("user_message", "")
    assistant_text = turn.get("assistant_message", "")
    commentary = turn.get("commentary", [])
    # Fallback model name when turn_context never supplied one.
    model = turn.get("model", "gpt-5.4")
    cwd = turn.get("cwd", "")
    token_usage = turn.get("token_usage", {})
    tool_calls = turn.get("tool_calls", [])
    tool_outputs = turn.get("tool_outputs", [])
    has_reasoning = turn.get("reasoning", False)

    # Timing — fall back to "now" for any missing boundary timestamp.
    start_time_str = turn.get("start_time", "")
    end_time_str = turn.get("end_time", "")
    now_str = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    if not start_time_str:
        start_time_str = now_str
    if not end_time_str:
        end_time_str = now_str

    # Latency is only computed when both timestamps parse.
    latency = None
    start_dt = parse_timestamp(start_time_str)
    end_dt = parse_timestamp(end_time_str)
    if start_dt and end_dt:
        latency = (end_dt - start_dt).total_seconds()

    # Messages for input/output
    prompt_messages: List[Dict[str, Any]] = []
    if user_text:
        prompt_messages.append({"role": "user", "content": user_text})
    completion_message: Optional[Dict[str, Any]] = None
    if assistant_text:
        completion_message = {"role": "assistant", "content": assistant_text}

    # IDs from config — env vars take precedence over respan.json fields.
    cfg_fields = (config or {}).get("fields", {})
    cfg_props = (config or {}).get("properties", {})

    trace_unique_id = f"{session_id}_turn_{turn_num}"
    workflow_name = os.environ.get("RESPAN_WORKFLOW_NAME") or cfg_fields.get("workflow_name") or "codex-cli"
    root_span_name = os.environ.get("RESPAN_SPAN_NAME") or cfg_fields.get("span_name") or "codex-cli"
    thread_id = f"codexcli_{session_id}"
    customer_id = os.environ.get("RESPAN_CUSTOMER_ID") or cfg_fields.get("customer_id") or ""

    # Metadata — config properties, then RESPAN_METADATA env JSON, can
    # each override earlier keys via dict.update.
    metadata: Dict[str, Any] = {
        "codex_cli_turn": turn_num,
    }
    if cwd:
        metadata["cwd"] = cwd
    if commentary:
        metadata["commentary"] = "\n".join(commentary)
    if cfg_props:
        metadata.update(cfg_props)
    env_metadata = os.environ.get("RESPAN_METADATA")
    if env_metadata:
        try:
            extra = json.loads(env_metadata)
            if isinstance(extra, dict):
                metadata.update(extra)
        except json.JSONDecodeError:
            pass

    # Token usage mapping (Codex names -> Respan/OpenAI-style names).
    usage_fields: Dict[str, Any] = {}
    if token_usage:
        prompt_tokens = token_usage.get("input_tokens", 0)
        completion_tokens = token_usage.get("output_tokens", 0)
        usage_fields["prompt_tokens"] = prompt_tokens
        usage_fields["completion_tokens"] = completion_tokens
        total = token_usage.get("total_tokens", prompt_tokens + completion_tokens)
        if total > 0:
            usage_fields["total_tokens"] = total
        cached = token_usage.get("cached_input_tokens", 0)
        if cached > 0:
            usage_fields["prompt_tokens_details"] = {"cached_tokens": cached}
        reasoning_tokens = token_usage.get("reasoning_output_tokens", 0)
        if reasoning_tokens > 0:
            metadata["reasoning_tokens"] = reasoning_tokens

    # ------------------------------------------------------------------
    # Root span - agent container
    # ------------------------------------------------------------------
    root_span_id = f"codexcli_{trace_unique_id}_root"
    root_span: Dict[str, Any] = {
        "trace_unique_id": trace_unique_id,
        "thread_identifier": thread_id,
        "customer_identifier": customer_id,
        "span_unique_id": root_span_id,
        "span_name": root_span_name,
        "span_workflow_name": workflow_name,
        "model": model,
        "provider_id": "",
        "span_path": "",
        "input": json.dumps(prompt_messages) if prompt_messages else "",
        "output": json.dumps(completion_message) if completion_message else "",
        "timestamp": end_time_str,
        "start_time": start_time_str,
        "metadata": metadata,
    }
    if latency is not None:
        root_span["latency"] = latency
    spans.append(root_span)

    # ------------------------------------------------------------------
    # LLM generation child span
    # ------------------------------------------------------------------
    gen_span_id = f"codexcli_{trace_unique_id}_gen"
    gen_span: Dict[str, Any] = {
        "trace_unique_id": trace_unique_id,
        "span_unique_id": gen_span_id,
        "span_parent_id": root_span_id,
        "span_name": "openai.chat",
        "span_workflow_name": workflow_name,
        "span_path": "openai_chat",
        "model": model,
        "provider_id": "openai",
        "metadata": {},
        "input": json.dumps(prompt_messages) if prompt_messages else "",
        "output": json.dumps(completion_message) if completion_message else "",
        "prompt_messages": prompt_messages,
        "completion_message": completion_message,
        "timestamp": end_time_str,
        "start_time": start_time_str,
    }
    if latency is not None:
        gen_span["latency"] = latency
    # Token counts live only on the generation span.
    gen_span.update(usage_fields)
    spans.append(gen_span)

    # ------------------------------------------------------------------
    # Reasoning child span (if reasoning_output_tokens > 0)
    # ------------------------------------------------------------------
    reasoning_tokens = token_usage.get("reasoning_output_tokens", 0)
    if has_reasoning or reasoning_tokens > 0:
        spans.append({
            "trace_unique_id": trace_unique_id,
            "span_unique_id": f"codexcli_{trace_unique_id}_reasoning",
            "span_parent_id": root_span_id,
            "span_name": "Reasoning",
            "span_workflow_name": workflow_name,
            "span_path": "reasoning",
            "provider_id": "",
            "metadata": {"reasoning_tokens": reasoning_tokens} if reasoning_tokens > 0 else {},
            "input": "",
            "output": f"[Reasoning: {reasoning_tokens} tokens]" if reasoning_tokens > 0 else "[Reasoning]",
            "timestamp": end_time_str,
            "start_time": start_time_str,
        })

    # ------------------------------------------------------------------
    # Tool child spans
    # ------------------------------------------------------------------
    # Build output lookup by call_id
    output_map: Dict[str, Dict[str, Any]] = {}
    for to in tool_outputs:
        call_id = to.get("call_id", "")
        if call_id:
            output_map[call_id] = to

    tool_num = 0
    for tc in tool_calls:
        tool_num += 1
        tool_name = tc.get("name", "unknown")
        call_id = tc.get("call_id", "")
        arguments = tc.get("arguments", "")
        tool_ts = tc.get("timestamp", start_time_str)

        # Map Codex tool names to friendly display names
        display_name = _tool_display_name(tool_name)

        # Format input
        tool_input = _format_tool_input(tool_name, arguments)

        # Format output — missing outputs fall back to empty/turn-end time.
        tool_output_data = output_map.get(call_id, {})
        tool_output = _format_tool_output(tool_output_data.get("output", ""))
        tool_end = tool_output_data.get("timestamp", end_time_str)

        # Calculate tool latency
        tool_latency = None
        tool_start_dt = parse_timestamp(tool_ts)
        tool_end_dt = parse_timestamp(tool_end)
        if tool_start_dt and tool_end_dt:
            tool_latency = (tool_end_dt - tool_start_dt).total_seconds()

        tool_span: Dict[str, Any] = {
            "trace_unique_id": trace_unique_id,
            "span_unique_id": f"codexcli_{trace_unique_id}_tool_{tool_num}",
            "span_parent_id": root_span_id,
            "span_name": f"Tool: {display_name}",
            "span_workflow_name": workflow_name,
            "span_path": f"tool_{tool_name.lower()}",
            "provider_id": "",
            "metadata": {},
            "input": tool_input,
            "output": tool_output,
            "timestamp": tool_end,
            "start_time": tool_ts,
        }
        if tool_latency is not None:
            tool_span["latency"] = tool_latency
        spans.append(tool_span)

    # Add required Respan platform fields to every span.
    respan_defaults = {
        "warnings": "",
        "encoding_format": "float",
        "disable_fallback": False,
        "respan_params": {
            "has_webhook": False,
            "environment": os.environ.get("RESPAN_ENVIRONMENT", "prod"),
        },
        "field_name": "data: ",
        "delimiter": "\n\n",
        "disable_log": False,
        "request_breakdown": False,
    }
    for span in spans:
        for key, value in respan_defaults.items():
            if key not in span:
                span[key] = value

    return spans
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
def _tool_display_name(name: str) -> str:
|
|
634
|
-
"""Map Codex CLI tool names to display names."""
|
|
635
|
-
mapping = {
|
|
636
|
-
"exec_command": "Shell",
|
|
637
|
-
"apply_patch": "File Edit",
|
|
638
|
-
"web_search": "Web Search",
|
|
639
|
-
}
|
|
640
|
-
return mapping.get(name, name)
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
def _format_tool_input(tool_name: str, arguments: str) -> str:
    """Format tool input for display, truncated to the configured limit."""
    if not arguments:
        return ""

    # Arguments usually arrive as a JSON string; decode when possible.
    if isinstance(arguments, str):
        try:
            parsed = json.loads(arguments)
        except (json.JSONDecodeError, TypeError):
            return truncate(str(arguments))
    else:
        parsed = arguments

    if tool_name == "exec_command" and isinstance(parsed, dict):
        cmd = parsed.get("cmd", "")
        workdir = parsed.get("workdir", "")
        line = f"Command: {cmd}"
        if workdir:
            line = f"[{workdir}] {line}"
        return truncate(line)

    # apply_patch payloads are shown verbatim (the raw patch text).
    if tool_name == "apply_patch" and isinstance(arguments, str):
        return truncate(arguments)

    if isinstance(parsed, dict):
        try:
            return truncate(json.dumps(parsed, indent=2))
        except (TypeError, ValueError):
            pass

    return truncate(str(arguments))
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
def _format_tool_output(output: str) -> str:
    """Format tool output for display, unwrapping JSON envelopes."""
    if not output:
        return ""
    # custom_tool_call_output wraps the real text in a JSON object with
    # an "output" key; unwrap it when present.
    try:
        envelope = json.loads(output)
    except (json.JSONDecodeError, TypeError):
        envelope = None
    if isinstance(envelope, dict) and "output" in envelope:
        return truncate(envelope["output"])
    return truncate(output)
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
def send_spans(
    spans: List[Dict[str, Any]],
    api_key: str,
    base_url: str,
    turn_num: int,
) -> None:
    """Send spans to Respan as a single batch.

    Retry policy: up to 3 attempts. 2xx/3xx succeeds; 4xx is logged and
    not retried; 5xx and network timeouts/connection errors sleep 1s and
    retry; any other exception aborts immediately.

    Fix: the payload was serialized twice (once for size logging, once
    by requests via ``json=``); it is now serialized exactly once and
    sent as the request body with an explicit JSON content type.
    """
    url = f"{base_url}/v1/traces/ingest"
    headers = {
        "Authorization": f"Bearer {api_key}",
        # Required because the body is sent pre-serialized via data=.
        "Content-Type": "application/json",
    }

    span_names = [s.get("span_name", "?") for s in spans]
    payload_json = json.dumps(spans)
    payload_bytes = payload_json.encode("utf-8")
    payload_size = len(payload_json)
    debug(f"Sending {len(spans)} spans ({payload_size} bytes) for turn {turn_num}: {span_names}")
    if DEBUG:
        debug_file = LOG_FILE.parent / f"respan_codex_spans_turn_{turn_num}.json"
        debug_file.write_text(payload_json, encoding="utf-8")
        debug(f"Dumped spans to {debug_file}")

    for attempt in range(3):
        try:
            response = requests.post(url, data=payload_bytes, headers=headers, timeout=30)
            if response.status_code < 400:
                resp_text = response.text[:300] if response.text else ""
                debug(f"Sent {len(spans)} spans for turn {turn_num} "
                      f"(attempt {attempt + 1}): {resp_text}")
                return
            if response.status_code < 500:
                # Client error: retrying won't help.
                log("ERROR", f"Spans rejected for turn {turn_num}: "
                    f"HTTP {response.status_code} - {response.text[:200]}")
                return
            debug(f"Server error for turn {turn_num} "
                  f"(attempt {attempt + 1}), retrying...")
            time.sleep(1.0)
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
            time.sleep(1.0)
        except Exception as e:
            log("ERROR", f"Failed to send spans for turn {turn_num}: {e}")
            return

    log("ERROR", f"Failed to send {len(spans)} spans for turn {turn_num} after 3 attempts")
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
def process_session(
    session_id: str,
    session_file: Path,
    state: Dict[str, Any],
    api_key: str,
    base_url: str,
    config: Optional[Dict[str, Any]] = None,
) -> int:
    """Process a session JSONL file and create traces for new turns.

    Returns the number of newly processed turns and records the updated
    per-session turn count in *state*.
    """
    already_done = state.get(session_id, {}).get("turn_count", 0)

    # Read and parse the full session file.
    raw_lines = session_file.read_text(encoding="utf-8").strip().split("\n")
    events = parse_session(raw_lines)
    if not events:
        debug("No events in session file")
        return 0

    all_turns = extract_turns(events)
    total_turns = len(all_turns)
    if total_turns <= already_done:
        debug(f"No new turns (total: {total_turns}, processed: {already_done})")
        return 0

    # Only turns beyond the recorded count are sent.
    processed = 0
    for offset, turn in enumerate(all_turns[already_done:], start=1):
        processed = offset
        turn_index = already_done + offset
        spans = create_respan_spans(session_id, turn_index, turn, config=config)
        send_spans(spans, api_key, base_url, turn_index)

    # Persist the new high-water mark.
    state[session_id] = {
        "turn_count": already_done + processed,
        "updated": datetime.now(timezone.utc).isoformat(),
    }
    save_state(state)

    return processed
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
def resolve_credentials() -> Tuple[Optional[str], str]:
    """Resolve API key and base URL from env vars or credentials file.

    Precedence: RESPAN_API_KEY / RESPAN_BASE_URL environment variables win;
    otherwise the key (and possibly base URL) come from the active profile
    in ~/.respan/credentials.json.

    Returns (api_key, base_url). api_key is None when nothing is configured.
    """
    api_key = os.getenv("RESPAN_API_KEY")
    base_url = os.getenv("RESPAN_BASE_URL", "https://api.respan.ai/api")

    if not api_key:
        # Fall back to the CLI's stored credentials.
        creds_file = Path.home() / ".respan" / "credentials.json"
        if creds_file.exists():
            try:
                creds = json.loads(creds_file.read_text(encoding="utf-8"))
                # The active profile name lives in a separate config file.
                config_file = Path.home() / ".respan" / "config.json"
                profile = "default"
                if config_file.exists():
                    cfg = json.loads(config_file.read_text(encoding="utf-8"))
                    profile = cfg.get("activeProfile", "default")
                cred = creds.get(profile, {})
                # Either a long-lived API key or an OAuth access token works.
                api_key = cred.get("apiKey") or cred.get("accessToken")
                # Only honor the stored baseUrl when the env var was absent
                # (i.e. base_url still holds the built-in default).
                if not base_url or base_url == "https://api.respan.ai/api":
                    base_url = cred.get("baseUrl", base_url)
                    # NOTE(review): normalization to a trailing "/api" only
                    # runs for a credentials-supplied URL, not for one set
                    # via RESPAN_BASE_URL — confirm that asymmetry is intended.
                    if base_url and not base_url.rstrip("/").endswith("/api"):
                        base_url = base_url.rstrip("/") + "/api"
                if api_key:
                    debug(f"Using API key from credentials.json (profile: {profile})")
            except (json.JSONDecodeError, IOError) as e:
                # Best-effort: a corrupt/unreadable file just means no key.
                debug(f"Failed to read credentials.json: {e}")

    return api_key, base_url
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
def main():
    """Entry point: handle one Codex CLI notify invocation, always exit 0.

    The hook must never fail the host CLI, so every path — including
    errors — terminates with exit code 0.
    """
    started_at = datetime.now()
    debug("Codex hook started")

    # Codex CLI passes the notification as a single JSON CLI argument.
    if len(sys.argv) < 2:
        debug("No argument provided (expected JSON payload in sys.argv[1])")
        sys.exit(0)
    try:
        notification = json.loads(sys.argv[1])
    except (json.JSONDecodeError, TypeError) as exc:
        debug(f"Invalid JSON in sys.argv[1]: {exc}")
        sys.exit(0)

    # Only full agent turns are interesting; ignore every other event.
    kind = notification.get("type", "")
    if kind != "agent-turn-complete":
        debug(f"Ignoring event type: {kind}")
        sys.exit(0)

    session_id = notification.get("thread-id", "")
    if not session_id:
        debug("No thread-id in notify payload")
        sys.exit(0)

    debug(f"Processing notify: type={kind}, session={session_id}")

    api_key, base_url = resolve_credentials()
    if not api_key:
        log("ERROR", "No API key found. Run: respan auth login")
        sys.exit(0)

    # Locate the JSONL transcript; fall back to the newest session on disk.
    session_file = find_session_file(session_id)
    if not session_file:
        fallback = find_latest_session_file()
        if not fallback:
            debug("No session file found")
            sys.exit(0)
        session_id, session_file = fallback

    # Per-project respan.json (fields/properties) from the working directory.
    project_config: Dict[str, Any] = {"fields": {}, "properties": {}}
    workdir = notification.get("cwd", "")
    if workdir:
        project_config = load_respan_config(workdir)
        debug(f"Loaded respan.json config from {workdir}: {project_config}")

    # The transcript may lag the notification slightly, so retry with a
    # short, growing backoff until at least one new turn appears.
    attempts = 3
    processed = 0
    try:
        for attempt in range(attempts):
            with state_lock():
                tracked = load_state()
                processed = process_session(
                    session_id, session_file, tracked, api_key, base_url,
                    config=project_config,
                )
            if processed:
                break
            if attempt < attempts - 1:
                wait = 0.5 * (attempt + 1)
                debug(f"No turns processed (attempt {attempt + 1}/{attempts}), "
                      f"retrying in {wait}s...")
                time.sleep(wait)

        elapsed = (datetime.now() - started_at).total_seconds()
        log("INFO", f"Processed {processed} turns in {elapsed:.1f}s")
        if elapsed > 180:
            log("WARN", f"Hook took {elapsed:.1f}s (>3min), consider optimizing")
    except Exception as exc:
        # Never propagate: log the failure and still exit cleanly.
        log("ERROR", f"Failed to process session: {exc}")
        import traceback
        debug(traceback.format_exc())

    sys.exit(0)
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
# Run the hook only when executed directly (not when imported).
if __name__ == "__main__":
    main()
|