sentienceapi 0.95.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sentienceapi might be problematic. Click here for more details.
- sentience/__init__.py +253 -0
- sentience/_extension_loader.py +195 -0
- sentience/action_executor.py +215 -0
- sentience/actions.py +1020 -0
- sentience/agent.py +1181 -0
- sentience/agent_config.py +46 -0
- sentience/agent_runtime.py +424 -0
- sentience/asserts/__init__.py +70 -0
- sentience/asserts/expect.py +621 -0
- sentience/asserts/query.py +383 -0
- sentience/async_api.py +108 -0
- sentience/backends/__init__.py +137 -0
- sentience/backends/actions.py +343 -0
- sentience/backends/browser_use_adapter.py +241 -0
- sentience/backends/cdp_backend.py +393 -0
- sentience/backends/exceptions.py +211 -0
- sentience/backends/playwright_backend.py +194 -0
- sentience/backends/protocol.py +216 -0
- sentience/backends/sentience_context.py +469 -0
- sentience/backends/snapshot.py +427 -0
- sentience/base_agent.py +196 -0
- sentience/browser.py +1215 -0
- sentience/browser_evaluator.py +299 -0
- sentience/canonicalization.py +207 -0
- sentience/cli.py +130 -0
- sentience/cloud_tracing.py +807 -0
- sentience/constants.py +6 -0
- sentience/conversational_agent.py +543 -0
- sentience/element_filter.py +136 -0
- sentience/expect.py +188 -0
- sentience/extension/background.js +104 -0
- sentience/extension/content.js +161 -0
- sentience/extension/injected_api.js +914 -0
- sentience/extension/manifest.json +36 -0
- sentience/extension/pkg/sentience_core.d.ts +51 -0
- sentience/extension/pkg/sentience_core.js +323 -0
- sentience/extension/pkg/sentience_core_bg.wasm +0 -0
- sentience/extension/pkg/sentience_core_bg.wasm.d.ts +10 -0
- sentience/extension/release.json +115 -0
- sentience/formatting.py +15 -0
- sentience/generator.py +202 -0
- sentience/inspector.py +367 -0
- sentience/llm_interaction_handler.py +191 -0
- sentience/llm_provider.py +875 -0
- sentience/llm_provider_utils.py +120 -0
- sentience/llm_response_builder.py +153 -0
- sentience/models.py +846 -0
- sentience/ordinal.py +280 -0
- sentience/overlay.py +222 -0
- sentience/protocols.py +228 -0
- sentience/query.py +303 -0
- sentience/read.py +188 -0
- sentience/recorder.py +589 -0
- sentience/schemas/trace_v1.json +335 -0
- sentience/screenshot.py +100 -0
- sentience/sentience_methods.py +86 -0
- sentience/snapshot.py +706 -0
- sentience/snapshot_diff.py +126 -0
- sentience/text_search.py +262 -0
- sentience/trace_event_builder.py +148 -0
- sentience/trace_file_manager.py +197 -0
- sentience/trace_indexing/__init__.py +27 -0
- sentience/trace_indexing/index_schema.py +199 -0
- sentience/trace_indexing/indexer.py +414 -0
- sentience/tracer_factory.py +322 -0
- sentience/tracing.py +449 -0
- sentience/utils/__init__.py +40 -0
- sentience/utils/browser.py +46 -0
- sentience/utils/element.py +257 -0
- sentience/utils/formatting.py +59 -0
- sentience/utils.py +296 -0
- sentience/verification.py +380 -0
- sentience/visual_agent.py +2058 -0
- sentience/wait.py +139 -0
- sentienceapi-0.95.0.dist-info/METADATA +984 -0
- sentienceapi-0.95.0.dist-info/RECORD +82 -0
- sentienceapi-0.95.0.dist-info/WHEEL +5 -0
- sentienceapi-0.95.0.dist-info/entry_points.txt +2 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE +24 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-APACHE +201 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-MIT +21 -0
- sentienceapi-0.95.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,414 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Trace indexing for fast timeline rendering and step drill-down.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import hashlib
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Optional
|
|
11
|
+
|
|
12
|
+
from ..canonicalization import canonicalize_element
|
|
13
|
+
from .index_schema import (
|
|
14
|
+
ActionInfo,
|
|
15
|
+
SnapshotInfo,
|
|
16
|
+
StepCounters,
|
|
17
|
+
StepIndex,
|
|
18
|
+
TraceFileInfo,
|
|
19
|
+
TraceIndex,
|
|
20
|
+
TraceSummary,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _compute_snapshot_digest(snapshot_data: dict[str, Any]) -> str:
|
|
25
|
+
"""
|
|
26
|
+
Compute stable digest of snapshot for diffing.
|
|
27
|
+
|
|
28
|
+
Includes: url, viewport, canonicalized elements (id, role, text_norm, bbox_rounded).
|
|
29
|
+
Excludes: importance, style fields, transient attributes.
|
|
30
|
+
"""
|
|
31
|
+
url = snapshot_data.get("url", "")
|
|
32
|
+
viewport = snapshot_data.get("viewport", {})
|
|
33
|
+
elements = snapshot_data.get("elements", [])
|
|
34
|
+
|
|
35
|
+
# Canonicalize elements using shared helper
|
|
36
|
+
canonical_elements = [canonicalize_element(elem) for elem in elements]
|
|
37
|
+
|
|
38
|
+
# Sort by element id for determinism
|
|
39
|
+
canonical_elements.sort(key=lambda e: e.get("id", 0))
|
|
40
|
+
|
|
41
|
+
# Build canonical object
|
|
42
|
+
canonical = {
|
|
43
|
+
"url": url,
|
|
44
|
+
"viewport": {
|
|
45
|
+
"width": viewport.get("width", 0),
|
|
46
|
+
"height": viewport.get("height", 0),
|
|
47
|
+
},
|
|
48
|
+
"elements": canonical_elements,
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
# Hash
|
|
52
|
+
canonical_json = json.dumps(canonical, sort_keys=True, separators=(",", ":"))
|
|
53
|
+
digest = hashlib.sha256(canonical_json.encode("utf-8")).hexdigest()
|
|
54
|
+
return f"sha256:{digest}"
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _compute_action_digest(action_data: dict[str, Any]) -> str:
|
|
58
|
+
"""
|
|
59
|
+
Compute digest of action args for privacy + determinism.
|
|
60
|
+
|
|
61
|
+
For TYPE: includes text_len + text_sha256 (not raw text)
|
|
62
|
+
For CLICK/PRESS: includes only non-sensitive fields
|
|
63
|
+
"""
|
|
64
|
+
action_type = action_data.get("type", "")
|
|
65
|
+
target_id = action_data.get("target_element_id")
|
|
66
|
+
|
|
67
|
+
canonical = {
|
|
68
|
+
"type": action_type,
|
|
69
|
+
"target_element_id": target_id,
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
# Type-specific canonicalization
|
|
73
|
+
if action_type == "TYPE":
|
|
74
|
+
text = action_data.get("text", "")
|
|
75
|
+
canonical["text_len"] = len(text)
|
|
76
|
+
canonical["text_sha256"] = hashlib.sha256(text.encode("utf-8")).hexdigest()
|
|
77
|
+
elif action_type == "PRESS":
|
|
78
|
+
canonical["key"] = action_data.get("key", "")
|
|
79
|
+
# CLICK has no extra args
|
|
80
|
+
|
|
81
|
+
# Hash
|
|
82
|
+
canonical_json = json.dumps(canonical, sort_keys=True, separators=(",", ":"))
|
|
83
|
+
digest = hashlib.sha256(canonical_json.encode("utf-8")).hexdigest()
|
|
84
|
+
return f"sha256:{digest}"
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _compute_file_sha256(file_path: str) -> str:
|
|
88
|
+
"""Compute SHA256 hash of entire file."""
|
|
89
|
+
sha256 = hashlib.sha256()
|
|
90
|
+
with open(file_path, "rb") as f:
|
|
91
|
+
while chunk := f.read(8192):
|
|
92
|
+
sha256.update(chunk)
|
|
93
|
+
return sha256.hexdigest()
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def build_trace_index(trace_path: str) -> TraceIndex:
    """
    Build trace index from JSONL file in single streaming pass.

    Records per-step byte offsets so a viewer can later seek straight to a
    step's events (see ``read_step_events``) without re-parsing the file.

    Args:
        trace_path: Path to trace JSONL file

    Returns:
        Complete TraceIndex object

    Raises:
        FileNotFoundError: If trace_path does not exist.
    """
    trace_path_obj = Path(trace_path)
    if not trace_path_obj.exists():
        raise FileNotFoundError(f"Trace file not found: {trace_path}")

    # Extract run_id from filename (stem of the JSONL file).
    run_id = trace_path_obj.stem

    # Initialize run-level summary accumulators.
    first_ts = ""
    last_ts = ""
    event_count = 0
    error_count = 0
    final_url = None
    run_end_status = None  # Track status from run_end event
    agent_name = None  # Extract from run_start event
    line_count = 0  # Track total line count (malformed lines included)

    steps_by_id: dict[str, StepIndex] = {}
    step_order: list[str] = []  # Track order of first appearance

    # Stream through file, tracking byte offsets and line numbers.
    # Binary mode keeps byte offsets exact regardless of text encoding.
    with open(trace_path, "rb") as f:
        byte_offset = 0
        line_number = 0  # Track line number for each event (1-based)

        for line_bytes in f:
            line_number += 1
            line_count += 1
            line_len = len(line_bytes)

            try:
                event = json.loads(line_bytes.decode("utf-8"))
            except json.JSONDecodeError:
                # Skip malformed lines but still advance the byte offset
                # so subsequent step offsets stay correct.
                byte_offset += line_len
                continue

            # Extract event metadata
            event_type = event.get("type", "")
            # Accept both "ts" and the older "timestamp" field name.
            ts = event.get("ts") or event.get("timestamp", "")
            step_id = event.get("step_id", "step-0")  # Default synthetic step
            data = event.get("data", {})

            # Update summary
            event_count += 1
            if not first_ts:
                first_ts = ts
            last_ts = ts

            if event_type == "error":
                error_count += 1

            # Extract agent_name from run_start event
            if event_type == "run_start":
                agent_name = data.get("agent")

            # Initialize step if first time seeing this step_id
            if step_id not in steps_by_id:
                step_order.append(step_id)
                steps_by_id[step_id] = StepIndex(
                    step_index=len(step_order),  # 1-based: this step was just appended
                    step_id=step_id,
                    goal=None,
                    status="failure",  # Default to failure (will be updated by step_end event)
                    ts_start=ts,
                    ts_end=ts,
                    offset_start=byte_offset,
                    offset_end=byte_offset + line_len,
                    line_number=line_number,  # Track line number
                    url_before=None,
                    url_after=None,
                    snapshot_before=SnapshotInfo(),
                    snapshot_after=SnapshotInfo(),
                    action=ActionInfo(),
                    counters=StepCounters(),
                )

            step = steps_by_id[step_id]

            # Update step metadata on every event belonging to this step.
            step.ts_end = ts
            step.offset_end = byte_offset + line_len
            # NOTE(review): overwritten on each event, so this ends up as the
            # step's LAST event line, not its first — confirm that is intended.
            step.line_number = line_number
            step.counters.events += 1

            # Handle specific event types
            if event_type == "step_start":
                step.goal = data.get("goal")
                step.url_before = data.get("pre_url")

            elif event_type == "snapshot" or event_type == "snapshot_taken":
                # Handle both "snapshot" (current) and "snapshot_taken" (schema) for backward compatibility
                snapshot_id = data.get("snapshot_id")
                url = data.get("url")
                digest = _compute_snapshot_digest(data)

                # First snapshot = before, last snapshot = after
                if step.snapshot_before.snapshot_id is None:
                    step.snapshot_before = SnapshotInfo(
                        snapshot_id=snapshot_id, digest=digest, url=url
                    )
                    # Prefer the url recorded at step_start; fall back to snapshot url.
                    step.url_before = step.url_before or url

                step.snapshot_after = SnapshotInfo(snapshot_id=snapshot_id, digest=digest, url=url)
                step.url_after = url
                step.counters.snapshots += 1
                final_url = url

            elif event_type == "action" or event_type == "action_executed":
                # Handle both "action" (current) and "action_executed" (schema) for backward compatibility
                step.action = ActionInfo(
                    type=data.get("type"),
                    target_element_id=data.get("target_element_id"),
                    args_digest=_compute_action_digest(data),  # privacy-preserving digest
                    success=data.get("success", True),
                )
                step.counters.actions += 1

            elif event_type == "llm_response" or event_type == "llm_called":
                # Handle both "llm_response" (current) and "llm_called" (schema) for backward compatibility
                step.counters.llm_calls += 1

            elif event_type == "error":
                step.status = "failure"

            elif event_type == "step_end":
                # Determine status from step_end event data
                # Frontend expects: success, failure, or partial
                # Logic: success = exec.success && verify.passed
                #        partial = exec.success && !verify.passed
                #        failure = !exec.success
                exec_data = data.get("exec", {})
                verify_data = data.get("verify", {})

                exec_success = exec_data.get("success", False)
                verify_passed = verify_data.get("passed", False)

                if exec_success and verify_passed:
                    step.status = "success"
                elif exec_success and not verify_passed:
                    step.status = "partial"
                elif not exec_success:
                    step.status = "failure"
                else:
                    # Fallback: if step_end exists but no exec/verify data, default to failure
                    step.status = "failure"

            elif event_type == "run_end":
                # Extract status from run_end event
                run_end_status = data.get("status")
                # Validate status value; anything else is treated as missing.
                if run_end_status not in ["success", "failure", "partial", "unknown"]:
                    run_end_status = None

            byte_offset += line_len

    # Use run_end status if available, otherwise infer from step statuses
    if run_end_status is None:
        step_statuses = [step.status for step in steps_by_id.values()]
        if step_statuses:
            # Infer overall status from step statuses
            if all(s == "success" for s in step_statuses):
                run_end_status = "success"
            elif any(s == "failure" for s in step_statuses):
                # If any failure and no successes, it's failure; otherwise partial
                if any(s == "success" for s in step_statuses):
                    run_end_status = "partial"
                else:
                    run_end_status = "failure"
            elif any(s == "partial" for s in step_statuses):
                run_end_status = "partial"
            else:
                run_end_status = "failure"  # Default to failure instead of unknown
        else:
            run_end_status = "failure"  # Default to failure instead of unknown

    # Calculate duration in milliseconds between first and last timestamps.
    duration_ms = None
    if first_ts and last_ts:
        try:
            # fromisoformat (pre-3.11) rejects a trailing "Z"; normalize it.
            start = datetime.fromisoformat(first_ts.replace("Z", "+00:00"))
            end = datetime.fromisoformat(last_ts.replace("Z", "+00:00"))
            duration_ms = int((end - start).total_seconds() * 1000)
        except (ValueError, AttributeError):
            duration_ms = None

    # Aggregate counters
    snapshot_count = sum(step.counters.snapshots for step in steps_by_id.values())
    action_count = sum(step.counters.actions for step in steps_by_id.values())
    counters = {
        "snapshot_count": snapshot_count,
        "action_count": action_count,
        "error_count": error_count,
    }

    # Build summary
    summary = TraceSummary(
        first_ts=first_ts,
        last_ts=last_ts,
        event_count=event_count,
        step_count=len(steps_by_id),
        error_count=error_count,
        final_url=final_url,
        status=run_end_status,
        agent_name=agent_name,
        duration_ms=duration_ms,
        counters=counters,
    )

    # Build steps list in order of first appearance.
    steps_list = [steps_by_id[sid] for sid in step_order]

    # Build trace file info (size/hash let consumers detect a stale index).
    trace_file = TraceFileInfo(
        path=str(trace_path),
        size_bytes=os.path.getsize(trace_path),
        sha256=_compute_file_sha256(str(trace_path)),
        line_count=line_count,
    )

    # Build final index
    index = TraceIndex(
        version=1,
        run_id=run_id,
        created_at=datetime.now(timezone.utc).isoformat(),
        trace_file=trace_file,
        summary=summary,
        steps=steps_list,
    )

    return index
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def write_trace_index(
    trace_path: str, index_path: str | None = None, frontend_format: bool = False
) -> str:
    """
    Build the trace index and write it to a JSON file.

    Args:
        trace_path: Path to trace JSONL file
        index_path: Optional custom path for index file (default: trace_path with .index.json)
        frontend_format: If True, write in frontend-compatible format (default: False)

    Returns:
        Path to written index file
    """
    target = index_path
    if target is None:
        # trace.jsonl -> trace.index.json (strip suffix, append .index.json)
        target = str(Path(trace_path).with_suffix("")) + ".index.json"

    index = build_trace_index(trace_path)
    payload = index.to_sentience_studio_dict() if frontend_format else index.to_dict()

    with open(target, "w", encoding="utf-8") as fh:
        json.dump(payload, fh, indent=2)

    return target
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
def read_step_events(trace_path: str, offset_start: int, offset_end: int) -> list[dict[str, Any]]:
    """
    Read events for a specific step using byte offsets from the index.

    Args:
        trace_path: Path to trace JSONL file
        offset_start: Byte offset where step starts
        offset_end: Byte offset where step ends

    Returns:
        List of event dictionaries for the step
    """
    # Seek straight to the step's byte range; no need to scan the whole file.
    with open(trace_path, "rb") as fh:
        fh.seek(offset_start)
        raw = fh.read(offset_end - offset_start)

    parsed: list[dict[str, Any]] = []
    for raw_line in raw.split(b"\n"):
        if not raw_line:
            continue
        try:
            parsed.append(json.loads(raw_line.decode("utf-8")))
        except json.JSONDecodeError:
            # Silently drop malformed lines, mirroring the indexer's behavior.
            continue

    return parsed
|
|
397
|
+
|
|
398
|
+
|
|
399
|
+
# CLI entrypoint
def main():
    """CLI tool for building trace index.

    Usage: python -m sentience.trace_indexing.indexer <trace.jsonl>
    """
    import sys

    if len(sys.argv) < 2:
        # Module path corrected: this file lives in sentience.trace_indexing,
        # not sentience.tracing, so the old usage message was un-runnable.
        print("Usage: python -m sentience.trace_indexing.indexer <trace.jsonl>")
        sys.exit(1)

    trace_path = sys.argv[1]
    index_path = write_trace_index(trace_path)
    print(f"✅ Index written to: {index_path}")
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# Script entry point: build and write an index for the trace file given on argv.
if __name__ == "__main__":
    main()
|