sentienceapi 0.90.16__py3-none-any.whl → 0.92.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sentienceapi might be problematic. Click here for more details.
- sentience/__init__.py +14 -5
- sentience/action_executor.py +215 -0
- sentience/actions.py +408 -25
- sentience/agent.py +802 -293
- sentience/agent_config.py +3 -0
- sentience/async_api.py +83 -1142
- sentience/base_agent.py +95 -0
- sentience/browser.py +484 -1
- sentience/browser_evaluator.py +299 -0
- sentience/cloud_tracing.py +457 -33
- sentience/conversational_agent.py +77 -43
- sentience/element_filter.py +136 -0
- sentience/expect.py +98 -2
- sentience/extension/background.js +56 -185
- sentience/extension/content.js +117 -289
- sentience/extension/injected_api.js +799 -1374
- sentience/extension/manifest.json +1 -1
- sentience/extension/pkg/sentience_core.js +190 -396
- sentience/extension/pkg/sentience_core_bg.wasm +0 -0
- sentience/extension/release.json +47 -47
- sentience/formatting.py +9 -53
- sentience/inspector.py +183 -1
- sentience/llm_interaction_handler.py +191 -0
- sentience/llm_provider.py +74 -52
- sentience/llm_provider_utils.py +120 -0
- sentience/llm_response_builder.py +153 -0
- sentience/models.py +60 -1
- sentience/overlay.py +109 -2
- sentience/protocols.py +228 -0
- sentience/query.py +1 -1
- sentience/read.py +95 -3
- sentience/recorder.py +223 -3
- sentience/schemas/trace_v1.json +102 -9
- sentience/screenshot.py +48 -2
- sentience/sentience_methods.py +86 -0
- sentience/snapshot.py +291 -38
- sentience/snapshot_diff.py +141 -0
- sentience/text_search.py +119 -5
- sentience/trace_event_builder.py +129 -0
- sentience/trace_file_manager.py +197 -0
- sentience/trace_indexing/index_schema.py +95 -7
- sentience/trace_indexing/indexer.py +117 -14
- sentience/tracer_factory.py +119 -6
- sentience/tracing.py +172 -8
- sentience/utils/__init__.py +40 -0
- sentience/utils/browser.py +46 -0
- sentience/utils/element.py +257 -0
- sentience/utils/formatting.py +59 -0
- sentience/utils.py +1 -1
- sentience/visual_agent.py +2056 -0
- sentience/wait.py +68 -2
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/METADATA +2 -1
- sentienceapi-0.92.2.dist-info/RECORD +65 -0
- sentience/extension/test-content.js +0 -4
- sentienceapi-0.90.16.dist-info/RECORD +0 -50
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/WHEEL +0 -0
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/entry_points.txt +0 -0
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/licenses/LICENSE +0 -0
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/licenses/LICENSE-APACHE +0 -0
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/licenses/LICENSE-MIT +0 -0
- {sentienceapi-0.90.16.dist-info → sentienceapi-0.92.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Trace event building utilities for agent-based tracing.
|
|
3
|
+
|
|
4
|
+
This module provides centralized trace event building logic to reduce duplication
|
|
5
|
+
across agent implementations.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Any, Optional
|
|
9
|
+
|
|
10
|
+
from .models import AgentActionResult, Element, Snapshot
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TraceEventBuilder:
    """
    Builds trace events with a consistent structure.

    Static helpers for the common trace event payloads emitted by agent
    implementations:
    - snapshot_taken events
    - step_end events
    """

    @staticmethod
    def build_snapshot_event(
        snapshot: Snapshot,
        include_all_elements: bool = True,
    ) -> dict[str, Any]:
        """
        Build the payload for a snapshot_taken trace event.

        Args:
            snapshot: Snapshot to serialize.
            include_all_elements: Accepted for API compatibility; the current
                implementation always serializes every element of the snapshot.

        Returns:
            Dictionary with snapshot event data: url, element_count,
            timestamp, and the per-element dicts (each augmented with a
            min-max normalized ``importance_score`` in [0, 1]).
        """
        elements = snapshot.elements

        # Min-max normalization of raw importance: (value - min) / (max - min),
        # computed once per snapshot.
        raw_scores = [el.importance for el in elements]
        lo = min(raw_scores) if raw_scores else 0
        hi = max(raw_scores) if raw_scores else 0
        span = hi - lo

        serialized: list[dict[str, Any]] = []
        for el in elements:
            payload = el.model_dump()
            # A degenerate span (all elements share one importance) maps
            # every element to the midpoint 0.5.
            payload["importance_score"] = (
                (el.importance - lo) / span if span > 0 else 0.5
            )
            serialized.append(payload)

        return {
            "url": snapshot.url,
            "element_count": len(elements),
            "timestamp": snapshot.timestamp,
            "elements": serialized,  # Full element data for DOM tree display
        }

    @staticmethod
    def build_step_end_event(
        step_id: str,
        step_index: int,
        goal: str,
        attempt: int,
        pre_url: str,
        post_url: str,
        snapshot_digest: str | None,
        llm_data: dict[str, Any],
        exec_data: dict[str, Any],
        verify_data: dict[str, Any],
        pre_elements: list[dict[str, Any]] | None = None,
    ) -> dict[str, Any]:
        """
        Build the payload for a step_end trace event.

        Args:
            step_id: Unique step identifier.
            step_index: Step index (0-based).
            attempt: Attempt number (0-based).
            goal: User's goal for this step.
            pre_url: URL before action execution.
            post_url: URL after action execution.
            snapshot_digest: Digest of the snapshot taken before the action.
            llm_data: LLM interaction data.
            exec_data: Action execution data.
            verify_data: Verification data.
            pre_elements: Optional elements from the pre-snapshot (with
                diff_status), included only when diff overlays are wanted.

        Returns:
            Dictionary with step_end event data (schema version "v": 1).
        """
        pre_section: dict[str, Any] = {
            "url": pre_url,
            "snapshot_digest": snapshot_digest,
        }
        # Only attach elements when the caller supplied them; an absent key
        # signals "no diff overlay" to consumers.
        if pre_elements is not None:
            pre_section["elements"] = pre_elements

        return {
            "v": 1,
            "step_id": step_id,
            "step_index": step_index,
            "goal": goal,
            "attempt": attempt,
            "pre": pre_section,
            "llm": llm_data,
            "exec": exec_data,
            "post": {"url": post_url},
            "verify": verify_data,
        }
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Trace file management utilities for consistent file operations.
|
|
3
|
+
|
|
4
|
+
This module provides helper functions for common trace file operations
|
|
5
|
+
shared between JsonlTraceSink and CloudTraceSink.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from collections.abc import Callable
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any, Optional
|
|
12
|
+
|
|
13
|
+
from .models import TraceStats
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class TraceFileManager:
    """
    Helper for common trace file operations.

    Provides static methods for file operations shared across trace sinks
    (JsonlTraceSink and CloudTraceSink).
    """

    @staticmethod
    def write_event(file_handle: Any, event: dict[str, Any]) -> None:
        """
        Write a trace event to a file handle as a single JSONL line.

        Args:
            file_handle: Open, writable file-like object.
            event: Event dictionary to serialize.
        """
        json_str = json.dumps(event, ensure_ascii=False)
        file_handle.write(json_str + "\n")
        file_handle.flush()  # Ensure the event reaches disk immediately

    @staticmethod
    def ensure_directory(path: Path) -> None:
        """
        Ensure the parent directory of a path exists, creating it if needed.

        Args:
            path: File path whose parent directory should exist
        """
        path.parent.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def read_events(path: Path) -> list[dict[str, Any]]:
        """
        Read all events from a JSONL trace file.

        Blank lines and lines containing invalid JSON are skipped, so a
        partially corrupted trace file still yields its readable events.
        (Previous docs claimed json.JSONDecodeError could propagate; it is
        deliberately swallowed per line.)

        Args:
            path: Path to JSONL trace file

        Returns:
            List of event dictionaries

        Raises:
            FileNotFoundError: If file doesn't exist
        """
        events: list[dict[str, Any]] = []
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    events.append(json.loads(line))
                except json.JSONDecodeError:
                    # Skip invalid lines but continue reading
                    continue
        return events

    @staticmethod
    def extract_stats(
        events: list[dict[str, Any]],
        infer_status_func: None | (
            Callable[[list[dict[str, Any]], dict[str, Any] | None], str]
        ) = None,
    ) -> TraceStats:
        """
        Extract execution statistics from trace events.

        This is a common operation shared between JsonlTraceSink and
        CloudTraceSink.

        Args:
            events: List of trace event dictionaries
            infer_status_func: Optional function to infer final_status from
                events. If None, uses default inference logic.

        Returns:
            TraceStats with execution statistics
        """
        if not events:
            return TraceStats(
                total_steps=0,
                total_events=0,
                duration_ms=None,
                final_status="unknown",
                started_at=None,
                ended_at=None,
            )

        # Locate the run boundary events (first occurrence of each)
        run_start = next((e for e in events if e.get("type") == "run_start"), None)
        run_end = next((e for e in events if e.get("type") == "run_end"), None)

        started_at: str | None = run_start.get("ts") if run_start else None
        ended_at: str | None = run_end.get("ts") if run_end else None
        duration_ms = TraceFileManager._duration_ms(started_at, ended_at)

        # Count distinct steps from step_start events; retried steps share
        # a step_index, so a set avoids double counting attempts.
        step_indices = {
            e.get("data", {}).get("step_index")
            for e in events
            if e.get("type") == "step_start"
            and e.get("data", {}).get("step_index") is not None
        }
        total_steps = len(step_indices)

        # If run_end carries a steps count, prefer it (more accurate)
        if run_end:
            steps_from_end = run_end.get("data", {}).get("steps")
            if steps_from_end is not None:
                total_steps = max(total_steps, steps_from_end)

        # Infer final status via the caller's hook or the default heuristic
        if infer_status_func:
            final_status = infer_status_func(events, run_end)
        else:
            final_status = TraceFileManager._infer_final_status(events, run_end)

        return TraceStats(
            total_steps=total_steps,
            total_events=len(events),
            duration_ms=duration_ms,
            final_status=final_status,
            started_at=started_at,
            ended_at=ended_at,
        )

    @staticmethod
    def _duration_ms(started_at: str | None, ended_at: str | None) -> int | None:
        """
        Return whole milliseconds between two ISO-8601 timestamps, or None.

        Best-effort: missing or malformed timestamps yield None rather than
        raising, since stats extraction must not fail on bad traces.
        """
        if not (started_at and ended_at):
            return None
        try:
            from datetime import datetime

            # fromisoformat does not accept a trailing "Z" on older
            # Pythons, so normalize it to an explicit UTC offset.
            start_dt = datetime.fromisoformat(started_at.replace("Z", "+00:00"))
            end_dt = datetime.fromisoformat(ended_at.replace("Z", "+00:00"))
            return int((end_dt - start_dt).total_seconds() * 1000)
        except Exception:
            return None

    @staticmethod
    def _infer_final_status(
        events: list[dict[str, Any]],
        run_end: dict[str, Any] | None,
    ) -> str:
        """
        Infer final status from trace events.

        Prefers an explicit status on the run_end event; otherwise infers
        from the presence of error and step_end events.

        Args:
            events: List of trace event dictionaries
            run_end: Optional run_end event dictionary

        Returns:
            Final status string: "success", "failure", "partial", or "unknown"
        """
        if run_end:
            status = run_end.get("data", {}).get("status")
            if status in ("success", "failure", "partial", "unknown"):
                return status

        # No explicit status: errors with completed steps mean partial
        # progress; errors with none mean outright failure.
        has_errors = any(e.get("type") == "error" for e in events)
        has_steps = any(e.get("type") == "step_end" for e in events)
        if has_errors:
            return "partial" if has_steps else "failure"
        return "success" if has_steps else "unknown"
|
|
@@ -13,6 +13,7 @@ class TraceFileInfo:
|
|
|
13
13
|
path: str
|
|
14
14
|
size_bytes: int
|
|
15
15
|
sha256: str
|
|
16
|
+
line_count: int | None = None # Number of lines in the trace file
|
|
16
17
|
|
|
17
18
|
def to_dict(self) -> dict:
|
|
18
19
|
return asdict(self)
|
|
@@ -28,6 +29,12 @@ class TraceSummary:
|
|
|
28
29
|
step_count: int
|
|
29
30
|
error_count: int
|
|
30
31
|
final_url: str | None
|
|
32
|
+
status: Literal["success", "failure", "partial", "unknown"] | None = None
|
|
33
|
+
agent_name: str | None = None # Agent name from run_start event
|
|
34
|
+
duration_ms: int | None = None # Calculated duration in milliseconds
|
|
35
|
+
counters: dict[str, int] | None = (
|
|
36
|
+
None # Aggregated counters (snapshot_count, action_count, error_count)
|
|
37
|
+
)
|
|
31
38
|
|
|
32
39
|
def to_dict(self) -> dict:
|
|
33
40
|
return asdict(self)
|
|
@@ -78,17 +85,18 @@ class StepIndex:
|
|
|
78
85
|
step_index: int
|
|
79
86
|
step_id: str
|
|
80
87
|
goal: str | None
|
|
81
|
-
status: Literal["
|
|
88
|
+
status: Literal["success", "failure", "partial", "unknown"]
|
|
82
89
|
ts_start: str
|
|
83
90
|
ts_end: str
|
|
84
91
|
offset_start: int
|
|
85
92
|
offset_end: int
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
93
|
+
line_number: int | None = None # Line number for byte-range fetching
|
|
94
|
+
url_before: str | None = None
|
|
95
|
+
url_after: str | None = None
|
|
96
|
+
snapshot_before: SnapshotInfo = field(default_factory=SnapshotInfo)
|
|
97
|
+
snapshot_after: SnapshotInfo = field(default_factory=SnapshotInfo)
|
|
98
|
+
action: ActionInfo = field(default_factory=ActionInfo)
|
|
99
|
+
counters: StepCounters = field(default_factory=StepCounters)
|
|
92
100
|
|
|
93
101
|
def to_dict(self) -> dict:
|
|
94
102
|
result = asdict(self)
|
|
@@ -109,3 +117,83 @@ class TraceIndex:
|
|
|
109
117
|
def to_dict(self) -> dict:
    """Convert to dictionary for JSON serialization."""
    # Delegates to asdict — presumably dataclasses.asdict, which also
    # converts nested dataclass fields; confirm against the file's imports.
    return asdict(self)
|
|
120
|
+
|
|
121
|
+
def to_sentience_studio_dict(self) -> dict:
    """
    Convert to SS-compatible format.

    Maps SDK field names to frontend expectations:
    - created_at -> generated_at
    - first_ts -> start_time
    - last_ts -> end_time
    - step_index (0-based) -> step (1-based)
    - ts_start -> timestamp
    - "unknown" statuses are emitted as None
    """
    from datetime import datetime

    summary = self.summary

    # Derive the duration from first/last timestamps when not precomputed.
    duration_ms = summary.duration_ms
    if duration_ms is None and summary.first_ts and summary.last_ts:
        try:
            begin = datetime.fromisoformat(summary.first_ts.replace("Z", "+00:00"))
            finish = datetime.fromisoformat(summary.last_ts.replace("Z", "+00:00"))
            duration_ms = int((finish - begin).total_seconds() * 1000)
        except (ValueError, AttributeError):
            duration_ms = None

    # Aggregate per-step counters when the summary carries none.
    counters = summary.counters
    if counters is None:
        counters = {
            "snapshot_count": sum(s.counters.snapshots for s in self.steps),
            "action_count": sum(s.counters.actions for s in self.steps),
            "error_count": summary.error_count,
        }

    def _step_entry(s) -> dict:
        # Only emit a snapshot object when the post-action snapshot has a URL.
        snapshot = None
        if s.snapshot_after.url:
            snapshot = {
                "url": s.snapshot_after.url,
                "digest": s.snapshot_after.digest,
            }
        return {
            "step": s.step_index + 1,  # Convert 0-based to 1-based
            "byte_offset": s.offset_start,
            "line_number": s.line_number,  # For byte-range fetching
            "timestamp": s.ts_start,  # Use the step's start time
            "action": {
                "type": s.action.type or "",
                "goal": s.goal,  # Goal lives inside action for the frontend
                "digest": s.action.args_digest,
            },
            "snapshot": snapshot,
            "status": None if s.status == "unknown" else s.status,
        }

    return {
        "version": self.version,
        "run_id": self.run_id,
        "generated_at": self.created_at,  # Renamed from created_at
        "trace_file": {
            "path": self.trace_file.path,
            "size_bytes": self.trace_file.size_bytes,
            "line_count": self.trace_file.line_count,
        },
        "summary": {
            "agent_name": summary.agent_name,
            "total_steps": summary.step_count,  # Renamed from step_count
            "status": None if summary.status == "unknown" else summary.status,
            "start_time": summary.first_ts,  # Renamed from first_ts
            "end_time": summary.last_ts,  # Renamed from last_ts
            "duration_ms": duration_ms,
            "counters": counters,
        },
        "steps": [_step_entry(s) for s in self.steps],
    }
|