onetool-mcp 1.0.0b1__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81):
  1. onetool/cli.py +63 -4
  2. onetool_mcp-1.0.0rc2.dist-info/METADATA +266 -0
  3. onetool_mcp-1.0.0rc2.dist-info/RECORD +129 -0
  4. {onetool_mcp-1.0.0b1.dist-info → onetool_mcp-1.0.0rc2.dist-info}/licenses/LICENSE.txt +1 -1
  5. {onetool_mcp-1.0.0b1.dist-info → onetool_mcp-1.0.0rc2.dist-info}/licenses/NOTICE.txt +54 -64
  6. ot/__main__.py +6 -6
  7. ot/config/__init__.py +48 -46
  8. ot/config/global_templates/__init__.py +2 -2
  9. ot/config/{defaults → global_templates}/diagram-templates/api-flow.mmd +33 -33
  10. ot/config/{defaults → global_templates}/diagram-templates/c4-context.puml +30 -30
  11. ot/config/{defaults → global_templates}/diagram-templates/class-diagram.mmd +87 -87
  12. ot/config/{defaults → global_templates}/diagram-templates/feature-mindmap.mmd +70 -70
  13. ot/config/{defaults → global_templates}/diagram-templates/microservices.d2 +81 -81
  14. ot/config/{defaults → global_templates}/diagram-templates/project-gantt.mmd +37 -37
  15. ot/config/{defaults → global_templates}/diagram-templates/state-machine.mmd +42 -42
  16. ot/config/global_templates/diagram.yaml +167 -0
  17. ot/config/global_templates/onetool.yaml +3 -1
  18. ot/config/{defaults → global_templates}/prompts.yaml +102 -97
  19. ot/config/global_templates/security.yaml +31 -0
  20. ot/config/global_templates/servers.yaml +93 -12
  21. ot/config/global_templates/snippets.yaml +5 -26
  22. ot/config/{defaults → global_templates}/tool_templates/__init__.py +7 -7
  23. ot/config/loader.py +221 -105
  24. ot/config/mcp.py +5 -1
  25. ot/config/secrets.py +192 -190
  26. ot/decorators.py +116 -116
  27. ot/executor/__init__.py +35 -35
  28. ot/executor/base.py +16 -16
  29. ot/executor/fence_processor.py +83 -83
  30. ot/executor/linter.py +142 -142
  31. ot/executor/pep723.py +288 -288
  32. ot/executor/runner.py +20 -6
  33. ot/executor/simple.py +163 -163
  34. ot/executor/validator.py +603 -164
  35. ot/http_client.py +145 -145
  36. ot/logging/__init__.py +37 -37
  37. ot/logging/entry.py +213 -213
  38. ot/logging/format.py +191 -188
  39. ot/logging/span.py +349 -349
  40. ot/meta.py +236 -14
  41. ot/paths.py +32 -49
  42. ot/prompts.py +218 -218
  43. ot/proxy/manager.py +14 -2
  44. ot/registry/__init__.py +189 -189
  45. ot/registry/parser.py +269 -269
  46. ot/server.py +330 -315
  47. ot/shortcuts/__init__.py +15 -15
  48. ot/shortcuts/aliases.py +87 -87
  49. ot/shortcuts/snippets.py +258 -258
  50. ot/stats/__init__.py +35 -35
  51. ot/stats/html.py +2 -2
  52. ot/stats/reader.py +354 -354
  53. ot/stats/timing.py +57 -57
  54. ot/support.py +63 -63
  55. ot/tools.py +1 -1
  56. ot/utils/batch.py +161 -161
  57. ot/utils/cache.py +120 -120
  58. ot/utils/exceptions.py +23 -23
  59. ot/utils/factory.py +178 -179
  60. ot/utils/format.py +65 -65
  61. ot/utils/http.py +202 -202
  62. ot/utils/platform.py +45 -45
  63. ot/utils/truncate.py +69 -69
  64. ot_tools/__init__.py +4 -4
  65. ot_tools/_convert/__init__.py +12 -12
  66. ot_tools/_convert/pdf.py +254 -254
  67. ot_tools/diagram.yaml +167 -167
  68. ot_tools/scaffold.py +2 -2
  69. ot_tools/transform.py +124 -19
  70. ot_tools/web_fetch.py +94 -43
  71. onetool_mcp-1.0.0b1.dist-info/METADATA +0 -163
  72. onetool_mcp-1.0.0b1.dist-info/RECORD +0 -132
  73. ot/config/defaults/bench.yaml +0 -4
  74. ot/config/defaults/onetool.yaml +0 -25
  75. ot/config/defaults/servers.yaml +0 -7
  76. ot/config/defaults/snippets.yaml +0 -4
  77. ot_tools/firecrawl.py +0 -732
  78. {onetool_mcp-1.0.0b1.dist-info → onetool_mcp-1.0.0rc2.dist-info}/WHEEL +0 -0
  79. {onetool_mcp-1.0.0b1.dist-info → onetool_mcp-1.0.0rc2.dist-info}/entry_points.txt +0 -0
  80. /ot/config/{defaults → global_templates}/tool_templates/extension.py +0 -0
  81. /ot/config/{defaults → global_templates}/tool_templates/isolated.py +0 -0
ot/stats/reader.py CHANGED
@@ -1,354 +1,354 @@
1
- """Stats reader with aggregation and filtering.
2
-
3
- Reads JSONL stats and aggregates by period with savings calculations.
4
- """
5
-
6
- from __future__ import annotations
7
-
8
- import json
9
- from dataclasses import dataclass
10
- from datetime import UTC, datetime, timedelta
11
- from typing import TYPE_CHECKING, Any, Literal
12
-
13
- from loguru import logger
14
-
15
- if TYPE_CHECKING:
16
- from pathlib import Path
17
-
18
- Period = Literal["day", "week", "month", "all"]
19
-
20
-
21
- @dataclass
22
- class ToolStats:
23
- """Aggregated statistics for a single tool."""
24
-
25
- tool: str
26
- total_calls: int
27
- success_count: int
28
- error_count: int
29
- total_chars_in: int
30
- total_chars_out: int
31
- total_duration_ms: int
32
- avg_duration_ms: float
33
-
34
- @property
35
- def success_rate(self) -> float:
36
- """Calculate success rate as percentage."""
37
- if self.total_calls == 0:
38
- return 0.0
39
- return (self.success_count / self.total_calls) * 100
40
-
41
- def to_dict(self) -> dict[str, Any]:
42
- """Convert to dictionary representation."""
43
- return {
44
- "tool": self.tool,
45
- "total_calls": self.total_calls,
46
- "success_count": self.success_count,
47
- "error_count": self.error_count,
48
- "success_rate": round(self.success_rate, 1),
49
- "total_chars_in": self.total_chars_in,
50
- "total_chars_out": self.total_chars_out,
51
- "total_duration_ms": self.total_duration_ms,
52
- "avg_duration_ms": round(self.avg_duration_ms, 1),
53
- }
54
-
55
-
56
- # Cost per coffee for savings display (hardcoded)
57
- COFFEE_COST_USD = 5.0
58
-
59
-
60
- @dataclass
61
- class AggregatedStats:
62
- """Aggregated statistics summary."""
63
-
64
- period: Period
65
- start_time: str | None
66
- end_time: str | None
67
- total_calls: int
68
- success_count: int
69
- error_count: int
70
- total_chars_in: int
71
- total_chars_out: int
72
- total_duration_ms: int
73
- context_saved: int
74
- time_saved_ms: int
75
- tools: list[ToolStats]
76
- model: str = ""
77
- cost_estimate_usd: float = 0.0
78
- savings_usd: float = 0.0
79
-
80
- @property
81
- def success_rate(self) -> float:
82
- """Calculate overall success rate as percentage."""
83
- if self.total_calls == 0:
84
- return 0.0
85
- return (self.success_count / self.total_calls) * 100
86
-
87
- @property
88
- def coffees(self) -> float:
89
- """Calculate coffee equivalent of savings."""
90
- return self.savings_usd / COFFEE_COST_USD
91
-
92
- def to_dict(self) -> dict[str, Any]:
93
- """Convert to dictionary representation."""
94
- return {
95
- "period": self.period,
96
- "start_time": self.start_time,
97
- "end_time": self.end_time,
98
- "total_calls": self.total_calls,
99
- "success_count": self.success_count,
100
- "error_count": self.error_count,
101
- "success_rate": round(self.success_rate, 1),
102
- "total_chars_in": self.total_chars_in,
103
- "total_chars_out": self.total_chars_out,
104
- "total_duration_ms": self.total_duration_ms,
105
- "context_saved": self.context_saved,
106
- "time_saved_ms": self.time_saved_ms,
107
- "model": self.model,
108
- "cost_estimate_usd": round(self.cost_estimate_usd, 4),
109
- "savings_usd": round(self.savings_usd, 2),
110
- "coffees": round(self.coffees, 1),
111
- "tools": [t.to_dict() for t in self.tools],
112
- }
113
-
114
-
115
- class StatsReader:
116
- """Reads and aggregates statistics from JSONL.
117
-
118
- Usage:
119
- reader = StatsReader(path, context_per_call=30000, time_overhead_ms=4000)
120
- stats = reader.read(period="week", tool="brave.search")
121
- """
122
-
123
- def __init__(
124
- self,
125
- path: Path,
126
- context_per_call: int = 30000,
127
- time_overhead_per_call_ms: int = 4000,
128
- model: str = "anthropic/claude-opus-4.5",
129
- cost_per_million_input_tokens: float = 15.0,
130
- cost_per_million_output_tokens: float = 75.0,
131
- chars_per_token: float = 4.0,
132
- ) -> None:
133
- """Initialize reader.
134
-
135
- Args:
136
- path: Path to JSONL file
137
- context_per_call: Context tokens saved per consolidated call
138
- time_overhead_per_call_ms: Time overhead in ms saved per call
139
- model: Model name for cost estimation
140
- cost_per_million_input_tokens: Cost in USD per million input tokens
141
- cost_per_million_output_tokens: Cost in USD per million output tokens
142
- chars_per_token: Average characters per token for estimation
143
- """
144
- self._path = path
145
- self._context_per_call = context_per_call
146
- self._time_overhead_ms = time_overhead_per_call_ms
147
- self._model = model
148
- self._cost_per_m_input = cost_per_million_input_tokens
149
- self._cost_per_m_output = cost_per_million_output_tokens
150
- self._chars_per_token = chars_per_token
151
-
152
- def read(
153
- self,
154
- period: Period = "all",
155
- tool: str | None = None,
156
- ) -> AggregatedStats:
157
- """Read and aggregate stats.
158
-
159
- Args:
160
- period: Time period to filter (day/week/month/all)
161
- tool: Optional tool name filter
162
-
163
- Returns:
164
- Aggregated statistics
165
- """
166
- records = self._load_records()
167
- filtered = self._filter_records(records, period, tool)
168
- return self._aggregate(filtered, period)
169
-
170
- def _load_records(self) -> list[dict[str, Any]]:
171
- """Load all records from JSONL."""
172
- if not self._path.exists():
173
- logger.debug(f"Stats file not found: {self._path}")
174
- return []
175
-
176
- records: list[dict[str, Any]] = []
177
- try:
178
- with self._path.open() as f:
179
- for line in f:
180
- line = line.strip()
181
- if line:
182
- try:
183
- records.append(json.loads(line))
184
- except json.JSONDecodeError:
185
- logger.debug(f"Skipping malformed JSON line: {line[:50]}")
186
- except Exception as e:
187
- logger.warning(f"Failed to read stats: {e}")
188
- return []
189
-
190
- return records
191
-
192
- def _filter_records(
193
- self,
194
- records: list[dict[str, Any]],
195
- period: Period,
196
- tool: str | None,
197
- ) -> list[dict[str, Any]]:
198
- """Filter records by period and tool."""
199
- if not records:
200
- return []
201
-
202
- # Calculate period cutoff
203
- cutoff = self._get_period_cutoff(period)
204
-
205
- filtered: list[dict[str, Any]] = []
206
- for record in records:
207
- # Filter by period
208
- if cutoff is not None:
209
- try:
210
- ts = datetime.fromisoformat(record["ts"])
211
- if ts < cutoff:
212
- continue
213
- except (KeyError, ValueError):
214
- continue
215
-
216
- # Filter by tool (only applies to tool-type records)
217
- if (
218
- tool is not None
219
- and record.get("type") == "tool"
220
- and record.get("tool") != tool
221
- ):
222
- continue
223
-
224
- filtered.append(record)
225
-
226
- return filtered
227
-
228
- def _get_period_cutoff(self, period: Period) -> datetime | None:
229
- """Get cutoff datetime for period."""
230
- if period == "all":
231
- return None
232
-
233
- now = datetime.now(UTC)
234
- if period == "day":
235
- return now - timedelta(days=1)
236
- elif period == "week":
237
- return now - timedelta(weeks=1)
238
- elif period == "month":
239
- return now - timedelta(days=30)
240
-
241
- return None
242
-
243
- def _aggregate(
244
- self, records: list[dict[str, Any]], period: Period
245
- ) -> AggregatedStats:
246
- """Aggregate records into summary stats.
247
-
248
- Records are split by type:
249
- - "run" records: contain chars_in/chars_out, used for run counts and savings
250
- - "tool" records: contain tool name, used for per-tool breakdown
251
- """
252
- if not records:
253
- return AggregatedStats(
254
- period=period,
255
- start_time=None,
256
- end_time=None,
257
- total_calls=0,
258
- success_count=0,
259
- error_count=0,
260
- total_chars_in=0,
261
- total_chars_out=0,
262
- total_duration_ms=0,
263
- context_saved=0,
264
- time_saved_ms=0,
265
- tools=[],
266
- )
267
-
268
- # Separate run-level and tool-level records
269
- run_records: list[dict[str, Any]] = []
270
- tool_records_by_name: dict[str, list[dict[str, Any]]] = {}
271
- timestamps: list[str] = []
272
-
273
- for record in records:
274
- record_type = record.get("type", "run")
275
- ts = record.get("ts")
276
- if ts:
277
- timestamps.append(ts)
278
-
279
- if record_type == "run":
280
- run_records.append(record)
281
- elif record_type == "tool":
282
- tool_name = record.get("tool", "unknown")
283
- if tool_name not in tool_records_by_name:
284
- tool_records_by_name[tool_name] = []
285
- tool_records_by_name[tool_name].append(record)
286
-
287
- # Sort timestamps for range
288
- timestamps.sort()
289
-
290
- # Aggregate run-level stats
291
- run_count = len(run_records)
292
- run_success = sum(1 for r in run_records if r.get("success") is True)
293
- run_error = run_count - run_success
294
- total_chars_in = sum(int(r.get("chars_in", 0)) for r in run_records)
295
- total_chars_out = sum(int(r.get("chars_out", 0)) for r in run_records)
296
- run_duration = sum(int(r.get("duration_ms", 0)) for r in run_records)
297
-
298
- # Aggregate per-tool stats
299
- tool_stats: list[ToolStats] = []
300
- total_tool_duration = 0
301
-
302
- for tool_name, tool_records in sorted(tool_records_by_name.items()):
303
- calls = len(tool_records)
304
- success = sum(1 for r in tool_records if r.get("success") is True)
305
- errors = calls - success
306
- duration = sum(int(r.get("duration_ms", 0)) for r in tool_records)
307
-
308
- tool_stats.append(
309
- ToolStats(
310
- tool=tool_name,
311
- total_calls=calls,
312
- success_count=success,
313
- error_count=errors,
314
- total_chars_in=0, # Tool records don't have chars
315
- total_chars_out=0,
316
- total_duration_ms=duration,
317
- avg_duration_ms=duration / calls if calls > 0 else 0,
318
- )
319
- )
320
-
321
- total_tool_duration += duration
322
-
323
- # Calculate savings (context and time saved by consolidating run calls)
324
- context_saved = run_count * self._context_per_call
325
- time_saved = run_count * self._time_overhead_ms
326
-
327
- # Calculate cost estimate (actual cost of tokens used)
328
- input_tokens = total_chars_in / self._chars_per_token
329
- output_tokens = total_chars_out / self._chars_per_token
330
- cost_estimate = (
331
- (input_tokens / 1_000_000) * self._cost_per_m_input
332
- + (output_tokens / 1_000_000) * self._cost_per_m_output
333
- )
334
-
335
- # Calculate savings estimate (cost of context overhead avoided)
336
- savings_usd = (context_saved / 1_000_000) * self._cost_per_m_input
337
-
338
- return AggregatedStats(
339
- period=period,
340
- start_time=timestamps[0] if timestamps else None,
341
- end_time=timestamps[-1] if timestamps else None,
342
- total_calls=run_count,
343
- success_count=run_success,
344
- error_count=run_error,
345
- total_chars_in=total_chars_in,
346
- total_chars_out=total_chars_out,
347
- total_duration_ms=run_duration,
348
- context_saved=context_saved,
349
- time_saved_ms=time_saved,
350
- tools=tool_stats,
351
- model=self._model,
352
- cost_estimate_usd=cost_estimate,
353
- savings_usd=savings_usd,
354
- )
1
+ """Stats reader with aggregation and filtering.
2
+
3
+ Reads JSONL stats and aggregates by period with savings calculations.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ from dataclasses import dataclass
10
+ from datetime import UTC, datetime, timedelta
11
+ from typing import TYPE_CHECKING, Any, Literal
12
+
13
+ from loguru import logger
14
+
15
+ if TYPE_CHECKING:
16
+ from pathlib import Path
17
+
18
+ Period = Literal["day", "week", "month", "all"]
19
+
20
+
21
@dataclass
class ToolStats:
    """Per-tool call statistics rolled up from individual tool records."""

    tool: str
    total_calls: int
    success_count: int
    error_count: int
    total_chars_in: int
    total_chars_out: int
    total_duration_ms: int
    avg_duration_ms: float

    @property
    def success_rate(self) -> float:
        """Percentage of calls that succeeded (0.0 when no calls recorded)."""
        if not self.total_calls:
            return 0.0
        return 100.0 * self.success_count / self.total_calls

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict suitable for JSON output."""
        payload: dict[str, Any] = {
            "tool": self.tool,
            "total_calls": self.total_calls,
            "success_count": self.success_count,
            "error_count": self.error_count,
        }
        # Derived/rounded values appended after the raw counters; insertion
        # order is preserved, so key order matches the original layout.
        payload["success_rate"] = round(self.success_rate, 1)
        payload["total_chars_in"] = self.total_chars_in
        payload["total_chars_out"] = self.total_chars_out
        payload["total_duration_ms"] = self.total_duration_ms
        payload["avg_duration_ms"] = round(self.avg_duration_ms, 1)
        return payload
54
+
55
+
56
# Price of one coffee in USD, used to express savings in relatable units.
COFFEE_COST_USD = 5.0


@dataclass
class AggregatedStats:
    """Summary statistics over a filtered set of records for one period."""

    period: Period
    start_time: str | None
    end_time: str | None
    total_calls: int
    success_count: int
    error_count: int
    total_chars_in: int
    total_chars_out: int
    total_duration_ms: int
    context_saved: int
    time_saved_ms: int
    tools: list[ToolStats]
    model: str = ""
    cost_estimate_usd: float = 0.0
    savings_usd: float = 0.0

    @property
    def success_rate(self) -> float:
        """Overall percentage of successful calls (0.0 when empty)."""
        total = self.total_calls
        return (self.success_count / total) * 100 if total else 0.0

    @property
    def coffees(self) -> float:
        """Savings expressed as a number of coffees."""
        return self.savings_usd / COFFEE_COST_USD

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict suitable for JSON output."""
        # Assemble as ordered pairs so the resulting key order is explicit.
        pairs: list[tuple[str, Any]] = [
            ("period", self.period),
            ("start_time", self.start_time),
            ("end_time", self.end_time),
            ("total_calls", self.total_calls),
            ("success_count", self.success_count),
            ("error_count", self.error_count),
            ("success_rate", round(self.success_rate, 1)),
            ("total_chars_in", self.total_chars_in),
            ("total_chars_out", self.total_chars_out),
            ("total_duration_ms", self.total_duration_ms),
            ("context_saved", self.context_saved),
            ("time_saved_ms", self.time_saved_ms),
            ("model", self.model),
            ("cost_estimate_usd", round(self.cost_estimate_usd, 4)),
            ("savings_usd", round(self.savings_usd, 2)),
            ("coffees", round(self.coffees, 1)),
            ("tools", [entry.to_dict() for entry in self.tools]),
        ]
        return dict(pairs)
113
+
114
+
115
class StatsReader:
    """Reads and aggregates statistics from JSONL.

    Usage:
        reader = StatsReader(path, context_per_call=30000, time_overhead_ms=4000)
        stats = reader.read(period="week", tool="brave.search")
    """

    def __init__(
        self,
        path: Path,
        context_per_call: int = 30000,
        time_overhead_per_call_ms: int = 4000,
        model: str = "anthropic/claude-opus-4.5",
        cost_per_million_input_tokens: float = 15.0,
        cost_per_million_output_tokens: float = 75.0,
        chars_per_token: float = 4.0,
    ) -> None:
        """Initialize reader.

        Args:
            path: Path to JSONL file
            context_per_call: Context tokens saved per consolidated call
            time_overhead_per_call_ms: Time overhead in ms saved per call
            model: Model name for cost estimation
            cost_per_million_input_tokens: Cost in USD per million input tokens
            cost_per_million_output_tokens: Cost in USD per million output tokens
            chars_per_token: Average characters per token for estimation
        """
        self._path = path
        self._context_per_call = context_per_call
        self._time_overhead_ms = time_overhead_per_call_ms
        self._model = model
        self._cost_per_m_input = cost_per_million_input_tokens
        self._cost_per_m_output = cost_per_million_output_tokens
        self._chars_per_token = chars_per_token

    def read(
        self,
        period: Period = "all",
        tool: str | None = None,
    ) -> AggregatedStats:
        """Read and aggregate stats.

        Args:
            period: Time period to filter (day/week/month/all)
            tool: Optional tool name filter

        Returns:
            Aggregated statistics
        """
        records = self._load_records()
        filtered = self._filter_records(records, period, tool)
        return self._aggregate(filtered, period)

    def _load_records(self) -> list[dict[str, Any]]:
        """Load all records from JSONL.

        Returns an empty list when the file is missing or unreadable;
        individual malformed lines are skipped so one bad write does not
        discard the whole history.
        """
        if not self._path.exists():
            logger.debug(f"Stats file not found: {self._path}")
            return []

        records: list[dict[str, Any]] = []
        try:
            with self._path.open() as f:
                for line in f:
                    line = line.strip()
                    if line:
                        try:
                            records.append(json.loads(line))
                        except json.JSONDecodeError:
                            logger.debug(f"Skipping malformed JSON line: {line[:50]}")
        except Exception as e:
            # Deliberately broad: stats are best-effort and must never
            # crash the caller on I/O errors.
            logger.warning(f"Failed to read stats: {e}")
            return []

        return records

    def _filter_records(
        self,
        records: list[dict[str, Any]],
        period: Period,
        tool: str | None,
    ) -> list[dict[str, Any]]:
        """Filter records by period and tool.

        Records with a missing, unparseable, or naive timestamp are dropped
        when a period cutoff is in effect.
        """
        if not records:
            return []

        # Calculate period cutoff (None means "all": no time filtering)
        cutoff = self._get_period_cutoff(period)

        filtered: list[dict[str, Any]] = []
        for record in records:
            # Filter by period
            if cutoff is not None:
                try:
                    ts = datetime.fromisoformat(record["ts"])
                    if ts < cutoff:
                        continue
                except (KeyError, ValueError, TypeError):
                    # KeyError: no "ts"; ValueError: bad ISO string;
                    # TypeError: naive datetime compared against the
                    # timezone-aware cutoff — skip the record instead of
                    # letting the comparison crash the whole read.
                    continue

            # Filter by tool (only applies to tool-type records; run-level
            # records always pass so overall totals stay meaningful)
            if (
                tool is not None
                and record.get("type") == "tool"
                and record.get("tool") != tool
            ):
                continue

            filtered.append(record)

        return filtered

    def _get_period_cutoff(self, period: Period) -> datetime | None:
        """Get cutoff datetime for period (None for "all")."""
        if period == "all":
            return None

        now = datetime.now(UTC)
        if period == "day":
            return now - timedelta(days=1)
        elif period == "week":
            return now - timedelta(weeks=1)
        elif period == "month":
            # Approximates a month as 30 days.
            return now - timedelta(days=30)

        return None

    def _aggregate(
        self, records: list[dict[str, Any]], period: Period
    ) -> AggregatedStats:
        """Aggregate records into summary stats.

        Records are split by type:
        - "run" records: contain chars_in/chars_out, used for run counts and savings
        - "tool" records: contain tool name, used for per-tool breakdown

        Records without a "type" field are treated as run records.
        """
        if not records:
            return AggregatedStats(
                period=period,
                start_time=None,
                end_time=None,
                total_calls=0,
                success_count=0,
                error_count=0,
                total_chars_in=0,
                total_chars_out=0,
                total_duration_ms=0,
                context_saved=0,
                time_saved_ms=0,
                tools=[],
            )

        # Separate run-level and tool-level records
        run_records: list[dict[str, Any]] = []
        tool_records_by_name: dict[str, list[dict[str, Any]]] = {}
        timestamps: list[str] = []

        for record in records:
            record_type = record.get("type", "run")
            ts = record.get("ts")
            if ts:
                timestamps.append(ts)

            if record_type == "run":
                run_records.append(record)
            elif record_type == "tool":
                tool_name = record.get("tool", "unknown")
                if tool_name not in tool_records_by_name:
                    tool_records_by_name[tool_name] = []
                tool_records_by_name[tool_name].append(record)

        # Sort timestamps for range (ISO-8601 strings sort chronologically)
        timestamps.sort()

        # Aggregate run-level stats
        run_count = len(run_records)
        run_success = sum(1 for r in run_records if r.get("success") is True)
        run_error = run_count - run_success
        total_chars_in = sum(int(r.get("chars_in", 0)) for r in run_records)
        total_chars_out = sum(int(r.get("chars_out", 0)) for r in run_records)
        run_duration = sum(int(r.get("duration_ms", 0)) for r in run_records)

        # Aggregate per-tool stats, sorted by tool name for stable output
        tool_stats: list[ToolStats] = []
        for tool_name, tool_records in sorted(tool_records_by_name.items()):
            calls = len(tool_records)
            success = sum(1 for r in tool_records if r.get("success") is True)
            errors = calls - success
            duration = sum(int(r.get("duration_ms", 0)) for r in tool_records)

            tool_stats.append(
                ToolStats(
                    tool=tool_name,
                    total_calls=calls,
                    success_count=success,
                    error_count=errors,
                    total_chars_in=0,  # Tool records don't have chars
                    total_chars_out=0,
                    total_duration_ms=duration,
                    avg_duration_ms=duration / calls if calls > 0 else 0,
                )
            )

        # Calculate savings (context and time saved by consolidating run calls)
        context_saved = run_count * self._context_per_call
        time_saved = run_count * self._time_overhead_ms

        # Calculate cost estimate (actual cost of tokens used)
        input_tokens = total_chars_in / self._chars_per_token
        output_tokens = total_chars_out / self._chars_per_token
        cost_estimate = (
            (input_tokens / 1_000_000) * self._cost_per_m_input
            + (output_tokens / 1_000_000) * self._cost_per_m_output
        )

        # Calculate savings estimate (cost of context overhead avoided)
        savings_usd = (context_saved / 1_000_000) * self._cost_per_m_input

        return AggregatedStats(
            period=period,
            start_time=timestamps[0] if timestamps else None,
            end_time=timestamps[-1] if timestamps else None,
            total_calls=run_count,
            success_count=run_success,
            error_count=run_error,
            total_chars_in=total_chars_in,
            total_chars_out=total_chars_out,
            total_duration_ms=run_duration,
            context_saved=context_saved,
            time_saved_ms=time_saved,
            tools=tool_stats,
            model=self._model,
            cost_estimate_usd=cost_estimate,
            savings_usd=savings_usd,
        )