ursa-ai 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. ursa/__init__.py +3 -0
  2. ursa/agents/__init__.py +32 -0
  3. ursa/agents/acquisition_agents.py +812 -0
  4. ursa/agents/arxiv_agent.py +429 -0
  5. ursa/agents/base.py +728 -0
  6. ursa/agents/chat_agent.py +60 -0
  7. ursa/agents/code_review_agent.py +341 -0
  8. ursa/agents/execution_agent.py +915 -0
  9. ursa/agents/hypothesizer_agent.py +614 -0
  10. ursa/agents/lammps_agent.py +465 -0
  11. ursa/agents/mp_agent.py +204 -0
  12. ursa/agents/optimization_agent.py +410 -0
  13. ursa/agents/planning_agent.py +219 -0
  14. ursa/agents/rag_agent.py +304 -0
  15. ursa/agents/recall_agent.py +54 -0
  16. ursa/agents/websearch_agent.py +196 -0
  17. ursa/cli/__init__.py +363 -0
  18. ursa/cli/hitl.py +516 -0
  19. ursa/cli/hitl_api.py +75 -0
  20. ursa/observability/metrics_charts.py +1279 -0
  21. ursa/observability/metrics_io.py +11 -0
  22. ursa/observability/metrics_session.py +750 -0
  23. ursa/observability/pricing.json +97 -0
  24. ursa/observability/pricing.py +321 -0
  25. ursa/observability/timing.py +1466 -0
  26. ursa/prompt_library/__init__.py +0 -0
  27. ursa/prompt_library/code_review_prompts.py +51 -0
  28. ursa/prompt_library/execution_prompts.py +50 -0
  29. ursa/prompt_library/hypothesizer_prompts.py +17 -0
  30. ursa/prompt_library/literature_prompts.py +11 -0
  31. ursa/prompt_library/optimization_prompts.py +131 -0
  32. ursa/prompt_library/planning_prompts.py +79 -0
  33. ursa/prompt_library/websearch_prompts.py +131 -0
  34. ursa/tools/__init__.py +0 -0
  35. ursa/tools/feasibility_checker.py +114 -0
  36. ursa/tools/feasibility_tools.py +1075 -0
  37. ursa/tools/run_command.py +27 -0
  38. ursa/tools/write_code.py +42 -0
  39. ursa/util/__init__.py +0 -0
  40. ursa/util/diff_renderer.py +128 -0
  41. ursa/util/helperFunctions.py +142 -0
  42. ursa/util/logo_generator.py +625 -0
  43. ursa/util/memory_logger.py +183 -0
  44. ursa/util/optimization_schema.py +78 -0
  45. ursa/util/parse.py +405 -0
  46. ursa_ai-0.9.1.dist-info/METADATA +304 -0
  47. ursa_ai-0.9.1.dist-info/RECORD +51 -0
  48. ursa_ai-0.9.1.dist-info/WHEEL +5 -0
  49. ursa_ai-0.9.1.dist-info/entry_points.txt +2 -0
  50. ursa_ai-0.9.1.dist-info/licenses/LICENSE +8 -0
  51. ursa_ai-0.9.1.dist-info/top_level.txt +1 -0
ursa/observability/pricing.json
@@ -0,0 +1,97 @@
+{
+  "_note": "Prices are per 1K tokens; derived from OpenAI's table (originally per 1M). Source: https://platform.openai.com/docs/pricing",
+  "local/*": { "input_per_1k": 0.0, "output_per_1k": 0.0, "cached_input_multiplier": 1.0 },
+
+  "gpt-5.1": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5.1": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+
+  "gpt-5": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+
+  "gpt-5-pro": { "input_per_1k": 0.01500, "output_per_1k": 0.12, "cached_input_multiplier": 1.0 },
+  "openai:gpt-5-pro": { "input_per_1k": 0.01500, "output_per_1k": 0.12, "cached_input_multiplier": 1.0 },
+
+  "gpt-5-mini": { "input_per_1k": 0.00025, "output_per_1k": 0.002, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5-mini": { "input_per_1k": 0.00025, "output_per_1k": 0.002, "cached_input_multiplier": 0.1 },
+
+  "gpt-5-nano": { "input_per_1k": 0.00005, "output_per_1k": 0.0004, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5-nano": { "input_per_1k": 0.00005, "output_per_1k": 0.0004, "cached_input_multiplier": 0.1 },
+
+  "gpt-5-chat-latest": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5-chat-latest": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+
+  "gpt-5.1-codex": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5.1-codex": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+
+  "gpt-5.1-codex-mini": { "input_per_1k": 0.00025, "output_per_1k": 0.002, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5.1-codex-mini": { "input_per_1k": 0.00025, "output_per_1k": 0.002, "cached_input_multiplier": 0.1 },
+
+  "gpt-5-codex": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+  "openai:gpt-5-codex": { "input_per_1k": 0.00125, "output_per_1k": 0.01, "cached_input_multiplier": 0.1 },
+
+  "gpt-4.1": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+  "openai:gpt-4.1": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+
+  "gpt-4.1-mini": { "input_per_1k": 0.0004, "output_per_1k": 0.0016, "cached_input_multiplier": 0.25 },
+  "openai:gpt-4.1-mini": { "input_per_1k": 0.0004, "output_per_1k": 0.0016, "cached_input_multiplier": 0.25 },
+
+  "gpt-4.1-nano": { "input_per_1k": 0.0001, "output_per_1k": 0.0004, "cached_input_multiplier": 0.25 },
+  "openai:gpt-4.1-nano": { "input_per_1k": 0.0001, "output_per_1k": 0.0004, "cached_input_multiplier": 0.25 },
+
+  "gpt-4o": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 0.5 },
+  "openai:gpt-4o": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 0.5 },
+
+  "gpt-4o-2024-05-13": { "input_per_1k": 0.005, "output_per_1k": 0.015, "cached_input_multiplier": 1.0 },
+  "openai:gpt-4o-2024-05-13": { "input_per_1k": 0.005, "output_per_1k": 0.015, "cached_input_multiplier": 1.0 },
+
+  "gpt-4o-mini": { "input_per_1k": 0.00015, "output_per_1k": 0.0006, "cached_input_multiplier": 0.5 },
+  "openai:gpt-4o-mini": { "input_per_1k": 0.00015, "output_per_1k": 0.0006, "cached_input_multiplier": 0.5 },
+
+  "gpt-realtime": { "input_per_1k": 0.004, "output_per_1k": 0.016, "cached_input_multiplier": 0.1 },
+  "openai:gpt-realtime": { "input_per_1k": 0.004, "output_per_1k": 0.016, "cached_input_multiplier": 0.1 },
+
+  "gpt-4o-realtime-preview": { "input_per_1k": 0.005, "output_per_1k": 0.02, "cached_input_multiplier": 0.5 },
+  "openai:gpt-4o-realtime-preview": { "input_per_1k": 0.005, "output_per_1k": 0.02, "cached_input_multiplier": 0.5 },
+
+  "gpt-4o-mini-realtime-preview": { "input_per_1k": 0.0006, "output_per_1k": 0.0024, "cached_input_multiplier": 0.5 },
+  "openai:gpt-4o-mini-realtime-preview": { "input_per_1k": 0.0006, "output_per_1k": 0.0024, "cached_input_multiplier": 0.5 },
+
+  "gpt-audio": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 1.0 },
+  "openai:gpt-audio": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 1.0 },
+
+  "gpt-4o-audio-preview": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 1.0 },
+  "openai:gpt-4o-audio-preview": { "input_per_1k": 0.0025, "output_per_1k": 0.01, "cached_input_multiplier": 1.0 },
+
+  "gpt-4o-mini-audio-preview": { "input_per_1k": 0.00015, "output_per_1k": 0.0006, "cached_input_multiplier": 1.0 },
+  "openai:gpt-4o-mini-audio-preview": { "input_per_1k": 0.00015, "output_per_1k": 0.0006, "cached_input_multiplier": 1.0 },
+
+  "o1": { "input_per_1k": 0.015, "output_per_1k": 0.06, "cached_input_multiplier": 0.5 },
+  "openai:o1": { "input_per_1k": 0.015, "output_per_1k": 0.06, "cached_input_multiplier": 0.5 },
+
+  "o1-pro": { "input_per_1k": 0.15, "output_per_1k": 0.6, "cached_input_multiplier": 1.0 },
+  "openai:o1-pro": { "input_per_1k": 0.15, "output_per_1k": 0.6, "cached_input_multiplier": 1.0 },
+
+  "o3-pro": { "input_per_1k": 0.02, "output_per_1k": 0.08, "cached_input_multiplier": 1.0 },
+  "openai:o3-pro": { "input_per_1k": 0.02, "output_per_1k": 0.08, "cached_input_multiplier": 1.0 },
+
+  "o3": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+  "openai:o3": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+
+  "o3-deep-research": { "input_per_1k": 0.01, "output_per_1k": 0.04, "cached_input_multiplier": 0.25 },
+  "openai:o3-deep-research": { "input_per_1k": 0.01, "output_per_1k": 0.04, "cached_input_multiplier": 0.25 },
+
+  "o4-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.25 },
+  "openai:o4-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.25 },
+
+  "o4-mini-deep-research": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+  "openai:o4-mini-deep-research": { "input_per_1k": 0.002, "output_per_1k": 0.008, "cached_input_multiplier": 0.25 },
+
+  "o3-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.5 },
+  "openai:o3-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.5 },
+
+  "o1-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.5 },
+  "openai:o1-mini": { "input_per_1k": 0.0011, "output_per_1k": 0.0044, "cached_input_multiplier": 0.5 },
+
+  "codex-mini-latest": { "input_per_1k": 0.0015, "output_per_1k": 0.006, "cached_input_multiplier": 0.25 },
+  "openai:codex-mini-latest": { "input_per_1k": 0.0015, "output_per_1k": 0.006, "cached_input_multiplier": 0.25 }
+}
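The per-1K figures above are OpenAI's published per-1M rates divided by 1,000, and a call's cost is tokens / 1000 times the per-1K rate, with cached prompt tokens discounted by the multiplier. A minimal sketch using the gpt-4o-mini row (the token counts are invented for illustration):

# Illustration only: rates copied from the gpt-4o-mini entry; token counts are invented.
input_per_1k, output_per_1k, cached_mult = 0.00015, 0.0006, 0.5
prompt_tokens, cached_tokens, output_tokens = 12_000, 2_000, 3_000

cost = (
    (prompt_tokens - cached_tokens) / 1000 * input_per_1k  # fresh input: 10 * 0.00015 = 0.0015
    + cached_tokens / 1000 * input_per_1k * cached_mult    # cached input: 2 * 0.00015 * 0.5 = 0.00015
    + output_tokens / 1000 * output_per_1k                 # output: 3 * 0.0006 = 0.0018
)
print(round(cost, 6))  # 0.00345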
ursa/observability/pricing.py
@@ -0,0 +1,321 @@
+from __future__ import annotations
+
+import json
+import os
+from dataclasses import asdict, dataclass
+from decimal import ROUND_HALF_UP, Decimal, getcontext
+from importlib import resources
+from typing import Any, Optional, Tuple
+
+getcontext().prec = 28  # robust money math
+
+
+# ---------- Model pricing schema ----------
+
+
+@dataclass
+class ModelPricing:
+    # Prices are USD per 1,000 tokens
+    input_per_1k: Decimal
+    output_per_1k: Decimal
+    reasoning_per_1k: Optional[Decimal] = (
+        None  # None --> charge 0 for reasoning tokens
+    )
+    cached_input_multiplier: Decimal = Decimal(
+        "1"
+    )  # e.g., 0.25 if your provider discounts cached prompt tokens
+
+    def price_tokens(self, usage: dict[str, Any]) -> dict[str, Decimal]:
+        """Compute cost components from a usage dict with keys like input_tokens, output_tokens, reasoning_tokens, cached_tokens."""
+
+        def _to_dec(x) -> Decimal:
+            if x is None:
+                return Decimal("0")
+            try:
+                return Decimal(str(x))
+            except Exception:
+                return Decimal("0")
+
+        in_t = _to_dec(usage.get("input_tokens", usage.get("prompt_tokens", 0)))
+        out_t = _to_dec(
+            usage.get("output_tokens", usage.get("completion_tokens", 0))
+        )
+        # total_t = _to_dec(usage.get("total_tokens", (in_t + out_t)))
+        cached_t = _to_dec(usage.get("cached_tokens", 0))
+        reasoning_t = _to_dec(usage.get("reasoning_tokens", 0))
+
+        eff_in = (in_t - cached_t) if in_t > cached_t else Decimal("0")
+        cached_eff_in = cached_t
+
+        input_cost = (eff_in / Decimal(1000)) * self.input_per_1k
+        cached_input_cost = (
+            (cached_eff_in / Decimal(1000))
+            * self.input_per_1k
+            * self.cached_input_multiplier
+        )
+        output_cost = (out_t / Decimal(1000)) * self.output_per_1k
+        reasoning_cost = Decimal("0")
+        if self.reasoning_per_1k is not None and reasoning_t > 0:
+            reasoning_cost = (
+                reasoning_t / Decimal(1000)
+            ) * self.reasoning_per_1k
+
+        total_cost = (
+            input_cost + cached_input_cost + output_cost + reasoning_cost
+        )
+        return {
+            "input_cost": input_cost,
+            "cached_input_cost": cached_input_cost,
+            "output_cost": output_cost,
+            "reasoning_cost": reasoning_cost,
+            "total_cost": total_cost,
+        }
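A minimal sketch of how price_tokens applies these fields, assuming a gpt-4o-style price point; the usage numbers are invented:

# Hypothetical usage rollup (numbers invented); pricing mirrors the gpt-4o entry in pricing.json.
mp = ModelPricing(
    input_per_1k=Decimal("0.0025"),
    output_per_1k=Decimal("0.01"),
    cached_input_multiplier=Decimal("0.5"),
)
costs = mp.price_tokens({"input_tokens": 4000, "cached_tokens": 1000, "output_tokens": 500})
# fresh input : (4000 - 1000) / 1000 * 0.0025 = 0.0075
# cached input: 1000 / 1000 * 0.0025 * 0.5    = 0.00125
# output      : 500 / 1000 * 0.01             = 0.005
assert costs["total_cost"] == Decimal("0.01375")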
+
+
+# ---------- Registry & resolution ----------
+
+
+def _dec(x: str | float | int) -> Decimal:
+    try:
+        return Decimal(str(x))
+    except Exception:
+        return Decimal("0")
+
+
+# DEFAULTS: keep $0.00 so you don’t accidentally attribute costs.
+# Fill in values for your org as needed (USD per 1K tokens).
+DEFAULT_REGISTRY: dict[str, ModelPricing] = {
+    # Examples — edit to match your negotiated prices:
+    # "openai:gpt-5-mini": ModelPricing(_dec("5.00"), _dec("15.00")),
+    # "openai:gpt-5-mini": ModelPricing(_dec("2.50"), _dec("10.00"), reasoning_per_1k=_dec("5.00")),
+    "openai:gpt-5-mini": ModelPricing(
+        _dec("0.00"), _dec("0.00"), reasoning_per_1k=_dec("0.00")
+    ),
+    "local/*": ModelPricing(_dec("0.00"), _dec("0.00")),
+}
+
+
+def normalize_model_name(name: str) -> str:
+    return (name or "").strip().lower()
+
+
+def resolve_model_name(event: dict[str, Any]) -> str:
+    m = (
+        ((event.get("metadata") or {}).get("model"))
+        or ((event.get("metadata") or {}).get("ls_model_name"))
+        or (event.get("name") or "").replace("llm:", "")
+    )
+    return normalize_model_name(m)
+
+
+def find_pricing(
+    model: str, registry: dict[str, ModelPricing]
+) -> Optional[ModelPricing]:
+    if model in registry:
+        return registry[model]
+    # simple wildcard support like "local/*"
+    for key, mp in registry.items():
+        if key.endswith("/*") and model.startswith(key[:-2]):
+            return mp
+    # try separator normalization, e.g. "openai-o3" -> "openai/o3"
+    model2 = model.replace("-", "/")
+    if model2 in registry:
+        return registry[model2]
+    return None
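A small sketch of the lookup rules in find_pricing; the registry keys and prices below are hypothetical:

# Hypothetical registry entries; prices are placeholders.
reg = {
    "local/*": ModelPricing(Decimal("0"), Decimal("0")),
    "openai/o3": ModelPricing(Decimal("0.002"), Decimal("0.008")),
}
assert find_pricing("local/llama-3-70b", reg) is reg["local/*"]  # "local/*" prefix wildcard
assert find_pricing("openai-o3", reg) is reg["openai/o3"]        # "-" -> "/" fallback
assert find_pricing("unknown-model", reg) is None                # no match -> None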
+
+
+def default_registry_path() -> str:
+    """Path to pricing file shipped with this package"""
+    path = resources.files("ursa") / "observability" / "pricing.json"
+    return str(path)
+
+
+def load_registry(
+    path: Optional[str] = None,
+    overrides: Optional[dict[str, Any]] = None,
+    use_default_if_missing: bool = True,
+) -> dict[str, ModelPricing]:
+    """
+    Load pricing registry from:
+    1) explicit `path` (if provided), else
+    2) $URSA_PRICING_JSON (if set), else
+    3) pricing.json next to pricing.py (if present, and use_default_if_missing)
+    4) fall back to DEFAULT_REGISTRY
+    """
+    reg: dict[str, ModelPricing] = dict(DEFAULT_REGISTRY)
+
+    # 1) explicit path from caller wins
+    candidate = path
+
+    # 2) else env var
+    if not candidate:
+        env_path = os.environ.get("URSA_PRICING_JSON")
+        if env_path:
+            candidate = env_path
+
+    # 3) else module-local pricing.json
+    if not candidate and use_default_if_missing:
+        local_path = default_registry_path()
+        if os.path.exists(local_path):
+            candidate = local_path
+
+    # Load if we have a candidate
+    if candidate and os.path.exists(candidate):
+        with open(candidate, "r", encoding="utf-8") as f:
+            data = json.load(f)
+        for k, v in (data or {}).items():
+            # Ignore non-model notes like "_note"
+            if not isinstance(v, dict) or (
+                "input_per_1k" not in v and "output_per_1k" not in v
+            ):
+                continue
+            reg[normalize_model_name(k)] = ModelPricing(
+                _dec(v.get("input_per_1k", 0)),
+                _dec(v.get("output_per_1k", 0)),
+                _dec(v["reasoning_per_1k"])
+                if v.get("reasoning_per_1k") is not None
+                else None,
+                _dec(v.get("cached_input_multiplier", 1)),
+            )
+
+    # Apply programmatic overrides last
+    if overrides:
+        for k, v in overrides.items():
+            reg[normalize_model_name(k)] = ModelPricing(
+                _dec(v.get("input_per_1k", 0)),
+                _dec(v.get("output_per_1k", 0)),
+                _dec(v["reasoning_per_1k"])
+                if v.get("reasoning_per_1k") is not None
+                else None,
+                _dec(v.get("cached_input_multiplier", 1)),
+            )
+
+    return reg
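A short usage sketch for load_registry; the path and the my-org/llm entry are hypothetical:

from ursa.observability.pricing import load_registry

# Hypothetical call: an explicit path is consulted first (if the file exists); the
# $URSA_PRICING_JSON env var and the bundled pricing.json are only used when no path is given.
reg = load_registry(
    path="/tmp/my_pricing.json",  # hypothetical file
    overrides={"my-org/llm": {"input_per_1k": 0.001, "output_per_1k": 0.004}},
)
print(reg["my-org/llm"].output_per_1k)  # Decimal('0.004') -- overrides are applied last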
+
+
+# ---------- Core pricing application ----------
+
+
+def _has_provider_cost(roll: dict[str, Any]) -> bool:
+    # Treat nonzero provider totals as authoritative
+    try:
+        return any([
+            float(roll.get("total_cost", 0) or 0) > 0,
+            float(roll.get("input_cost", 0) or 0) > 0,
+            float(roll.get("output_cost", 0) or 0) > 0,
+        ])
+    except Exception:
+        return False
+
+
+def _round_money(x: Decimal) -> float:
+    return float(x.quantize(Decimal("0.000001"), rounding=ROUND_HALF_UP))
+
+
+def price_event(
+    event: dict[str, Any],
+    registry: dict[str, ModelPricing],
+    overwrite: bool = False,
+) -> Tuple[dict[str, Any], Optional[Decimal], str]:
+    """
+    Returns (event, total_cost_decimal_or_None, cost_source)
+    cost_source ∈ {"provider", "computed", "no_usage", "no_pricing"}
+    """
+    metrics = event.get("metrics") or {}
+    roll = metrics.get("usage_rollup") or {}
+    if not roll:
+        return (event, None, "no_usage")
+
+    if _has_provider_cost(roll) and not overwrite:
+        # Respect provider-reported cost
+        return (event, Decimal(str(roll.get("total_cost", 0) or 0)), "provider")
+
+    model = resolve_model_name(event)
+    mp = find_pricing(model, registry)
+    if not mp:
+        return (event, None, "no_pricing")
+
+    # Compute costs from tokens
+    costs = mp.price_tokens(roll)
+
+    # Populate rollup fields (only fill or overwrite if asked)
+    roll = dict(roll)  # copy to avoid mutating caller unexpectedly
+    for key in ("input_cost", "output_cost", "total_cost"):
+        if overwrite or not roll.get(key):
+            roll[key] = _round_money(costs[key])
+    # Optional: attach granular breakdown so you can inspect later
+    metrics["cost_details"] = {
+        "source": "computed",
+        "model_resolved": model,
+        "pricing_used": asdict(mp),
+        "components_usd": {k: _round_money(v) for k, v in costs.items()},
+    }
+    metrics["cost_source"] = "computed"
+    event["metrics"] = metrics
+    event["metrics"]["usage_rollup"] = roll
+    return (event, costs["total_cost"], "computed")
+
+
+def price_payload(
+    payload: dict[str, Any],
+    registry: Optional[dict[str, ModelPricing]] = None,
+    overwrite: bool = False,
+) -> dict[str, Any]:
+    """
+    Enriches payload in-place with computed costs where missing.
+    Adds a `costs` block with totals and by-model aggregation.
+    """
+    reg = registry or load_registry()
+    llm_events = payload.get("llm_events") or []
+    total = Decimal("0")
+    by_model: dict[str, Decimal] = {}
+    sources = {"provider": 0, "computed": 0, "no_usage": 0, "no_pricing": 0}
+
+    for ev in llm_events:
+        ev2, cost_dec, src = price_event(ev, reg, overwrite=overwrite)
+        sources[src] = sources.get(src, 0) + 1
+        model = resolve_model_name(ev2)
+        if cost_dec is not None:
+            total += cost_dec
+            by_model[model] = by_model.get(model, Decimal("0")) + cost_dec
+
+    payload.setdefault("costs", {})
+    payload["costs"]["total_usd"] = _round_money(total)
+    payload["costs"]["by_model_usd"] = {
+        k: _round_money(v) for k, v in by_model.items()
+    }
+    payload["costs"]["event_sources"] = sources
+    payload["costs"]["registry_note"] = (
+        "Edit pricing via DEFAULT_REGISTRY, pricing.json, or overrides."
+    )
+
+    return payload
+
+
+# ---------- Convenience file I/O ----------
+
+
+def price_file(
+    in_path: str,
+    out_path: Optional[str] = None,
+    registry_path: Optional[str] = None,
+    overwrite: bool = False,
+) -> str:
+    """
+    Reads a metrics JSON file (from timing.py), enriches with costs, writes result.
+    If out_path is None, writes alongside input as '<name>.priced.json'.
+    Returns output path.
+    """
+    with open(in_path, "r", encoding="utf-8") as f:
+        payload = json.load(f)
+
+    reg = load_registry(path=registry_path)
+    payload = price_payload(payload, registry=reg, overwrite=overwrite)
+
+    if not out_path:
+        base, ext = os.path.splitext(in_path)
+        out_path = f"{base}.priced.json"
+
+    with open(out_path, "w", encoding="utf-8") as f:
+        json.dump(payload, f, ensure_ascii=False, indent=2)
+    return out_path
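Taken together, a typical enrichment pass might look like the sketch below; the file name and event payload are invented, and only the import path and function signatures come from the module above:

from ursa.observability.pricing import load_registry, price_file, price_payload

# One-shot: read a timing.py metrics dump, attach costs, write "<name>.priced.json" next to it.
out = price_file("runs/session_0001.json")  # hypothetical input file
print(out)                                  # runs/session_0001.priced.json

# Or enrich an in-memory payload with the bundled registry.
payload = {
    "llm_events": [
        {
            "metadata": {"model": "gpt-4o-mini"},
            "metrics": {"usage_rollup": {"input_tokens": 1000, "output_tokens": 200}},
        }
    ]
}
priced = price_payload(payload, registry=load_registry())
print(priced["costs"]["total_usd"], priced["costs"]["event_sources"])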