pyspiral-0.4.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. pyspiral-0.4.0.dist-info/METADATA +46 -0
  2. pyspiral-0.4.0.dist-info/RECORD +98 -0
  3. pyspiral-0.4.0.dist-info/WHEEL +4 -0
  4. pyspiral-0.4.0.dist-info/entry_points.txt +2 -0
  5. spiral/__init__.py +10 -0
  6. spiral/_lib.pypy310-pp73-darwin.so +0 -0
  7. spiral/adbc.py +393 -0
  8. spiral/api/__init__.py +64 -0
  9. spiral/api/admin.py +15 -0
  10. spiral/api/client.py +160 -0
  11. spiral/api/filesystems.py +153 -0
  12. spiral/api/organizations.py +77 -0
  13. spiral/api/projects.py +197 -0
  14. spiral/api/telemetry.py +19 -0
  15. spiral/api/types.py +20 -0
  16. spiral/api/workloads.py +52 -0
  17. spiral/arrow_.py +221 -0
  18. spiral/cli/__init__.py +79 -0
  19. spiral/cli/__main__.py +4 -0
  20. spiral/cli/admin.py +16 -0
  21. spiral/cli/app.py +65 -0
  22. spiral/cli/console.py +95 -0
  23. spiral/cli/fs.py +112 -0
  24. spiral/cli/iceberg/__init__.py +7 -0
  25. spiral/cli/iceberg/namespaces.py +47 -0
  26. spiral/cli/iceberg/tables.py +60 -0
  27. spiral/cli/indexes/__init__.py +19 -0
  28. spiral/cli/login.py +22 -0
  29. spiral/cli/orgs.py +90 -0
  30. spiral/cli/printer.py +53 -0
  31. spiral/cli/projects.py +136 -0
  32. spiral/cli/state.py +5 -0
  33. spiral/cli/tables/__init__.py +121 -0
  34. spiral/cli/telemetry.py +18 -0
  35. spiral/cli/types.py +51 -0
  36. spiral/cli/workloads.py +59 -0
  37. spiral/client.py +79 -0
  38. spiral/core/__init__.pyi +0 -0
  39. spiral/core/client/__init__.pyi +117 -0
  40. spiral/core/index/__init__.pyi +15 -0
  41. spiral/core/table/__init__.pyi +108 -0
  42. spiral/core/table/manifests/__init__.pyi +35 -0
  43. spiral/core/table/metastore/__init__.pyi +62 -0
  44. spiral/core/table/spec/__init__.pyi +214 -0
  45. spiral/datetime_.py +27 -0
  46. spiral/expressions/__init__.py +245 -0
  47. spiral/expressions/base.py +149 -0
  48. spiral/expressions/http.py +86 -0
  49. spiral/expressions/io.py +100 -0
  50. spiral/expressions/list_.py +68 -0
  51. spiral/expressions/mp4.py +62 -0
  52. spiral/expressions/png.py +18 -0
  53. spiral/expressions/qoi.py +18 -0
  54. spiral/expressions/refs.py +58 -0
  55. spiral/expressions/str_.py +39 -0
  56. spiral/expressions/struct.py +59 -0
  57. spiral/expressions/text.py +62 -0
  58. spiral/expressions/tiff.py +223 -0
  59. spiral/expressions/udf.py +46 -0
  60. spiral/grpc_.py +32 -0
  61. spiral/iceberg/__init__.py +3 -0
  62. spiral/iceberg/client.py +33 -0
  63. spiral/indexes/__init__.py +5 -0
  64. spiral/indexes/client.py +137 -0
  65. spiral/indexes/index.py +34 -0
  66. spiral/indexes/scan.py +22 -0
  67. spiral/project.py +46 -0
  68. spiral/protogen/_/__init__.py +0 -0
  69. spiral/protogen/_/arrow/__init__.py +0 -0
  70. spiral/protogen/_/arrow/flight/__init__.py +0 -0
  71. spiral/protogen/_/arrow/flight/protocol/__init__.py +0 -0
  72. spiral/protogen/_/arrow/flight/protocol/sql/__init__.py +1990 -0
  73. spiral/protogen/_/scandal/__init__.py +178 -0
  74. spiral/protogen/_/spiral/__init__.py +0 -0
  75. spiral/protogen/_/spiral/table/__init__.py +22 -0
  76. spiral/protogen/_/substrait/__init__.py +3399 -0
  77. spiral/protogen/_/substrait/extensions/__init__.py +115 -0
  78. spiral/protogen/__init__.py +0 -0
  79. spiral/protogen/substrait/__init__.py +3399 -0
  80. spiral/protogen/substrait/extensions/__init__.py +115 -0
  81. spiral/protogen/util.py +41 -0
  82. spiral/py.typed +0 -0
  83. spiral/server.py +17 -0
  84. spiral/settings.py +101 -0
  85. spiral/substrait_.py +279 -0
  86. spiral/tables/__init__.py +12 -0
  87. spiral/tables/client.py +130 -0
  88. spiral/tables/dataset.py +250 -0
  89. spiral/tables/debug/__init__.py +0 -0
  90. spiral/tables/debug/manifests.py +70 -0
  91. spiral/tables/debug/metrics.py +56 -0
  92. spiral/tables/debug/scan.py +248 -0
  93. spiral/tables/maintenance.py +12 -0
  94. spiral/tables/scan.py +193 -0
  95. spiral/tables/snapshot.py +78 -0
  96. spiral/tables/table.py +157 -0
  97. spiral/tables/transaction.py +52 -0
  98. spiral/types_.py +6 -0
@@ -0,0 +1,130 @@
+ from datetime import datetime
+ from typing import Any
+
+ import pyarrow as pa
+
+ from spiral.api import SpiralAPI
+ from spiral.api.projects import TableResource
+ from spiral.core.client import Spiral as CoreSpiral
+ from spiral.core.table.spec import Schema
+ from spiral.datetime_ import timestamp_micros
+ from spiral.expressions import ExprLike
+ from spiral.tables.scan import Scan
+ from spiral.tables.table import Table
+ from spiral.types_ import Uri
+
+
+ class Tables:
+     """
+     Spiral Tables provide a powerful and flexible way to store, analyze,
+     and query massive and/or multimodal datasets.
+
+     The data model will feel familiar to users of SQL- or DataFrame-style systems,
+     yet is designed to be more flexible, more powerful, and more useful in the context
+     of modern data processing. Tables are stored in and queried directly from object storage.
+     """
+
+     def __init__(self, api: SpiralAPI, spiral: CoreSpiral, *, project_id: str | None = None):
+         self._api = api
+         self._spiral = spiral
+         self._project_id = project_id
+
+     def table(self, identifier: str) -> Table:
+         """Open a table given a `dataset.table` identifier, or a bare `table` name using the `default` dataset."""
+         project_id, dataset, table = self._parse_identifier(identifier)
+         if project_id is None:
+             raise ValueError("Must provide a fully qualified table identifier.")
+
+         res = list(self._api.project.list_tables(project_id, dataset=dataset, table=table))
+         if len(res) == 0:
+             raise ValueError(f"Table not found: {project_id}.{dataset}.{table}")
+
+         res = res[0]
+         return Table(self, self._spiral.get_table(res.id), identifier=f"{res.project_id}.{res.dataset}.{res.table}")
+
+     def list_tables(self) -> list[TableResource]:
+         project_id = self._project_id
+         if project_id is None:
+             raise ValueError("Must provide a project ID to list tables.")
+         return list(self._api.project.list_tables(project_id))
+
+     def create_table(
+         self,
+         identifier: str,
+         *,
+         key_schema: pa.Schema | Any,
+         root_uri: Uri | None = None,
+         exist_ok: bool = False,
+     ) -> Table:
+         """Create a new table in the project.
+
+         Args:
+             identifier: The table identifier, in the form `project.dataset.table`, `dataset.table`, or `table`.
+             key_schema: The schema of the table's keys.
+             root_uri: The root URI for the table.
+             exist_ok: If True, do not raise an error if the table already exists.
+         """
+         project_id, dataset, table = self._parse_identifier(identifier)
+         if project_id is None:
+             raise ValueError("Must provide a fully qualified table identifier.")
+
+         if not isinstance(key_schema, pa.Schema):
+             key_schema = pa.schema(key_schema)
+         key_schema = Schema.from_arrow(key_schema)
+
+         core_table = self._spiral.create_table(
+             project_id,
+             dataset=dataset,
+             table=table,
+             key_schema=key_schema,
+             root_uri=root_uri,
+             exist_ok=exist_ok,
+         )
+
+         return Table(self, core_table, identifier=f"{project_id}.{dataset}.{table}")
+
+     def _parse_identifier(self, identifier: str) -> tuple[str | None, str, str]:
+         parts = identifier.split(".")
+         if len(parts) == 1:
+             return self._project_id, "default", parts[0]
+         elif len(parts) == 2:
+             return self._project_id, parts[0], parts[1]
+         elif len(parts) == 3:
+             return parts[0], parts[1], parts[2]
+         else:
+             raise ValueError(f"Invalid table identifier: {identifier}")
+
+     def scan(
+         self,
+         *projections: ExprLike,
+         where: ExprLike | None = None,
+         asof: datetime | int | None = None,
+         exclude_keys: bool = False,
+     ) -> Scan:
+         """Starts a read transaction against Spiral.
+
+         Args:
+             projections: a set of expressions that return struct arrays.
+             where: a query expression to apply to the data.
+             asof: only data written before the given timestamp will be returned (with caveats around compaction).
+             exclude_keys: whether to exclude the key columns in the scan result, defaults to False.
+                 Note that if a projection includes a key column, it will be included in the result.
+         """
+         from spiral import expressions as se
+
+         if isinstance(asof, datetime):
+             asof = timestamp_micros(asof)
+
+         # Combine all projections into a single struct.
+         projection = se.merge(*projections)
+         if where is not None:
+             where = se.lift(where)
+
+         return Scan(
+             self._spiral.open_table_scan(
+                 projection.__expr__,
+                 filter=where.__expr__ if where else None,
+                 asof=asof,
+                 exclude_keys=exclude_keys,
+             ),
+         )
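For orientation, here is a minimal usage sketch of the `Tables` API above. It assumes a `Tables` instance named `tables` has already been constructed (how one is obtained from the top-level Spiral client is not shown in this diff), and the table name, column names, and filter expression are illustrative only; `create_table`, `table`, and `scan` follow the signatures shown above.

import pyarrow as pa

# Assumes `tables` is an already-constructed Tables instance.
# Create a table keyed by `id`; exist_ok=True makes the call idempotent.
t = tables.create_table(
    "my_project.default.events",  # `project.dataset.table` form
    key_schema=pa.schema([("id", pa.int64())]),
    exist_ok=True,
)

# Re-open the same table later; a bare name would use the `default` dataset.
t = tables.table("my_project.default.events")

# Scan a projection with an optional filter. `asof` accepts a datetime or
# integer microseconds. Column access on a Table and comparison operators
# on expressions are assumptions based on usage elsewhere in this diff.
scan = tables.scan(t["payload"], where=t["id"] > 100, exclude_keys=False)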
@@ -0,0 +1,250 @@
+ from typing import Any
+
+ import pyarrow as pa
+ import pyarrow.compute as pc
+ import pyarrow.dataset as ds
+
+ from spiral.tables import Scan, Snapshot
+
+
+ class TableDataset(ds.Dataset):
+     def __init__(self, snapshot: Snapshot):
+         self._snapshot = snapshot
+         self._table = snapshot.table
+         self._schema: pa.Schema = self._snapshot._snapshot.table.get_schema(asof=self._snapshot.asof).to_arrow()
+
+         # We don't actually initialize a Dataset; we just implement enough of the API
+         # to fool both DuckDB and Polars.
+         # super().__init__()
+         self._last_scan = None
+
+     @property
+     def schema(self) -> pa.Schema:
+         return self._schema
+
+     def count_rows(
+         self,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ):
+         return self.scanner(
+             None,
+             filter,
+             batch_size,
+             batch_readahead,
+             fragment_readahead,
+             fragment_scan_options,
+             use_threads,
+             memory_pool,
+         ).count_rows()
+
+     def filter(self, expression: pc.Expression) -> "TableDataset":
+         raise NotImplementedError("filter not implemented")
+
+     def get_fragments(self, filter: pc.Expression | None = None):
+         """TODO(ngates): perhaps we should return ranges as per our split API?"""
+         raise NotImplementedError("get_fragments not implemented")
+
+     def head(
+         self,
+         num_rows: int,
+         columns: list[str] | None = None,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ):
+         return self.scanner(
+             columns,
+             filter,
+             batch_size,
+             batch_readahead,
+             fragment_readahead,
+             fragment_scan_options,
+             use_threads,
+             memory_pool,
+         ).head(num_rows)
+
+     def join(
+         self,
+         right_dataset,
+         keys,
+         right_keys=None,
+         join_type=None,
+         left_suffix=None,
+         right_suffix=None,
+         coalesce_keys=True,
+         use_threads=True,
+     ):
+         raise NotImplementedError("join not implemented")
+
+     def join_asof(self, right_dataset, on, by, tolerance, right_on=None, right_by=None):
+         raise NotImplementedError("join_asof not implemented")
+
+     def replace_schema(self, schema: pa.Schema) -> "TableDataset":
+         raise NotImplementedError("replace_schema not implemented")
+
+     def scanner(
+         self,
+         columns: list[str] | None = None,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ) -> "TableScanner":
+         from spiral.substrait_ import SubstraitConverter
+
+         # Extract the Substrait expression so we can convert it to a Spiral expression.
+         if filter is not None:
+             filter = SubstraitConverter(self._table, self._schema, self._table.key_schema.to_arrow()).convert(
+                 filter.to_substrait(self._schema, allow_arrow_extensions=True),
+             )
+
+         scan = (
+             self._snapshot.scan(
+                 {c: self._table[c] for c in columns},
+                 where=filter,
+                 exclude_keys=True,
+             )
+             if columns
+             else self._snapshot.scan(where=filter)
+         )
+         self._last_scan = scan
+
+         return TableScanner(scan)
+
+     def sort_by(self, sorting, **kwargs):
+         raise NotImplementedError("sort_by not implemented")
+
+     def take(
+         self,
+         indices: pa.Array | Any,
+         columns: list[str] | None = None,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ):
+         return self.scanner(
+             columns,
+             filter,
+             batch_size,
+             batch_readahead,
+             fragment_readahead,
+             fragment_scan_options,
+             use_threads,
+             memory_pool,
+         ).take(indices)
+
+     def to_batches(
+         self,
+         columns: list[str] | None = None,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ):
+         return self.scanner(
+             columns,
+             filter,
+             batch_size,
+             batch_readahead,
+             fragment_readahead,
+             fragment_scan_options,
+             use_threads,
+             memory_pool,
+         ).to_batches()
+
+     def to_table(
+         self,
+         columns=None,
+         filter: pc.Expression | None = None,
+         batch_size: int | None = None,
+         batch_readahead: int | None = None,
+         fragment_readahead: int | None = None,
+         fragment_scan_options: ds.FragmentScanOptions | None = None,
+         use_threads: bool = True,
+         memory_pool: pa.MemoryPool = None,
+     ):
+         return self.scanner(
+             columns,
+             filter,
+             batch_size,
+             batch_readahead,
+             fragment_readahead,
+             fragment_scan_options,
+             use_threads,
+             memory_pool,
+         ).to_table()
+
+
+ class TableScanner(ds.Scanner):
+     """A PyArrow Dataset Scanner that reads from a Spiral Table."""
+
+     def __init__(
+         self,
+         scan: Scan,
+         key_table: pa.Table | pa.RecordBatchReader | None = None,
+     ):
+         self._scan = scan
+         self._schema = scan.schema
+         self.key_table = key_table
+
+         # We don't actually initialize a Scanner; we just implement enough of the API
+         # to fool both DuckDB and Polars.
+         # super().__init__()
+
+     @property
+     def schema(self):
+         return self._schema
+
+     def count_rows(self):
+         # TODO(ngates): is there a faster way to count rows?
+         return sum(len(batch) for batch in self.to_reader())
+
+     def head(self, num_rows: int):
+         """Return the first `num_rows` rows of the dataset."""
+         reader = self.to_reader()
+         batches = []
+         row_count = 0
+         for batch in reader:
+             if row_count + len(batch) > num_rows:
+                 batches.append(batch.slice(0, num_rows - row_count))
+                 break
+             row_count += len(batch)
+             batches.append(batch)
+         return pa.Table.from_batches(batches, schema=reader.schema)
+
+     def scan_batches(self):
+         raise NotImplementedError("scan_batches not implemented")
+
+     def take(self, indices):
+         # TODO(ngates): can we defer take until after we've constructed the scan?
+         # Or should we delay constructing the Spiral Table.scan?
+         raise NotImplementedError("take not implemented")
+
+     def to_batches(self):
+         return self.to_reader()
+
+     def to_reader(self):
+         return self._scan.to_record_batches(key_table=self.key_table)
+
+     def to_table(self):
+         return self.to_reader().read_all()
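Since `TableDataset` deliberately mimics just enough of the `pyarrow.dataset` API to satisfy DuckDB and Polars, a sketch of the intended integration looks like this. It assumes a `Snapshot` named `snapshot` is already in hand, and the `id` column is hypothetical.

import duckdb
import polars as pl

from spiral.tables.dataset import TableDataset

dataset = TableDataset(snapshot)  # `snapshot` obtained from a Table elsewhere

# DuckDB resolves the local variable `dataset` via its replacement scans,
# treating it like any pyarrow dataset.
con = duckdb.connect()
n = con.execute("SELECT count(*) FROM dataset").fetchone()[0]

# Polars can lazily scan any pyarrow-dataset-like object; filters may be
# pushed down through the scanner() path shown above.
lf = pl.scan_pyarrow_dataset(dataset)
df = lf.filter(pl.col("id") > 100).collect()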
@@ -0,0 +1,70 @@
+ from spiral import datetime_
+ from spiral.core.table import TableScan
+ from spiral.core.table.manifests import FragmentManifest
+ from spiral.tables.debug.metrics import _format_bytes
+
+
+ def display_manifests(scan: TableScan):
+     """Display all manifests in a scan."""
+     if len(scan.table_ids()) != 1:
+         raise NotImplementedError("Multiple table scans are not supported.")
+     table_id = scan.table_ids()[0]
+
+     key_space_manifest: FragmentManifest = scan.key_space_scan(table_id).manifest
+     _table_of_fragments(
+         key_space_manifest,
+         title="Key Space manifest",
+     )
+
+     for column_group in scan.column_groups():
+         column_group_manifest: FragmentManifest = scan.column_group_scan(column_group).manifest
+         _table_of_fragments(
+             column_group_manifest,
+             title=f"Column Group manifest for {str(column_group)}",
+         )
+
+
+ def _table_of_fragments(manifest: FragmentManifest, title: str):
+     """Display fragments in a formatted table."""
+     # Calculate summary statistics.
+     total_size = sum(fragment.size_bytes for fragment in manifest)
+     total_metadata_size = sum(len(fragment.format_metadata or b"") for fragment in manifest)
+     fragment_count = len(manifest)
+     avg_size = total_size / fragment_count if fragment_count > 0 else 0
+
+     # Print title and summary.
+     print(f"\n\n{title}")
+     print(
+         f"{fragment_count} fragments, "
+         f"total: {_format_bytes(total_size)}, "
+         f"avg: {_format_bytes(int(avg_size))}, "
+         f"metadata: {_format_bytes(total_metadata_size)}"
+     )
+     print("=" * 120)
+
+     # Print header.
+     print(
+         f"{'ID':<30} {'Size (Metadata)':<20} {'Format':<10} {'Key Span':<10} "
+         f"{'Level':<5} {'Committed At':<20} {'Compacted At':<20}"
+     )
+     print("=" * 120)
+
+     # Print each fragment.
+     for fragment in manifest:
+         committed_str = str(datetime_.from_timestamp_micros(fragment.committed_at)) if fragment.committed_at else "N/A"
+         compacted_str = str(datetime_.from_timestamp_micros(fragment.compacted_at)) if fragment.compacted_at else "N/A"
+
+         size_with_metadata = (
+             f"{_format_bytes(fragment.size_bytes)} ({_format_bytes(len(fragment.format_metadata or b''))})"
+         )
+         key_span = f"{fragment.key_span.begin}..{fragment.key_span.end}"
+
+         print(
+             f"{fragment.id:<30} "
+             f"{size_with_metadata:<20} "
+             f"{str(fragment.format):<10} "
+             f"{key_span:<10} "
+             f"{str(fragment.level):<5} "
+             f"{committed_str:<20} "
+             f"{compacted_str:<20}"
+         )
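A short usage sketch for the manifest debugger. It assumes you can reach the underlying `spiral.core.table.TableScan`; the `Scan` wrapper in this package holds one internally, but the exact attribute is not shown in this diff, so `core_scan` is simply assumed to be in hand.

from spiral.tables.debug.manifests import display_manifests

# `core_scan` is assumed to be a spiral.core.table.TableScan.
# Prints the key-space manifest, then one table per column group.
display_manifests(core_scan)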
@@ -0,0 +1,56 @@
+ from typing import Any
+
+
+ def display_metrics(metrics: dict[str, Any]) -> None:
+     """Display metrics in a formatted table."""
+     print(
+         f"{'Metric':<40} {'Type':<10} {'Count':<8} {'Avg':<12} {'Min':<12} "
+         f"{'Max':<12} {'P95':<12} {'P99':<12} {'StdDev':<12}"
+     )
+     print("=" * 140)
+
+     for metric_name, data in sorted(metrics.items()):
+         metric_type = data["type"]
+         count = data["count"]
+         avg = _format_value(data["avg"], metric_type, metric_name)
+         min_val = _format_value(data["min"], metric_type, metric_name)
+         max_val = _format_value(data["max"], metric_type, metric_name)
+         p95 = _format_value(data["p95"], metric_type, metric_name)
+         p99 = _format_value(data["p99"], metric_type, metric_name)
+         stddev = _format_value(data["stddev"], metric_type, metric_name)
+
+         print(
+             f"{metric_name:<40} {metric_type:<10} {count:<8} {avg:<12} {min_val:<12} "
+             f"{max_val:<12} {p95:<12} {p99:<12} {stddev:<12}"
+         )
+
+
+ def _format_duration(nanoseconds: float) -> str:
+     """Convert nanoseconds to a human-readable duration."""
+     if nanoseconds >= 1_000_000_000:
+         return f"{nanoseconds / 1_000_000_000:.2f}s"
+     elif nanoseconds >= 1_000_000:
+         return f"{nanoseconds / 1_000_000:.2f}ms"
+     elif nanoseconds >= 1_000:
+         return f"{nanoseconds / 1_000:.2f}μs"
+     else:
+         return f"{nanoseconds:.0f}ns"
+
+
+ def _format_bytes(bytes_value: float) -> str:
+     """Convert bytes to a human-readable size."""
+     for unit in ["B", "KB", "MB", "GB"]:
+         if bytes_value < 1024:
+             return f"{bytes_value:.1f}{unit}"
+         bytes_value /= 1024
+     return f"{bytes_value:.1f}TB"
+
+
+ def _format_value(value: float, metric_type: str, metric_name: str) -> str:
+     """Format a value based on metric type and name."""
+     if metric_type == "timer" or "duration" in metric_name:
+         return _format_duration(value)
+     elif "bytes" in metric_name:
+         return _format_bytes(value)
+     else:
+         return f"{value:,.0f}"
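For reference, a sketch of the input shape `display_metrics` expects, inferred from the keys it reads: each metric name maps to a dict with `type`, `count`, `avg`, `min`, `max`, `p95`, `p99`, and `stddev`. The metric names and values below are illustrative; timers (and names containing "duration") are rendered as durations, and names containing "bytes" as sizes.

from spiral.tables.debug.metrics import display_metrics

metrics = {
    "scan.read.duration": {
        "type": "timer",    # rendered with _format_duration (values in nanoseconds)
        "count": 128,
        "avg": 2_500_000,
        "min": 900_000,
        "max": 41_000_000,
        "p95": 9_800_000,
        "p99": 30_100_000,
        "stddev": 4_200_000,
    },
    "scan.read.bytes": {
        "type": "counter",  # "bytes" in the name triggers _format_bytes
        "count": 128,
        "avg": 1_048_576.0,
        "min": 4_096.0,
        "max": 16_777_216.0,
        "p95": 8_388_608.0,
        "p99": 12_582_912.0,
        "stddev": 2_097_152.0,
    },
}
display_metrics(metrics)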