oxarchive 0.5.4__tar.gz → 0.6.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: oxarchive
- Version: 0.5.4
+ Version: 0.6.1
  Summary: Official Python SDK for 0xarchive - Hyperliquid Historical Data API
  Project-URL: Homepage, https://0xarchive.io
  Project-URL: Documentation, https://0xarchive.io/docs/sdks
@@ -406,6 +406,66 @@ candles = await client.hyperliquid.candles.ahistory("BTC", start=..., end=..., i
  | `1d` | 1 day |
  | `1w` | 1 week |

+ ### Data Quality Monitoring
+
+ Monitor data coverage, incidents, latency, and SLA compliance across all exchanges.
+
+ ```python
+ # Get overall system health status
+ status = client.data_quality.status()
+ print(f"System status: {status.status}")
+ for exchange, info in status.exchanges.items():
+     print(f"  {exchange}: {info.status}")
+
+ # Get data coverage summary for all exchanges
+ coverage = client.data_quality.coverage()
+ for exchange in coverage.exchanges:
+     print(f"{exchange.exchange}:")
+     for dtype, info in exchange.data_types.items():
+         print(f"  {dtype}: {info.total_records:,} records, {info.completeness}% complete")
+
+ # Get symbol-specific coverage with gap detection
+ btc = client.data_quality.symbol_coverage("hyperliquid", "BTC")
+ oi = btc.data_types["open_interest"]
+ print(f"BTC OI completeness: {oi.completeness}%")
+ print(f"Gaps found: {len(oi.gaps)}")
+ for gap in oi.gaps[:5]:
+     print(f"  {gap.duration_minutes} min gap: {gap.start} -> {gap.end}")
+
+ # List incidents with filtering
+ result = client.data_quality.list_incidents(status="open")
+ for incident in result.incidents:
+     print(f"[{incident.severity}] {incident.title}")
+
+ # Get latency metrics
+ latency = client.data_quality.latency()
+ for exchange, metrics in latency.exchanges.items():
+     print(f"{exchange}: OB lag {metrics.data_freshness.orderbook_lag_ms}ms")
+
+ # Get SLA compliance metrics for a specific month
+ sla = client.data_quality.sla(year=2026, month=1)
+ print(f"Period: {sla.period}")
+ print(f"Uptime: {sla.actual.uptime}% ({sla.actual.uptime_status})")
+ print(f"API P99: {sla.actual.api_latency_p99_ms}ms ({sla.actual.latency_status})")
+
+ # Async versions available for all methods
+ status = await client.data_quality.astatus()
+ coverage = await client.data_quality.acoverage()
+ ```
+
+ #### Data Quality Endpoints
+
+ | Method | Description |
+ |--------|-------------|
+ | `status()` | Overall system health and per-exchange status |
+ | `coverage()` | Data coverage summary for all exchanges |
+ | `exchange_coverage(exchange)` | Coverage details for a specific exchange |
+ | `symbol_coverage(exchange, symbol)` | Coverage with gap detection for a specific symbol |
+ | `list_incidents(...)` | List incidents with filtering and pagination |
+ | `get_incident(incident_id)` | Get specific incident details |
+ | `latency()` | Current latency metrics (WebSocket, REST, data freshness) |
+ | `sla(year, month)` | SLA compliance metrics for a specific month |
+
  ### Legacy API (Deprecated)

  The following legacy methods are deprecated and will be removed in v2.0. They default to Hyperliquid data:
@@ -579,6 +639,44 @@ async def main():
  asyncio.run(main())
  ```

+ ### Gap Detection
+
+ During historical replay and bulk streaming, the server automatically detects gaps in the data and notifies the client. This helps identify periods where data may be missing.
+
+ ```python
+ import asyncio
+ import time
+
+ from oxarchive import OxArchiveWs, WsOptions
+
+ async def main():
+     ws = OxArchiveWs(WsOptions(api_key="ox_..."))
+
+     # Handle gap notifications during replay/stream
+     def handle_gap(channel, coin, gap_start, gap_end, duration_minutes):
+         print(f"Gap detected in {channel}/{coin}:")
+         print(f"  From: {gap_start}")
+         print(f"  To: {gap_end}")
+         print(f"  Duration: {duration_minutes} minutes")
+
+     ws.on_gap(handle_gap)
+
+     await ws.connect()
+
+     # Start replay - gaps will be reported via the on_gap callback
+     await ws.replay(
+         "orderbook", "BTC",
+         start=int(time.time() * 1000) - 86400000,
+         end=int(time.time() * 1000),
+         speed=10
+     )
+
+ asyncio.run(main())
+ ```
+
+ Gap thresholds vary by channel:
+ - **orderbook**, **candles**, **liquidations**: 2 minutes
+ - **trades**: 60 minutes (trades can naturally have longer gaps during low activity periods)
+
  ### WebSocket Configuration

  ```python
@@ -599,6 +697,7 @@ ws = OxArchiveWs(WsOptions(
  | `orderbook` | L2 order book updates | Yes | Yes |
  | `trades` | Trade/fill updates | Yes | Yes |
  | `candles` | OHLCV candle data | Yes | Yes (replay/stream only) |
+ | `liquidations` | Liquidation events (May 2025+) | Yes | Yes (replay/stream only) |
  | `ticker` | Price and 24h volume | Yes | Real-time only |
  | `all_tickers` | All market tickers | No | Real-time only |

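The endpoints added above compose naturally into a periodic health check. Below is a minimal sketch built only on the calls shown in this hunk; the `Client(api_key=...)` constructor form, the 95% completeness threshold, and the 60-second poll interval are illustrative assumptions, not part of the SDK.

```python
# Minimal health-check sketch. Assumes `Client` is exported from
# `oxarchive` and accepts an api_key (as its WebSocket counterpart does);
# the threshold and interval values below are illustrative choices.
import time

from oxarchive import Client

client = Client(api_key="ox_...")  # assumed constructor form
COMPLETENESS_THRESHOLD = 95.0      # illustrative alert threshold (%)

while True:
    status = client.data_quality.status()
    if status.status != "operational":
        print(f"ALERT: system {status.status}, "
              f"{status.active_incidents} active incident(s)")

    btc = client.data_quality.symbol_coverage("hyperliquid", "BTC")
    for dtype, info in btc.data_types.items():
        if info.completeness < COMPLETENESS_THRESHOLD:
            print(f"ALERT: {dtype} at {info.completeness}% "
                  f"with {len(info.gaps)} gap(s)")

    time.sleep(60)  # illustrative poll interval
```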
@@ -369,6 +369,66 @@ candles = await client.hyperliquid.candles.ahistory("BTC", start=..., end=..., i
  | `1d` | 1 day |
  | `1w` | 1 week |

+ ### Data Quality Monitoring
+
+ Monitor data coverage, incidents, latency, and SLA compliance across all exchanges.
+
+ ```python
+ # Get overall system health status
+ status = client.data_quality.status()
+ print(f"System status: {status.status}")
+ for exchange, info in status.exchanges.items():
+     print(f"  {exchange}: {info.status}")
+
+ # Get data coverage summary for all exchanges
+ coverage = client.data_quality.coverage()
+ for exchange in coverage.exchanges:
+     print(f"{exchange.exchange}:")
+     for dtype, info in exchange.data_types.items():
+         print(f"  {dtype}: {info.total_records:,} records, {info.completeness}% complete")
+
+ # Get symbol-specific coverage with gap detection
+ btc = client.data_quality.symbol_coverage("hyperliquid", "BTC")
+ oi = btc.data_types["open_interest"]
+ print(f"BTC OI completeness: {oi.completeness}%")
+ print(f"Gaps found: {len(oi.gaps)}")
+ for gap in oi.gaps[:5]:
+     print(f"  {gap.duration_minutes} min gap: {gap.start} -> {gap.end}")
+
+ # List incidents with filtering
+ result = client.data_quality.list_incidents(status="open")
+ for incident in result.incidents:
+     print(f"[{incident.severity}] {incident.title}")
+
+ # Get latency metrics
+ latency = client.data_quality.latency()
+ for exchange, metrics in latency.exchanges.items():
+     print(f"{exchange}: OB lag {metrics.data_freshness.orderbook_lag_ms}ms")
+
+ # Get SLA compliance metrics for a specific month
+ sla = client.data_quality.sla(year=2026, month=1)
+ print(f"Period: {sla.period}")
+ print(f"Uptime: {sla.actual.uptime}% ({sla.actual.uptime_status})")
+ print(f"API P99: {sla.actual.api_latency_p99_ms}ms ({sla.actual.latency_status})")
+
+ # Async versions available for all methods
+ status = await client.data_quality.astatus()
+ coverage = await client.data_quality.acoverage()
+ ```
+
+ #### Data Quality Endpoints
+
+ | Method | Description |
+ |--------|-------------|
+ | `status()` | Overall system health and per-exchange status |
+ | `coverage()` | Data coverage summary for all exchanges |
+ | `exchange_coverage(exchange)` | Coverage details for a specific exchange |
+ | `symbol_coverage(exchange, symbol)` | Coverage with gap detection for a specific symbol |
+ | `list_incidents(...)` | List incidents with filtering and pagination |
+ | `get_incident(incident_id)` | Get specific incident details |
+ | `latency()` | Current latency metrics (WebSocket, REST, data freshness) |
+ | `sla(year, month)` | SLA compliance metrics for a specific month |
+
  ### Legacy API (Deprecated)

  The following legacy methods are deprecated and will be removed in v2.0. They default to Hyperliquid data:
@@ -542,6 +602,44 @@ async def main():
  asyncio.run(main())
  ```

+ ### Gap Detection
+
+ During historical replay and bulk streaming, the server automatically detects gaps in the data and notifies the client. This helps identify periods where data may be missing.
+
+ ```python
+ import asyncio
+ import time
+
+ from oxarchive import OxArchiveWs, WsOptions
+
+ async def main():
+     ws = OxArchiveWs(WsOptions(api_key="ox_..."))
+
+     # Handle gap notifications during replay/stream
+     def handle_gap(channel, coin, gap_start, gap_end, duration_minutes):
+         print(f"Gap detected in {channel}/{coin}:")
+         print(f"  From: {gap_start}")
+         print(f"  To: {gap_end}")
+         print(f"  Duration: {duration_minutes} minutes")
+
+     ws.on_gap(handle_gap)
+
+     await ws.connect()
+
+     # Start replay - gaps will be reported via the on_gap callback
+     await ws.replay(
+         "orderbook", "BTC",
+         start=int(time.time() * 1000) - 86400000,
+         end=int(time.time() * 1000),
+         speed=10
+     )
+
+ asyncio.run(main())
+ ```
+
+ Gap thresholds vary by channel:
+ - **orderbook**, **candles**, **liquidations**: 2 minutes
+ - **trades**: 60 minutes (trades can naturally have longer gaps during low activity periods)
+
  ### WebSocket Configuration

  ```python
@@ -562,6 +660,7 @@ ws = OxArchiveWs(WsOptions(
  | `orderbook` | L2 order book updates | Yes | Yes |
  | `trades` | Trade/fill updates | Yes | Yes |
  | `candles` | OHLCV candle data | Yes | Yes (replay/stream only) |
+ | `liquidations` | Liquidation events (May 2025+) | Yes | Yes (replay/stream only) |
  | `ticker` | Price and 24h volume | Yes | Real-time only |
  | `all_tickers` | All market tickers | No | Real-time only |

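Gap notifications pair naturally with replay for auditing a backfill. The sketch below rests on two assumptions flagged in the comments: that `on_stream_complete(...)` is the public registration behind the `_on_stream_complete` handler appearing later in this diff, and that all gap callbacks arrive before the stream-completed frame. Both are plausible from the code shown but not guaranteed.

```python
# Sketch: collect gaps during a 24h orderbook replay, then summarize.
import asyncio
import time

from oxarchive import OxArchiveWs, WsOptions

async def main():
    ws = OxArchiveWs(WsOptions(api_key="ox_..."))
    gaps = []
    done = asyncio.Event()

    # Record each gap instead of printing it immediately
    ws.on_gap(lambda ch, coin, start, end, mins: gaps.append((ch, coin, start, end, mins)))
    # Assumed registration for the stream_completed frame (see the ws.py hunks below)
    ws.on_stream_complete(lambda ch, coin, sent: done.set())

    await ws.connect()
    now_ms = int(time.time() * 1000)
    await ws.replay("orderbook", "BTC", start=now_ms - 86400000, end=now_ms, speed=10)
    await done.wait()

    total = sum(mins for *_, mins in gaps)
    print(f"{len(gaps)} gap(s) totalling {total} minutes of missing data")

asyncio.run(main())
```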
@@ -68,7 +68,7 @@ except ImportError:
      OxArchiveWs = None  # type: ignore
      WsOptions = None  # type: ignore

- __version__ = "0.5.4"
+ __version__ = "0.6.1"

  __all__ = [
      # Client
@@ -12,6 +12,7 @@ from .resources import (
      InstrumentsResource,
      FundingResource,
      OpenInterestResource,
+     DataQualityResource,
  )

  DEFAULT_BASE_URL = "https://api.0xarchive.io"
@@ -92,6 +93,10 @@ class Client:
          self.lighter = LighterClient(self._http)
          """Lighter.xyz exchange data (August 2025+)"""

+         # Data quality monitoring (cross-exchange)
+         self.data_quality = DataQualityResource(self._http)
+         """Data quality metrics: status, coverage, incidents, latency, SLA"""
+
          # Legacy resource namespaces (deprecated - use client.hyperliquid.* instead)
          # These will be removed in v2.0
          # Note: Using /v1/hyperliquid base path for backward compatibility
@@ -7,6 +7,7 @@ from .funding import FundingResource
  from .openinterest import OpenInterestResource
  from .candles import CandlesResource
  from .liquidations import LiquidationsResource
+ from .data_quality import DataQualityResource

  __all__ = [
      "OrderBookResource",
@@ -17,4 +18,5 @@ __all__ = [
      "OpenInterestResource",
      "CandlesResource",
      "LiquidationsResource",
+     "DataQualityResource",
  ]
@@ -0,0 +1,336 @@
+ """Data quality API resource."""
+
+ from __future__ import annotations
+
+ from datetime import datetime
+ from typing import Literal, Optional
+
+ from ..http import HttpClient
+ from ..types import (
+     CoverageResponse,
+     ExchangeCoverage,
+     Incident,
+     IncidentsResponse,
+     LatencyResponse,
+     SlaResponse,
+     StatusResponse,
+     SymbolCoverageResponse,
+     Timestamp,
+ )
+
+
+ class DataQualityResource:
+     """
+     Data quality API resource.
+
+     Provides endpoints for monitoring data quality, coverage, incidents, and SLA metrics.
+
+     Example:
+         >>> # Get system status
+         >>> status = client.data_quality.status()
+         >>> print(f"System status: {status.status}")
+         >>>
+         >>> # Get coverage for all exchanges
+         >>> coverage = client.data_quality.coverage()
+         >>>
+         >>> # Get symbol-specific coverage with gap detection
+         >>> btc = client.data_quality.symbol_coverage("hyperliquid", "BTC")
+         >>> print(f"BTC completeness: {btc.data_types['orderbook'].completeness}%")
+         >>> for gap in btc.data_types['orderbook'].gaps[:5]:
+         ...     print(f"Gap: {gap.start} - {gap.end} ({gap.duration_minutes} min)")
+     """
+
+     def __init__(self, http: HttpClient, base_path: str = "/v1/data-quality"):
+         self._http = http
+         self._base_path = base_path
+
+     def _convert_timestamp(self, ts: Optional[Timestamp]) -> Optional[int]:
+         """Convert timestamp to Unix milliseconds."""
+         if ts is None:
+             return None
+         if isinstance(ts, int):
+             return ts
+         if isinstance(ts, datetime):
+             return int(ts.timestamp() * 1000)
+         if isinstance(ts, str):
+             try:
+                 dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))
+                 return int(dt.timestamp() * 1000)
+             except ValueError:
+                 return int(ts)
+         return None
+
+     # =========================================================================
+     # Status Endpoints
+     # =========================================================================
+
+     def status(self) -> StatusResponse:
+         """
+         Get overall system health status.
+
+         Returns:
+             StatusResponse with overall status, per-exchange status,
+             per-data-type status, and active incident count.
+
+         Example:
+             >>> status = client.data_quality.status()
+             >>> print(f"Overall: {status.status}")
+             >>> for exchange, info in status.exchanges.items():
+             ...     print(f"{exchange}: {info.status}")
+         """
+         data = self._http.get(f"{self._base_path}/status")
+         return StatusResponse.model_validate(data)
+
+     async def astatus(self) -> StatusResponse:
+         """Async version of status()."""
+         data = await self._http.aget(f"{self._base_path}/status")
+         return StatusResponse.model_validate(data)
+
+     # =========================================================================
+     # Coverage Endpoints
+     # =========================================================================
+
+     def coverage(self) -> CoverageResponse:
+         """
+         Get data coverage summary for all exchanges.
+
+         Returns:
+             CoverageResponse with coverage info for all exchanges and data types.
+
+         Example:
+             >>> coverage = client.data_quality.coverage()
+             >>> for exchange in coverage.exchanges:
+             ...     print(f"{exchange.exchange}:")
+             ...     for dtype, info in exchange.data_types.items():
+             ...         print(f"  {dtype}: {info.total_records} records")
+         """
+         data = self._http.get(f"{self._base_path}/coverage")
+         return CoverageResponse.model_validate(data)
+
+     async def acoverage(self) -> CoverageResponse:
+         """Async version of coverage()."""
+         data = await self._http.aget(f"{self._base_path}/coverage")
+         return CoverageResponse.model_validate(data)
+
+     def exchange_coverage(self, exchange: str) -> ExchangeCoverage:
+         """
+         Get data coverage for a specific exchange.
+
+         Args:
+             exchange: Exchange name ('hyperliquid' or 'lighter')
+
+         Returns:
+             ExchangeCoverage with coverage info for all data types on this exchange.
+
+         Example:
+             >>> hl = client.data_quality.exchange_coverage("hyperliquid")
+             >>> print(f"Orderbook earliest: {hl.data_types['orderbook'].earliest}")
+         """
+         data = self._http.get(f"{self._base_path}/coverage/{exchange.lower()}")
+         return ExchangeCoverage.model_validate(data)
+
+     async def aexchange_coverage(self, exchange: str) -> ExchangeCoverage:
+         """Async version of exchange_coverage()."""
+         data = await self._http.aget(f"{self._base_path}/coverage/{exchange.lower()}")
+         return ExchangeCoverage.model_validate(data)
+
+     def symbol_coverage(self, exchange: str, symbol: str) -> SymbolCoverageResponse:
+         """
+         Get data coverage for a specific symbol on an exchange.
+
+         Includes gap detection showing periods where data may be missing.
+
+         Args:
+             exchange: Exchange name ('hyperliquid' or 'lighter')
+             symbol: Symbol name (e.g., 'BTC', 'ETH')
+
+         Returns:
+             SymbolCoverageResponse with per-data-type coverage including gaps.
+
+         Example:
+             >>> btc = client.data_quality.symbol_coverage("hyperliquid", "BTC")
+             >>> oi = btc.data_types["open_interest"]
+             >>> print(f"OI completeness: {oi.completeness}%")
+             >>> print(f"Gaps found: {len(oi.gaps)}")
+             >>> for gap in oi.gaps[:3]:
+             ...     print(f"  {gap.duration_minutes} min gap at {gap.start}")
+         """
+         data = self._http.get(
+             f"{self._base_path}/coverage/{exchange.lower()}/{symbol.upper()}"
+         )
+         return SymbolCoverageResponse.model_validate(data)
+
+     async def asymbol_coverage(self, exchange: str, symbol: str) -> SymbolCoverageResponse:
+         """Async version of symbol_coverage()."""
+         data = await self._http.aget(
+             f"{self._base_path}/coverage/{exchange.lower()}/{symbol.upper()}"
+         )
+         return SymbolCoverageResponse.model_validate(data)
+
+     # =========================================================================
+     # Incidents Endpoints
+     # =========================================================================
+
+     def list_incidents(
+         self,
+         *,
+         status: Optional[Literal["open", "investigating", "identified", "monitoring", "resolved"]] = None,
+         exchange: Optional[str] = None,
+         since: Optional[Timestamp] = None,
+         limit: Optional[int] = None,
+         offset: Optional[int] = None,
+     ) -> IncidentsResponse:
+         """
+         List incidents with filtering and pagination.
+
+         Args:
+             status: Filter by incident status
+             exchange: Filter by exchange
+             since: Only show incidents starting after this timestamp
+             limit: Maximum results per page (default: 20, max: 100)
+             offset: Pagination offset
+
+         Returns:
+             IncidentsResponse with list of incidents and pagination info.
+
+         Example:
+             >>> # Get all open incidents
+             >>> result = client.data_quality.list_incidents(status="open")
+             >>> for incident in result.incidents:
+             ...     print(f"{incident.severity}: {incident.title}")
+         """
+         data = self._http.get(
+             f"{self._base_path}/incidents",
+             params={
+                 "status": status,
+                 "exchange": exchange,
+                 "since": self._convert_timestamp(since),
+                 "limit": limit,
+                 "offset": offset,
+             },
+         )
+         return IncidentsResponse.model_validate(data)
+
+     async def alist_incidents(
+         self,
+         *,
+         status: Optional[Literal["open", "investigating", "identified", "monitoring", "resolved"]] = None,
+         exchange: Optional[str] = None,
+         since: Optional[Timestamp] = None,
+         limit: Optional[int] = None,
+         offset: Optional[int] = None,
+     ) -> IncidentsResponse:
+         """Async version of list_incidents()."""
+         data = await self._http.aget(
+             f"{self._base_path}/incidents",
+             params={
+                 "status": status,
+                 "exchange": exchange,
+                 "since": self._convert_timestamp(since),
+                 "limit": limit,
+                 "offset": offset,
+             },
+         )
+         return IncidentsResponse.model_validate(data)
+
+     def get_incident(self, incident_id: str) -> Incident:
+         """
+         Get a specific incident by ID.
+
+         Args:
+             incident_id: The incident ID
+
+         Returns:
+             Incident details.
+
+         Example:
+             >>> incident = client.data_quality.get_incident("inc_123")
+             >>> print(f"Status: {incident.status}")
+             >>> print(f"Root cause: {incident.root_cause}")
+         """
+         data = self._http.get(f"{self._base_path}/incidents/{incident_id}")
+         return Incident.model_validate(data)
+
+     async def aget_incident(self, incident_id: str) -> Incident:
+         """Async version of get_incident()."""
+         data = await self._http.aget(f"{self._base_path}/incidents/{incident_id}")
+         return Incident.model_validate(data)
+
+     # =========================================================================
+     # Latency Endpoints
+     # =========================================================================
+
+     def latency(self) -> LatencyResponse:
+         """
+         Get current latency metrics for all exchanges.
+
+         Returns:
+             LatencyResponse with WebSocket, REST API, and data freshness metrics.
+
+         Example:
+             >>> latency = client.data_quality.latency()
+             >>> for exchange, metrics in latency.exchanges.items():
+             ...     print(f"{exchange}:")
+             ...     if metrics.websocket:
+             ...         print(f"  WS current: {metrics.websocket.current_ms}ms")
+             ...     print(f"  OB lag: {metrics.data_freshness.orderbook_lag_ms}ms")
+         """
+         data = self._http.get(f"{self._base_path}/latency")
+         return LatencyResponse.model_validate(data)
+
+     async def alatency(self) -> LatencyResponse:
+         """Async version of latency()."""
+         data = await self._http.aget(f"{self._base_path}/latency")
+         return LatencyResponse.model_validate(data)
+
+     # =========================================================================
+     # SLA Endpoints
+     # =========================================================================
+
+     def sla(
+         self,
+         *,
+         year: Optional[int] = None,
+         month: Optional[int] = None,
+     ) -> SlaResponse:
+         """
+         Get SLA compliance metrics for a specific month.
+
+         Args:
+             year: Year (defaults to current year)
+             month: Month 1-12 (defaults to current month)
+
+         Returns:
+             SlaResponse with SLA targets, actual metrics, and compliance status.
+
+         Example:
+             >>> sla = client.data_quality.sla(year=2026, month=1)
+             >>> print(f"Period: {sla.period}")
+             >>> print(f"Uptime: {sla.actual.uptime}% ({sla.actual.uptime_status})")
+             >>> print(f"Completeness: {sla.actual.data_completeness.overall}%")
+             >>> print(f"API P99: {sla.actual.api_latency_p99_ms}ms")
+         """
+         data = self._http.get(
+             f"{self._base_path}/sla",
+             params={
+                 "year": year,
+                 "month": month,
+             },
+         )
+         return SlaResponse.model_validate(data)
+
+     async def asla(
+         self,
+         *,
+         year: Optional[int] = None,
+         month: Optional[int] = None,
+     ) -> SlaResponse:
+         """Async version of sla()."""
+         data = await self._http.aget(
+             f"{self._base_path}/sla",
+             params={
+                 "year": year,
+                 "month": month,
+             },
+         )
+         return SlaResponse.model_validate(data)
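The `_convert_timestamp` helper above normalizes every accepted `Timestamp` form to Unix milliseconds. A standalone sketch of that equivalence, mirroring the helper's logic with illustrative values:

```python
from datetime import datetime, timezone

# All three forms below normalize to the same Unix-ms value
# (2026-01-01T00:00:00Z); values are illustrative.
as_int = 1767225600000
as_iso = "2026-01-01T00:00:00Z"  # the helper rewrites "Z" to "+00:00" first
as_dt = datetime(2026, 1, 1, tzinfo=timezone.utc)

assert int(datetime.fromisoformat(as_iso.replace("Z", "+00:00")).timestamp() * 1000) == as_int
assert int(as_dt.timestamp() * 1000) == as_int

# Any of them works as the `since` filter:
# client.data_quality.list_incidents(status="resolved", since=as_dt)
```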
@@ -555,6 +555,24 @@ class WsStreamStopped(BaseModel):
      snapshots_sent: int


+ class WsGapDetected(BaseModel):
+     """Gap detected in historical data stream.
+
+     Sent when there's a gap exceeding the threshold between consecutive data points.
+     Thresholds: 2 minutes for orderbook/candles/liquidations, 60 minutes for trades.
+     """
+
+     type: Literal["gap_detected"]
+     channel: WsChannel
+     coin: str
+     gap_start: int
+     """Start of the gap (last data point timestamp in ms)."""
+     gap_end: int
+     """End of the gap (next data point timestamp in ms)."""
+     duration_minutes: int
+     """Gap duration in minutes."""
+
+
  # =============================================================================
  # Error Types
  # =============================================================================
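Since `WsGapDetected` is a Pydantic model, a raw `gap_detected` frame validates directly into the typed event. A sketch with an invented payload; the `oxarchive.types` import path is inferred from the `from .types import` hunk later in this diff:

```python
from oxarchive.types import WsGapDetected  # inferred import path

# Invented example frame; values chosen so the arithmetic checks out
raw = {
    "type": "gap_detected",
    "channel": "orderbook",
    "coin": "BTC",
    "gap_start": 1767225600000,
    "gap_end": 1767225780000,  # 180,000 ms after gap_start
    "duration_minutes": 3,
}

event = WsGapDetected.model_validate(raw)
assert event.duration_minutes == 3  # above the 2-minute orderbook threshold
```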
@@ -593,3 +611,353 @@ class CursorResponse(BaseModel, Generic[T]):
  # Type alias for timestamp parameters
  Timestamp = Union[int, str, datetime]
  """Timestamp can be Unix ms (int), ISO string, or datetime object."""
+
+
+ # =============================================================================
+ # Data Quality Types
+ # =============================================================================
+
+
+ class SystemStatus(BaseModel):
+     """System status values: operational, degraded, outage, maintenance."""
+
+     status: Literal["operational", "degraded", "outage", "maintenance"]
+
+
+ class ExchangeStatus(BaseModel):
+     """Status of a single exchange."""
+
+     status: Literal["operational", "degraded", "outage", "maintenance"]
+     """Current status."""
+
+     last_data_at: Optional[datetime] = None
+     """Timestamp of last received data."""
+
+     latency_ms: Optional[int] = None
+     """Current latency in milliseconds."""
+
+
+ class DataTypeStatus(BaseModel):
+     """Status of a data type (orderbook, fills, etc.)."""
+
+     status: Literal["operational", "degraded", "outage", "maintenance"]
+     """Current status."""
+
+     completeness_24h: float
+     """Data completeness over last 24 hours (0-100)."""
+
+
+ class StatusResponse(BaseModel):
+     """Overall system status response."""
+
+     status: Literal["operational", "degraded", "outage", "maintenance"]
+     """Overall system status."""
+
+     updated_at: datetime
+     """When this status was computed."""
+
+     exchanges: dict[str, ExchangeStatus]
+     """Per-exchange status."""
+
+     data_types: dict[str, DataTypeStatus]
+     """Per-data-type status."""
+
+     active_incidents: int
+     """Number of active incidents."""
+
+
+ class DataTypeCoverage(BaseModel):
+     """Coverage information for a specific data type."""
+
+     earliest: datetime
+     """Earliest available data timestamp."""
+
+     latest: datetime
+     """Latest available data timestamp."""
+
+     total_records: int
+     """Total number of records."""
+
+     symbols: int
+     """Number of symbols with data."""
+
+     resolution: Optional[str] = None
+     """Data resolution (e.g., '1.2s', '1m')."""
+
+     lag: Optional[str] = None
+     """Current data lag."""
+
+     completeness: float
+     """Completeness percentage (0-100)."""
+
+
+ class ExchangeCoverage(BaseModel):
+     """Coverage for a single exchange."""
+
+     exchange: str
+     """Exchange name."""
+
+     data_types: dict[str, DataTypeCoverage]
+     """Coverage per data type."""
+
+
+ class CoverageResponse(BaseModel):
+     """Overall coverage response."""
+
+     exchanges: list[ExchangeCoverage]
+     """Coverage for all exchanges."""
+
+
+ class CoverageGap(BaseModel):
+     """Gap information for per-symbol coverage."""
+
+     start: datetime
+     """Start of the gap (last data before gap)."""
+
+     end: datetime
+     """End of the gap (first data after gap)."""
+
+     duration_minutes: int
+     """Duration of the gap in minutes."""
+
+
+ class SymbolDataTypeCoverage(BaseModel):
+     """Coverage for a specific symbol and data type."""
+
+     earliest: datetime
+     """Earliest available data timestamp."""
+
+     latest: datetime
+     """Latest available data timestamp."""
+
+     total_records: int
+     """Total number of records."""
+
+     completeness: float
+     """Completeness percentage (0-100)."""
+
+     gaps: list[CoverageGap]
+     """Detected data gaps."""
+
+
+ class SymbolCoverageResponse(BaseModel):
+     """Per-symbol coverage response."""
+
+     exchange: str
+     """Exchange name."""
+
+     symbol: str
+     """Symbol name."""
+
+     data_types: dict[str, SymbolDataTypeCoverage]
+     """Coverage per data type."""
+
+
+ class Incident(BaseModel):
+     """Data quality incident."""
+
+     id: str
+     """Unique incident ID."""
+
+     status: str
+     """Status: open, investigating, identified, monitoring, resolved."""
+
+     severity: str
+     """Severity: minor, major, critical."""
+
+     exchange: Optional[str] = None
+     """Affected exchange (if specific to one)."""
+
+     data_types: list[str]
+     """Affected data types."""
+
+     symbols_affected: list[str]
+     """Affected symbols."""
+
+     started_at: datetime
+     """When the incident started."""
+
+     resolved_at: Optional[datetime] = None
+     """When the incident was resolved."""
+
+     duration_minutes: Optional[int] = None
+     """Total duration in minutes."""
+
+     title: str
+     """Incident title."""
+
+     description: Optional[str] = None
+     """Detailed description."""
+
+     root_cause: Optional[str] = None
+     """Root cause analysis."""
+
+     resolution: Optional[str] = None
+     """Resolution details."""
+
+     records_affected: Optional[int] = None
+     """Number of records affected."""
+
+     records_recovered: Optional[int] = None
+     """Number of records recovered."""
+
+
+ class Pagination(BaseModel):
+     """Pagination info for incident list."""
+
+     total: int
+     """Total number of incidents."""
+
+     limit: int
+     """Page size limit."""
+
+     offset: int
+     """Current offset."""
+
+
+ class IncidentsResponse(BaseModel):
+     """Incidents list response."""
+
+     incidents: list[Incident]
+     """List of incidents."""
+
+     pagination: Pagination
+     """Pagination info."""
+
+
+ class WebSocketLatency(BaseModel):
+     """WebSocket latency metrics."""
+
+     current_ms: int
+     """Current latency."""
+
+     avg_1h_ms: int
+     """1-hour average latency."""
+
+     avg_24h_ms: int
+     """24-hour average latency."""
+
+     p99_24h_ms: Optional[int] = None
+     """24-hour P99 latency."""
+
+
+ class ApiLatency(BaseModel):
+     """REST API latency metrics."""
+
+     current_ms: int
+     """Current latency."""
+
+     avg_1h_ms: int
+     """1-hour average latency."""
+
+     avg_24h_ms: int
+     """24-hour average latency."""
+
+
+ class DataFreshness(BaseModel):
+     """Data freshness metrics (lag from source)."""
+
+     orderbook_lag_ms: Optional[int] = None
+     """Orderbook data lag."""
+
+     fills_lag_ms: Optional[int] = None
+     """Fills/trades data lag."""
+
+     funding_lag_ms: Optional[int] = None
+     """Funding rate data lag."""
+
+     oi_lag_ms: Optional[int] = None
+     """Open interest data lag."""
+
+
+ class ExchangeLatency(BaseModel):
+     """Latency metrics for a single exchange."""
+
+     websocket: Optional[WebSocketLatency] = None
+     """WebSocket latency metrics."""
+
+     rest_api: Optional[ApiLatency] = None
+     """REST API latency metrics."""
+
+     data_freshness: DataFreshness
+     """Data freshness metrics."""
+
+
+ class LatencyResponse(BaseModel):
+     """Overall latency response."""
+
+     measured_at: datetime
+     """When these metrics were measured."""
+
+     exchanges: dict[str, ExchangeLatency]
+     """Per-exchange latency metrics."""
+
+
+ class SlaTargets(BaseModel):
+     """SLA targets."""
+
+     uptime: float
+     """Uptime target percentage."""
+
+     data_completeness: float
+     """Data completeness target percentage."""
+
+     api_latency_p99_ms: int
+     """API P99 latency target in milliseconds."""
+
+
+ class CompletenessMetrics(BaseModel):
+     """Completeness metrics per data type."""
+
+     orderbook: float
+     """Orderbook completeness percentage."""
+
+     fills: float
+     """Fills completeness percentage."""
+
+     funding: float
+     """Funding rate completeness percentage."""
+
+     overall: float
+     """Overall completeness percentage."""
+
+
+ class SlaActual(BaseModel):
+     """Actual SLA metrics."""
+
+     uptime: float
+     """Actual uptime percentage."""
+
+     uptime_status: str
+     """'met' or 'missed'."""
+
+     data_completeness: CompletenessMetrics
+     """Actual completeness metrics."""
+
+     completeness_status: str
+     """'met' or 'missed'."""
+
+     api_latency_p99_ms: int
+     """Actual API P99 latency."""
+
+     latency_status: str
+     """'met' or 'missed'."""
+
+
+ class SlaResponse(BaseModel):
+     """SLA compliance response."""
+
+     period: str
+     """Period covered (e.g., '2026-01')."""
+
+     sla_targets: SlaTargets
+     """Target SLA metrics."""
+
+     actual: SlaActual
+     """Actual SLA metrics."""
+
+     incidents_this_period: int
+     """Number of incidents in this period."""
+
+     total_downtime_minutes: int
+     """Total downtime in minutes."""
@@ -64,6 +64,7 @@ from .types import (
      WsHistoricalBatch,
      WsStreamCompleted,
      WsStreamStopped,
+     WsGapDetected,
      TimestampedRecord,
  )

@@ -122,6 +123,9 @@ StreamStartHandler = Callable[[WsChannel, str, int, int], None]  # channel, coin
  StreamProgressHandler = Callable[[int], None]  # snapshots_sent
  StreamCompleteHandler = Callable[[WsChannel, str, int], None]  # channel, coin, snapshots_sent

+ # Gap detection handler
+ GapHandler = Callable[[WsChannel, str, int, int, int], None]  # channel, coin, gap_start, gap_end, duration_minutes
+

  def _transform_trade(coin: str, raw: dict) -> Trade:
      """Transform raw Hyperliquid trade format to SDK Trade type.
@@ -289,6 +293,9 @@ class OxArchiveWs:
          self._on_stream_progress: Optional[StreamProgressHandler] = None
          self._on_stream_complete: Optional[StreamCompleteHandler] = None

+         # Gap detection handler
+         self._on_gap: Optional[GapHandler] = None
+
      @property
      def state(self) -> WsConnectionState:
          """Get current connection state."""
@@ -626,6 +633,23 @@
          """
          self._on_stream_complete = handler

+     def on_gap(self, handler: GapHandler) -> None:
+         """Set handler for gap detected events during replay or streaming.
+
+         Called when there's a gap in the historical data exceeding the threshold.
+         Thresholds: 2 minutes for orderbook/candles/liquidations, 60 minutes for trades.
+
+         Handler receives: (channel, coin, gap_start, gap_end, duration_minutes)
+
+         Example:
+             >>> def handle_gap(channel, coin, gap_start, gap_end, duration_minutes):
+             ...     print(f"Gap detected in {channel} {coin}: {duration_minutes} minutes")
+             ...     print(f"  From: {datetime.fromtimestamp(gap_start/1000)}")
+             ...     print(f"  To: {datetime.fromtimestamp(gap_end/1000)}")
+             >>> ws.on_gap(handle_gap)
+         """
+         self._on_gap = handler
+
      # Private methods

      async def _send(self, msg: dict) -> None:
@@ -778,6 +802,16 @@
              elif msg_type == "stream_completed" and self._on_stream_complete:
                  self._on_stream_complete(data["channel"], data["coin"], data["snapshots_sent"])

+             # Gap detection
+             elif msg_type == "gap_detected" and self._on_gap:
+                 self._on_gap(
+                     data["channel"],
+                     data["coin"],
+                     data["gap_start"],
+                     data["gap_end"],
+                     data["duration_minutes"],
+                 )
+
          except Exception as e:
              logger.error(f"Error handling message: {e}")

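The dispatch above invokes the registered callback synchronously while a frame is being processed, so long-running work inside `on_gap` would stall the receive loop. One way to keep the handler cheap is to enqueue notifications and process them elsewhere; the queue pattern below is an illustrative choice, not SDK machinery:

```python
import asyncio

from oxarchive import OxArchiveWs, WsOptions

async def main():
    ws = OxArchiveWs(WsOptions(api_key="ox_..."))
    gap_queue: asyncio.Queue = asyncio.Queue()

    # Callback only enqueues; put_nowait is safe assuming the
    # callback runs on the event loop, as the async client suggests
    ws.on_gap(lambda ch, coin, start, end, mins:
              gap_queue.put_nowait((ch, coin, start, end, mins)))

    async def consume():
        while True:
            ch, coin, start, end, mins = await gap_queue.get()
            print(f"[{ch}/{coin}] {mins} min gap ({start} -> {end})")

    asyncio.create_task(consume())
    await ws.connect()
    # ...start a replay or stream here, as in the README examples...

asyncio.run(main())
```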
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "oxarchive"
- version = "0.5.4"
+ version = "0.6.1"
  description = "Official Python SDK for 0xarchive - Hyperliquid Historical Data API"
  readme = "README.md"
  license = "MIT"