onionoo-fastapi 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- app/__init__.py +1 -0
- app/main.py +151 -0
- app/mcp_stdio.py +189 -0
- app/mcp_tools.py +273 -0
- app/models/__init__.py +1 -0
- app/models/bandwidth.py +93 -0
- app/models/clients.py +31 -0
- app/models/details.py +261 -0
- app/models/envelope.py +80 -0
- app/models/history.py +37 -0
- app/models/misc.py +13 -0
- app/models/summary.py +62 -0
- app/models/uptime.py +50 -0
- app/models/weights.py +54 -0
- app/observability.py +139 -0
- app/routers/__init__.py +1 -0
- app/routers/aggregate.py +85 -0
- app/routers/bandwidth.py +50 -0
- app/routers/clients.py +47 -0
- app/routers/details.py +52 -0
- app/routers/params.py +224 -0
- app/routers/proxy.py +62 -0
- app/routers/summary.py +50 -0
- app/routers/uptime.py +47 -0
- app/routers/weights.py +48 -0
- app/services/__init__.py +1 -0
- app/services/aggregate.py +103 -0
- app/services/onionoo_client.py +302 -0
- app/settings.py +30 -0
- onionoo_fastapi-1.0.0.dist-info/METADATA +379 -0
- onionoo_fastapi-1.0.0.dist-info/RECORD +34 -0
- onionoo_fastapi-1.0.0.dist-info/WHEEL +4 -0
- onionoo_fastapi-1.0.0.dist-info/entry_points.txt +2 -0
- onionoo_fastapi-1.0.0.dist-info/licenses/LICENSE +22 -0
app/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""onionoo-fastapi package."""
|
app/main.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
from contextlib import asynccontextmanager
|
|
2
|
+
from time import monotonic
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
from fastapi import FastAPI, Request, Response
|
|
6
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
7
|
+
from fastapi.middleware.gzip import GZipMiddleware
|
|
8
|
+
from fastapi.responses import JSONResponse
|
|
9
|
+
from fastapi.routing import APIRoute
|
|
10
|
+
from fastapi_mcp import FastApiMCP
|
|
11
|
+
from prometheus_fastapi_instrumentator import Instrumentator
|
|
12
|
+
from slowapi import Limiter
|
|
13
|
+
from slowapi.util import get_remote_address
|
|
14
|
+
|
|
15
|
+
from app.observability import RequestIdMiddleware, configure_logging, logger
|
|
16
|
+
from app.routers import aggregate, bandwidth, clients, details, summary, uptime, weights
|
|
17
|
+
from app.services.onionoo_client import OnionooClient, UpstreamError
|
|
18
|
+
from app.settings import settings
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def create_app() -> FastAPI:
    """Build and fully configure the FastAPI application.

    Wires up, in order: logging, a lifespan-managed Onionoo HTTP client,
    middleware (request-id, optional CORS, gzip, optional rate limiting),
    optional Prometheus metrics, health probes, upstream error handlers,
    the /v1 routers, and the MCP HTTP mount.

    Returns:
        A ready-to-serve FastAPI instance.
    """
    configure_logging()

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # One shared upstream client for the whole process, plus a tiny cache
        # used by the readiness probe below. checked_at=0.0 means "never checked".
        app.state.onionoo = OnionooClient()
        app.state.ready_cache = {"checked_at": 0.0, "ok": False, "detail": ""}
        logger.info("app.startup", base_url=settings.onionoo_base_url)
        try:
            yield
        finally:
            # Release pooled upstream connections on shutdown.
            await app.state.onionoo.aclose()
            logger.info("app.shutdown")

    app = FastAPI(
        title="Onionoo FastAPI Proxy",
        version="1.0.0",
        description="Semantic/OpenAPI proxy for Tor Onionoo (data is fetched from Onionoo upstream).",
        lifespan=lifespan,
    )

    # Order matters: request-id should wrap everything; CORS before GZip.
    app.add_middleware(RequestIdMiddleware)
    if settings.cors_allowed_origins:
        app.add_middleware(
            CORSMiddleware,
            allow_origins=settings.cors_allowed_origins,
            allow_methods=["GET", "OPTIONS"],
            allow_headers=["*"],
            expose_headers=["X-Request-ID", "Last-Modified"],
            allow_credentials=False,
        )
    app.add_middleware(GZipMiddleware, minimum_size=1000)

    # The limiter is always constructed so `@limiter.exempt` decorators on the
    # health endpoints remain valid even when rate limiting is toggled off.
    # SlowAPIMiddleware is what actually enforces `default_limits`; without it
    # the limiter object is inert.
    limiter = Limiter(
        key_func=get_remote_address,
        default_limits=[f"{settings.rate_limit_per_minute}/minute"],
    )
    app.state.limiter = limiter
    if settings.rate_limit_enabled:
        from slowapi.middleware import SlowAPIMiddleware

        app.add_middleware(SlowAPIMiddleware)

    if settings.metrics_enabled:
        Instrumentator().instrument(app).expose(app, endpoint="/metrics", tags=["health"])
        # Prometheus scrapes shouldn't compete with end-user traffic for the
        # per-IP rate budget. Mark the route exempt regardless of toggle.
        for route in app.routes:
            if isinstance(route, APIRoute) and route.path == "/metrics":
                limiter.exempt(route.endpoint)
                break

    @app.get("/healthz", tags=["health"], summary="Liveness probe (static)")
    @limiter.exempt
    async def healthz() -> dict[str, str]:
        # Static liveness: no upstream contact, always 200.
        return {"status": "ok"}

    @app.get("/healthz/ready", tags=["health"], summary="Readiness probe (pings upstream)")
    @limiter.exempt
    async def healthz_ready(request: Request) -> Response:
        cache = request.app.state.ready_cache
        now = monotonic()
        # Serve the cached verdict while it is fresh; the truthiness test on
        # checked_at skips the never-checked initial state (0.0).
        if now - cache["checked_at"] < settings.healthz_ready_cache_seconds and cache["checked_at"]:
            payload = {"status": "ok" if cache["ok"] else "degraded", "detail": cache["detail"]}
            status = 200 if cache["ok"] else 503
            return JSONResponse(status_code=status, content=payload)

        client: OnionooClient = request.app.state.onionoo
        try:
            # Bypass the response cache: a cached 200 can mask a live outage.
            await client.ping()
            cache["ok"] = True
            cache["detail"] = "upstream reachable"
        except (UpstreamError, httpx.RequestError) as exc:
            cache["ok"] = False
            cache["detail"] = f"upstream unreachable: {exc}"
        cache["checked_at"] = now

        payload = {"status": "ok" if cache["ok"] else "degraded", "detail": cache["detail"]}
        return JSONResponse(status_code=200 if cache["ok"] else 503, content=payload)

    # Upstream HTTP errors are surfaced with the upstream's own status code
    # and body so callers can distinguish proxy faults from Onionoo faults.
    @app.exception_handler(UpstreamError)
    async def upstream_error_handler(_request: Request, exc: UpstreamError) -> Response:
        return JSONResponse(
            status_code=exc.status_code,
            content={
                "error": "upstream_error",
                "upstream_status": exc.status_code,
                "upstream_body": exc.body,
            },
        )

    # Transport-level failures (DNS, timeout, connect) become a plain 502.
    @app.exception_handler(httpx.RequestError)
    async def httpx_error_handler(_request: Request, exc: httpx.RequestError) -> Response:
        return JSONResponse(
            status_code=502,
            content={
                "error": "bad_gateway",
                "message": str(exc),
            },
        )

    app.include_router(summary.router, prefix="/v1", tags=["onionoo"])
    app.include_router(details.router, prefix="/v1", tags=["onionoo"])
    app.include_router(bandwidth.router, prefix="/v1", tags=["onionoo"])
    app.include_router(weights.router, prefix="/v1", tags=["onionoo"])
    app.include_router(clients.router, prefix="/v1", tags=["onionoo"])
    app.include_router(uptime.router, prefix="/v1", tags=["onionoo"])
    app.include_router(aggregate.router, prefix="/v1", tags=["aggregate"])

    # Expose the tagged REST routes as MCP tools over Streamable HTTP (/mcp).
    mcp = FastApiMCP(
        app,
        name="Onionoo MCP",
        description=(
            "Tor Onionoo data exposed as MCP tools. Read-only proxy: each tool wraps an "
            "Onionoo endpoint (summary, details, bandwidth, weights, clients, uptime) and "
            "accepts the same query parameters as the Onionoo spec."
        ),
        include_tags=["onionoo", "aggregate"],
    )
    mcp.mount_http()

    return app


# Module-level ASGI app for `uvicorn app.main:app`.
app = create_app()
|
app/mcp_stdio.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
"""Stdio MCP server entry point — `onionoo-mcp` console script.
|
|
2
|
+
|
|
3
|
+
Exposes both the six low-level Onionoo endpoints (`onionoo_summary`, ...,
|
|
4
|
+
`onionoo_uptime`) AND the high-level task-oriented tools defined in
|
|
5
|
+
`app.mcp_tools` (find_relay, get_relay_health, top_relays_by_bandwidth,
|
|
6
|
+
compare_relays, country_summary, aggregate_relays).
|
|
7
|
+
|
|
8
|
+
Use this when you want an MCP-native client like Claude Desktop or Cursor to
|
|
9
|
+
spawn a local subprocess and talk to it over stdio. For Streamable HTTP, run
|
|
10
|
+
the FastAPI app instead and connect to `/mcp`.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import asyncio
|
|
16
|
+
from typing import Any, Literal
|
|
17
|
+
|
|
18
|
+
from mcp.server.fastmcp import FastMCP
|
|
19
|
+
|
|
20
|
+
from app.mcp_tools import (
|
|
21
|
+
aggregate_relays,
|
|
22
|
+
compare_relays,
|
|
23
|
+
country_summary,
|
|
24
|
+
find_relay,
|
|
25
|
+
get_relay_health,
|
|
26
|
+
top_relays_by_bandwidth,
|
|
27
|
+
)
|
|
28
|
+
from app.services.onionoo_client import OnionooClient
|
|
29
|
+
|
|
30
|
+
_INSTRUCTIONS = """\
|
|
31
|
+
This server wraps the Tor Onionoo API. Use the high-level tools first:
|
|
32
|
+
- `find_relay` for free-form discovery
|
|
33
|
+
- `get_relay_health` for a single relay's status snapshot
|
|
34
|
+
- `top_relays_by_bandwidth` for ranking
|
|
35
|
+
- `compare_relays` for side-by-side
|
|
36
|
+
- `country_summary` for aggregates
|
|
37
|
+
|
|
38
|
+
Drop down to the low-level tools (onionoo_summary, onionoo_details, ...) when
|
|
39
|
+
you need raw Onionoo response shapes or filters that aren't covered above.
|
|
40
|
+
"""
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def build_server() -> tuple[FastMCP, OnionooClient]:
    """Build (but do not run) the stdio MCP server, wired to a fresh OnionooClient.

    Returns the server alongside its client so the entry point can call
    `client.aclose()` on shutdown without poking at private FastMCP state.
    """
    server = FastMCP("onionoo-mcp", instructions=_INSTRUCTIONS)
    # One shared HTTP client for every tool registered below; the caller owns
    # its shutdown (see _run_async).
    client = OnionooClient()

    # --- High-level tools ----------------------------------------------------
    # Each wrapper binds `client` via closure and delegates to app.mcp_tools,
    # so the same implementations back both the HTTP mount and this stdio server.

    @server.tool(
        name="find_relay",
        description=(
            "Find Tor relays or bridges by free-form query. Auto-detects whether the "
            "query is a 40-hex fingerprint (uses lookup), an AS number like 'AS3' "
            "(uses as filter), an IP address (uses search), or otherwise a "
            "nickname/substring (uses search). Returns a compact list with nickname, "
            "fingerprint, country, AS, flags, and addresses."
        ),
    )
    async def _find_relay(query: str, limit: int = 10) -> dict[str, Any]:
        return await find_relay(client, query, limit=limit)

    @server.tool(
        name="get_relay_health",
        description=(
            "Snapshot the health of one relay: details (flags, country, AS, version, "
            "advertised bandwidth) plus the available uptime and bandwidth history "
            "periods. Requires a 40-hex relay fingerprint."
        ),
    )
    async def _get_relay_health(fingerprint: str) -> dict[str, Any]:
        return await get_relay_health(client, fingerprint)

    @server.tool(
        name="top_relays_by_bandwidth",
        description=(
            "List the top N running relays sorted by consensus weight. Optional "
            "filters: country (two-letter code), flag (e.g. Exit, Guard, Fast). "
            "Returns minimal fields suitable for ranking."
        ),
    )
    async def _top_relays_by_bandwidth(
        country: str | None = None,
        flag: str | None = None,
        limit: int = 10,
    ) -> dict[str, Any]:
        return await top_relays_by_bandwidth(client, country=country, flag=flag, limit=limit)

    @server.tool(
        name="compare_relays",
        description=(
            "Compare several relays side-by-side by fetching their details in parallel. "
            "Pass a list of 40-hex fingerprints. Missing relays are reported with "
            "found=False rather than silently dropped."
        ),
    )
    async def _compare_relays(fingerprints: list[str]) -> dict[str, Any]:
        return await compare_relays(client, fingerprints)

    @server.tool(
        name="country_summary",
        description=(
            "Aggregate stats for running relays in one country: total count, "
            "total advertised bandwidth, total consensus weight, and a flag "
            "distribution histogram. Country is a two-letter ISO code."
        ),
    )
    async def _country_summary(country: str) -> dict[str, Any]:
        return await country_summary(client, country)

    @server.tool(
        name="aggregate_relays",
        description=(
            "Group all running Tor relays by country, AS, or flag. Returns "
            "buckets sorted by relay count (desc) with totals for advertised "
            "bandwidth and consensus weight. Use group_by='flags' to discover "
            "which roles dominate (Guard/Exit/Fast) — note a relay can belong "
            "to multiple flag buckets. Set top=N to keep only the largest N. "
            "The label set matches the REST endpoints /v1/aggregate/{countries,as,flags}."
        ),
    )
    async def _aggregate_relays(
        group_by: Literal["countries", "as", "flags"],
        running: bool = True,
        top: int | None = None,
    ) -> dict[str, Any]:
        return await aggregate_relays(client, group_by=group_by, running=running, top=top)

    # --- Low-level Onionoo passthrough --------------------------------------

    def _make_passthrough(method: str, description: str):
        # Factory so `method`/`description` are captured per tool call rather
        # than shared via a late-binding closure.
        @server.tool(name=f"onionoo_{method}", description=description)
        async def _passthrough(params: dict[str, Any] | None = None) -> dict[str, Any]:
            resp = await client.get(method=method, params=params or {}, if_modified_since=None)
            return {"status_code": resp.status_code, "body": resp.json_body}

        return _passthrough

    _make_passthrough(
        "summary",
        "Raw Onionoo /summary: lightweight relay/bridge list. Pass Onionoo query "
        "parameters as a dict (e.g. {'search': 'moria', 'limit': '1'}). See "
        "https://metrics.torproject.org/onionoo.html",
    )
    _make_passthrough(
        "details",
        "Raw Onionoo /details: full attributes (flags, AS, country, contact, …). "
        "Pass a params dict (e.g. {'lookup': '<40-hex fp>'}). Supports `fields=` "
        "to trim response. See https://metrics.torproject.org/onionoo.html",
    )
    _make_passthrough(
        "bandwidth",
        "Raw Onionoo /bandwidth: bandwidth time series for relays and bridges.",
    )
    _make_passthrough(
        "weights",
        "Raw Onionoo /weights: consensus weight / path-selection probability "
        "time series (relays only).",
    )
    _make_passthrough(
        "clients",
        "Raw Onionoo /clients: estimated daily client counts (bridges only).",
    )
    _make_passthrough(
        "uptime",
        "Raw Onionoo /uptime: fractional uptime time series.",
    )

    return server, client
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
async def _run_async() -> None:
    """Serve MCP over stdio until the client disconnects, then clean up."""
    mcp_server, onionoo = build_server()
    try:
        await mcp_server.run_stdio_async()
    finally:
        # Always release the shared HTTP client, even if the stdio loop fails.
        await onionoo.aclose()
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def main() -> None:
    """Synchronous console-script entry point (`onionoo-mcp`)."""
    asyncio.run(_run_async())


if __name__ == "__main__":
    main()
|
app/mcp_tools.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
"""High-level, task-oriented MCP tools.
|
|
2
|
+
|
|
3
|
+
Each function composes one or more Onionoo endpoints into an LLM-friendly result.
|
|
4
|
+
These are *not* thin REST wrappers — they pick the right endpoint, fan out
|
|
5
|
+
requests in parallel, and aggregate. Use them when an agent should accomplish
|
|
6
|
+
a goal ("how healthy is this relay?") in a single tool call.
|
|
7
|
+
|
|
8
|
+
All tools operate against an injected `OnionooClient` so they can be tested
|
|
9
|
+
without network access via respx.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
import asyncio
|
|
15
|
+
import re
|
|
16
|
+
from typing import Any, Literal
|
|
17
|
+
|
|
18
|
+
from app.services.aggregate import aggregate_details
|
|
19
|
+
from app.services.onionoo_client import OnionooClient, UpstreamError
|
|
20
|
+
|
|
21
|
+
FINGERPRINT_RE = re.compile(r"^[0-9a-fA-F]{40}$")
|
|
22
|
+
AS_RE = re.compile(r"^AS\d+$", re.IGNORECASE)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _looks_like_fingerprint(query: str) -> bool:
|
|
26
|
+
return bool(FINGERPRINT_RE.match(query.strip()))
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _looks_like_as(query: str) -> bool:
|
|
30
|
+
return bool(AS_RE.match(query.strip()))
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
async def find_relay(client: OnionooClient, query: str, *, limit: int = 10) -> dict[str, Any]:
    """Locate relays/bridges from a free-form query string.

    Detection order: 40-hex fingerprint -> Onionoo `lookup`; 'AS<digits>' ->
    the `as` filter; anything else (nickname, substring, IP) -> `search`.
    Each match is condensed to a handful of identifying fields.
    """
    text = query.strip()

    if _looks_like_fingerprint(text):
        mode, value = "lookup", text.upper()
    elif _looks_like_as(text):
        mode, value = "as", text.upper()
    else:
        mode, value = "search", text

    params: dict[str, Any] = {"limit": str(limit), mode: value}
    resp = await client.get(method="details", params=params, if_modified_since=None)
    payload = resp.json_body or {}

    def _condense(entry: dict[str, Any]) -> dict[str, Any]:
        return {
            "nickname": entry.get("nickname"),
            # Bridges report `hashed_fingerprint` instead of `fingerprint`.
            "fingerprint": entry.get("fingerprint") or entry.get("hashed_fingerprint"),
            "running": entry.get("running"),
            "country": entry.get("country"),
            "as": entry.get("as"),
            "flags": entry.get("flags"),
            "or_addresses": entry.get("or_addresses"),
        }

    return {
        "query": text,
        "matched_via": mode,
        "relays": [_condense(r) for r in payload.get("relays", [])],
        "bridges": [_condense(b) for b in payload.get("bridges", [])],
    }
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
async def get_relay_health(client: OnionooClient, fingerprint: str) -> dict[str, Any]:
    """Single-relay health snapshot combining details, uptime, and bandwidth.

    The three Onionoo queries run concurrently; the result is a short,
    LLM-readable dict. Returns {"found": False} when the fingerprint matches
    nothing. Raises ValueError on a malformed fingerprint and UpstreamError
    if any sub-fetch fails.
    """
    fp = fingerprint.strip().upper()
    if not _looks_like_fingerprint(fp):
        raise ValueError(f"fingerprint must be 40 hex chars, got {fingerprint!r}")

    query = {"lookup": fp, "limit": "1"}

    details_resp, uptime_resp, bandwidth_resp = await asyncio.gather(
        client.get(method="details", params=query, if_modified_since=None),
        client.get(method="uptime", params=query, if_modified_since=None),
        client.get(method="bandwidth", params=query, if_modified_since=None),
    )

    matches = (details_resp.json_body or {}).get("relays", [])
    if not matches:
        return {"fingerprint": fp, "found": False}

    detail = matches[0]
    # Uptime/bandwidth may legitimately have no entry; fall back to an empty dict.
    up_entry = ((uptime_resp.json_body or {}).get("relays") or [{}])[0]
    bw_entry = ((bandwidth_resp.json_body or {}).get("relays") or [{}])[0]

    snapshot: dict[str, Any] = {"fingerprint": fp, "found": True}
    for key in (
        "nickname",
        "running",
        "country",
        "as",
        "as_name",
        "flags",
        "first_seen",
        "last_seen",
        "advertised_bandwidth",
        "consensus_weight",
        "version",
        "recommended_version",
    ):
        snapshot[key] = detail.get(key)
    snapshot["uptime_periods"] = sorted((up_entry.get("uptime") or {}).keys())
    snapshot["bandwidth_periods"] = {
        "read": sorted((bw_entry.get("read_history") or {}).keys()),
        "write": sorted((bw_entry.get("write_history") or {}).keys()),
    }
    return snapshot
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
async def top_relays_by_bandwidth(
    client: OnionooClient,
    *,
    country: str | None = None,
    flag: str | None = None,
    limit: int = 10,
) -> dict[str, Any]:
    """Rank running relays by consensus weight, highest first.

    Optional `country` (two-letter code) and `flag` (e.g. Exit, Guard) narrow
    the candidate set. A `fields=` projection keeps the upstream payload small.
    """
    wanted = (
        "nickname",
        "fingerprint",
        "country",
        "as",
        "as_name",
        "flags",
        "advertised_bandwidth",
        "consensus_weight",
        "consensus_weight_fraction",
    )
    params: dict[str, Any] = {
        "running": "true",
        # Onionoo sorts ascending by default; the leading '-' flips to descending.
        "order": "-consensus_weight",
        "limit": str(limit),
        "fields": ",".join(wanted),
    }
    for name, value in (("country", country), ("flag", flag)):
        if value:
            params[name] = value

    resp = await client.get(method="details", params=params, if_modified_since=None)
    matches = (resp.json_body or {}).get("relays", [])
    return {
        "country": country,
        "flag": flag,
        "count": len(matches),
        "relays": matches,
    }
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
async def compare_relays(client: OnionooClient, fingerprints: list[str]) -> dict[str, Any]:
    """Side-by-side comparison of several relays, one parallel lookup each.

    Fingerprints are normalized to upper case and validated up front
    (ValueError on a malformed one). Relays Onionoo does not know come back
    with found=False instead of being dropped; per-relay upstream failures
    are reported as found=False with an `error` string.
    """
    if not fingerprints:
        return {"relays": []}

    cleaned = [raw.strip().upper() for raw in fingerprints]
    for candidate in cleaned:
        if not _looks_like_fingerprint(candidate):
            raise ValueError(f"invalid fingerprint: {candidate!r}")

    async def _fetch(fp: str) -> dict[str, Any]:
        try:
            resp = await client.get(
                method="details",
                params={"lookup": fp, "limit": "1"},
                if_modified_since=None,
            )
        except UpstreamError as exc:
            # Surface this relay's upstream failure without failing the batch.
            return {"fingerprint": fp, "found": False, "error": str(exc)}
        matches = (resp.json_body or {}).get("relays", [])
        if not matches:
            return {"fingerprint": fp, "found": False}
        detail = matches[0]
        row: dict[str, Any] = {"fingerprint": fp, "found": True}
        for key in (
            "nickname",
            "running",
            "country",
            "as",
            "flags",
            "advertised_bandwidth",
            "consensus_weight",
            "version",
        ):
            row[key] = detail.get(key)
        return row

    rows = await asyncio.gather(*(_fetch(fp) for fp in cleaned))
    return {"relays": rows}
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
async def country_summary(
    client: OnionooClient,
    country: str,
    *,
    limit: int = 200,
) -> dict[str, Any]:
    """Aggregate Tor relays for one country: total count, flag distribution, weight.

    Issues a single Onionoo /details query filtered by country and computes
    the totals client-side.

    Args:
        client: Onionoo HTTP client used for the upstream fetch.
        country: Two-letter ISO country code (any case; normalized to lower).
        limit: Maximum relays to fetch from Onionoo. Defaults to 200 (the
            previously hard-coded value); when a country has more running
            relays than this, `truncated` in the result mirrors Onionoo's
            `relays_truncated` field.

    Returns:
        Dict with the running relay count, total advertised bandwidth (bytes/s),
        total consensus weight, and a flag histogram sorted by frequency (desc).
    """
    country_code = country.strip().lower()
    params = {
        "country": country_code,
        "running": "true",
        "limit": str(limit),
        # Project only the fields the aggregation below actually reads.
        "fields": "fingerprint,nickname,flags,advertised_bandwidth,consensus_weight",
    }
    resp = await client.get(method="details", params=params, if_modified_since=None)
    body = resp.json_body or {}
    relays = body.get("relays", [])

    total_advertised_bw = 0
    total_weight = 0
    flag_counts: dict[str, int] = {}
    for relay in relays:
        # `or 0` guards against missing/None values in the upstream payload.
        total_advertised_bw += int(relay.get("advertised_bandwidth") or 0)
        total_weight += int(relay.get("consensus_weight") or 0)
        for f in relay.get("flags") or []:
            flag_counts[f] = flag_counts.get(f, 0) + 1

    return {
        "country": country_code,
        "running_relay_count": len(relays),
        "truncated": body.get("relays_truncated"),
        "total_advertised_bandwidth_bps": total_advertised_bw,
        "total_consensus_weight": total_weight,
        "flag_counts": dict(sorted(flag_counts.items(), key=lambda kv: kv[1], reverse=True)),
    }
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# Public group_by labels (matching the REST paths /v1/aggregate/{countries,as,flags})
# mapped to the internal keys aggregate_details expects. The singular forms are
# kept as accepted aliases for backwards compatibility.
_AGGREGATE_GROUP_BY_ALIASES = {
    "countries": "country",
    "country": "country",
    "as": "as",
    "flags": "flag",
    "flag": "flag",
}
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
async def aggregate_relays(
    client: OnionooClient,
    *,
    group_by: Literal["countries", "as", "flags"],
    running: bool = True,
    top: int | None = None,
) -> dict[str, Any]:
    """Bucket running Tor relays by country, AS, or flag.

    Thin wrapper over `app.services.aggregate.aggregate_details`. `group_by`
    accepts the REST-style plural labels (and their singular aliases, kept for
    backwards compatibility); anything else raises ValueError. `top` trims the
    result to the N largest buckets.
    """
    if (internal := _AGGREGATE_GROUP_BY_ALIASES.get(group_by)) is None:
        raise ValueError(
            f"group_by must be one of {sorted(_AGGREGATE_GROUP_BY_ALIASES)}, got {group_by!r}"
        )
    return await aggregate_details(client, group_by=internal, running=running, limit=top)
|
app/models/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Pydantic models for API responses."""
|