riotskillissue-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- riotskillissue/__init__.py +5 -0
- riotskillissue/api/__init__.py +0 -0
- riotskillissue/api/client_mixin.py +131 -0
- riotskillissue/api/endpoints/account.py +188 -0
- riotskillissue/api/endpoints/champion-mastery.py +143 -0
- riotskillissue/api/endpoints/champion.py +40 -0
- riotskillissue/api/endpoints/champion_mastery.py +154 -0
- riotskillissue/api/endpoints/clash.py +176 -0
- riotskillissue/api/endpoints/league-exp.py +54 -0
- riotskillissue/api/endpoints/league.py +226 -0
- riotskillissue/api/endpoints/league_exp.py +56 -0
- riotskillissue/api/endpoints/lol-challenges.py +197 -0
- riotskillissue/api/endpoints/lol-rso-match.py +124 -0
- riotskillissue/api/endpoints/lol-status.py +38 -0
- riotskillissue/api/endpoints/lol_challenges.py +214 -0
- riotskillissue/api/endpoints/lol_rso_match.py +132 -0
- riotskillissue/api/endpoints/lol_status.py +40 -0
- riotskillissue/api/endpoints/lor-deck.py +65 -0
- riotskillissue/api/endpoints/lor-inventory.py +38 -0
- riotskillissue/api/endpoints/lor-match.py +73 -0
- riotskillissue/api/endpoints/lor-ranked.py +38 -0
- riotskillissue/api/endpoints/lor-status.py +38 -0
- riotskillissue/api/endpoints/lor_deck.py +70 -0
- riotskillissue/api/endpoints/lor_inventory.py +40 -0
- riotskillissue/api/endpoints/lor_match.py +78 -0
- riotskillissue/api/endpoints/lor_ranked.py +40 -0
- riotskillissue/api/endpoints/lor_status.py +40 -0
- riotskillissue/api/endpoints/match.py +170 -0
- riotskillissue/api/endpoints/riftbound-content.py +42 -0
- riotskillissue/api/endpoints/riftbound_content.py +44 -0
- riotskillissue/api/endpoints/spectator-tft.py +42 -0
- riotskillissue/api/endpoints/spectator.py +44 -0
- riotskillissue/api/endpoints/spectator_tft.py +44 -0
- riotskillissue/api/endpoints/summoner.py +74 -0
- riotskillissue/api/endpoints/tft-league.py +240 -0
- riotskillissue/api/endpoints/tft-match.py +89 -0
- riotskillissue/api/endpoints/tft-status.py +38 -0
- riotskillissue/api/endpoints/tft-summoner.py +69 -0
- riotskillissue/api/endpoints/tft_league.py +260 -0
- riotskillissue/api/endpoints/tft_match.py +94 -0
- riotskillissue/api/endpoints/tft_status.py +40 -0
- riotskillissue/api/endpoints/tft_summoner.py +74 -0
- riotskillissue/api/endpoints/tournament-stub.py +162 -0
- riotskillissue/api/endpoints/tournament.py +243 -0
- riotskillissue/api/endpoints/tournament_stub.py +176 -0
- riotskillissue/api/endpoints/val-console-match.py +108 -0
- riotskillissue/api/endpoints/val-console-ranked.py +54 -0
- riotskillissue/api/endpoints/val-content.py +42 -0
- riotskillissue/api/endpoints/val-match.py +104 -0
- riotskillissue/api/endpoints/val-ranked.py +50 -0
- riotskillissue/api/endpoints/val-status.py +38 -0
- riotskillissue/api/endpoints/val_console_match.py +116 -0
- riotskillissue/api/endpoints/val_console_ranked.py +56 -0
- riotskillissue/api/endpoints/val_content.py +44 -0
- riotskillissue/api/endpoints/val_match.py +112 -0
- riotskillissue/api/endpoints/val_ranked.py +52 -0
- riotskillissue/api/endpoints/val_status.py +40 -0
- riotskillissue/api/models.py +4295 -0
- riotskillissue/auth.py +76 -0
- riotskillissue/cli.py +84 -0
- riotskillissue/core/cache.py +60 -0
- riotskillissue/core/client.py +47 -0
- riotskillissue/core/config.py +19 -0
- riotskillissue/core/http.py +144 -0
- riotskillissue/core/pagination.py +63 -0
- riotskillissue/core/ratelimit.py +185 -0
- riotskillissue/core/types.py +57 -0
- riotskillissue/core/utils.py +25 -0
- riotskillissue/static.py +71 -0
- riotskillissue/testing.py +46 -0
- riotskillissue-0.1.0.dist-info/METADATA +29 -0
- riotskillissue-0.1.0.dist-info/RECORD +75 -0
- riotskillissue-0.1.0.dist-info/WHEEL +4 -0
- riotskillissue-0.1.0.dist-info/entry_points.txt +2 -0
- riotskillissue-0.1.0.dist-info/licenses/LICENSE +21 -0
riotskillissue/auth.py
ADDED
@@ -0,0 +1,76 @@
from typing import Optional, Dict, Any
from dataclasses import dataclass
import httpx
from riotskillissue.core.http import RiotAPIError

@dataclass
class RsoConfig:
    client_id: str
    client_secret: str
    redirect_uri: str
    provider: str = "https://auth.riotgames.com"

@dataclass
class TokenResponse:
    access_token: str
    refresh_token: str
    id_token: str
    expires_in: int
    scope: str

class RsoClient:
    """
    Helper for Riot Sign-On (OAuth2).
    """
    def __init__(self, config: RsoConfig):
        self.config = config
        self.http = httpx.AsyncClient()

    def get_auth_url(self, scope: str = "openid") -> str:
        """Generate the login URL for the user."""
        base = f"{self.config.provider}/authorize"
        return (
            f"{base}?client_id={self.config.client_id}"
            f"&redirect_uri={self.config.redirect_uri}"
            f"&response_type=code"
            f"&scope={scope}"
        )

    async def exchange_code(self, code: str) -> TokenResponse:
        """Exchange the auth code for tokens."""
        url = f"{self.config.provider}/token"

        resp = await self.http.post(
            url,
            auth=(self.config.client_id, self.config.client_secret),
            data={
                "grant_type": "authorization_code",
                "code": code,
                "redirect_uri": self.config.redirect_uri,
            }
        )

        if not resp.is_success:
            raise RiotAPIError(resp.status_code, resp.text, resp)

        data = resp.json()
        return TokenResponse(**data)

    async def refresh_token(self, refresh_token: str) -> TokenResponse:
        """Get a new access token using a refresh token."""
        url = f"{self.config.provider}/token"

        resp = await self.http.post(
            url,
            auth=(self.config.client_id, self.config.client_secret),
            data={
                "grant_type": "refresh_token",
                "refresh_token": refresh_token,
            }
        )

        if not resp.is_success:
            raise RiotAPIError(resp.status_code, resp.text, resp)

        data = resp.json()
        return TokenResponse(**data)
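For orientation, a minimal sketch of the RSO flow built on the helper above; the client credentials, redirect URI, and authorization code are placeholders, not values from the package:

import asyncio
from riotskillissue.auth import RsoClient, RsoConfig

async def main() -> None:
    # Placeholder credentials; register an RSO client with Riot to obtain real ones.
    config = RsoConfig(
        client_id="my-client-id",
        client_secret="my-client-secret",
        redirect_uri="https://example.com/oauth2-callback",
    )
    rso = RsoClient(config)

    # 1. Send the user here to log in; Riot redirects back with ?code=...
    print("Visit:", rso.get_auth_url(scope="openid offline_access"))

    # 2. Exchange the code from the callback for tokens.
    tokens = await rso.exchange_code("code-from-callback")
    print("access token expires in", tokens.expires_in, "seconds")

    # 3. Later, refresh without user interaction.
    tokens = await rso.refresh_token(tokens.refresh_token)

asyncio.run(main())
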
riotskillissue/cli.py
ADDED
@@ -0,0 +1,84 @@
import asyncio
import typer
from rich.console import Console
from rich.table import Table
from rich import print as rprint
from .core.client import RiotClient, RiotClientConfig
from .core.types import Region

app = typer.Typer(help="RiotSkillIssue API Wrapper CLI")
console = Console()

@app.command()
def summoner(name: str, region: str = "na1", api_key: str = typer.Option(None, envvar="RIOT_API_KEY")):
    """Get summoner details by Riot ID (GameName#TagLine)."""

    async def _run():
        config = RiotClientConfig(api_key=api_key)
        async with RiotClient(config=config) as client:
            try:
                # Current API specs drop Summoner-V4's by-name lookup, so the CLI
                # resolves the Riot ID through Account-V1 first and then fetches
                # the summoner by PUUID.

                # Step 1: Account (tag line required)
                if "#" in name:
                    game_name, tag_line = name.split("#")
                else:
                    rprint("[red]Name must be format GameName#TagLine for Account V1 lookup[/red]")
                    return

                # Account-V1 lives on regional clusters ('americas', 'europe', ...).
                # A full implementation would map platform -> cluster; the CLI
                # defaults to 'americas' for simplicity.
                rprint(f"[bold blue]Fetching {game_name}#{tag_line}...[/bold blue]")

                cluster = "americas"

                account = await client.account.get_by_riot_id(region=cluster, gameName=game_name, tagLine=tag_line)
                puuid = account.puuid

                # Step 2: Summoner lookup by PUUID
                summ = await client.summoner.get_by_puuid(region=region, encryptedPUUID=puuid)

                table = Table(title=f"Summoner: {game_name}#{tag_line}")
                table.add_column("Level", style="magenta")
                table.add_column("PUUID", style="cyan", no_wrap=True)
                table.add_row(str(summ.summonerLevel), summ.puuid)

                console.print(table)

            except Exception as e:
                rprint(f"[red]Error: {e}[/red]")

    asyncio.run(_run())

@app.command()
def match(match_id: str, region: str = "americas", api_key: str = typer.Option(None, envvar="RIOT_API_KEY")):
    """Get match details."""
    async def _run():
        config = RiotClientConfig(api_key=api_key)
        async with RiotClient(config=config) as client:
            try:
                m = await client.match.get_match(region=region, matchId=match_id)
                rprint(f"[green]Match {match_id} loaded![/green]")
                rprint(f"Game Mode: {m.info.gameMode}")
                rprint(f"Duration: {m.info.gameDuration}s")
            except Exception as e:
                rprint(f"[red]Error: {e}[/red]")

    asyncio.run(_run())

def main():
    app()
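A hedged sketch of exercising these commands without installing the console script (whose name is defined in entry_points.txt), using Typer's test runner; the Riot ID and API key are placeholders:

from typer.testing import CliRunner
from riotskillissue.cli import app

runner = CliRunner()

# Equivalent to running the installed console script's `summoner` command.
result = runner.invoke(
    app,
    ["summoner", "GameName#TAG", "--region", "na1", "--api-key", "RGAPI-placeholder"],
)
print(result.output)
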
riotskillissue/core/cache.py
ADDED
@@ -0,0 +1,60 @@
from abc import ABC, abstractmethod
from typing import Optional, Any
import time
import asyncio

class AbstractCache(ABC):
    @abstractmethod
    async def get(self, key: str) -> Optional[Any]:
        pass

    @abstractmethod
    async def set(self, key: str, value: Any, ttl: int) -> None:
        pass

class MemoryCache(AbstractCache):
    def __init__(self):
        self._store = {}
        self._lock = asyncio.Lock()

    async def get(self, key: str) -> Optional[Any]:
        async with self._lock:
            if key in self._store:
                val, expire_at = self._store[key]
                if time.time() < expire_at:
                    return val
                else:
                    del self._store[key]
            return None

    async def set(self, key: str, value: Any, ttl: int) -> None:
        async with self._lock:
            self._store[key] = (value, time.time() + ttl)

class NoOpCache(AbstractCache):
    async def get(self, key: str) -> Optional[Any]:
        return None

    async def set(self, key: str, value: Any, ttl: int) -> None:
        pass

try:
    from redis.asyncio import Redis
    import pickle

    class RedisCache(AbstractCache):
        def __init__(self, redis_url: str):
            self.redis = Redis.from_url(redis_url)

        async def get(self, key: str) -> Optional[Any]:
            val = await self.redis.get(key)
            if val:
                return pickle.loads(val)
            return None

        async def set(self, key: str, value: Any, ttl: int) -> None:
            val = pickle.dumps(value)
            await self.redis.set(key, val, ex=ttl)

except ImportError:
    pass
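A small sketch showing the cache contract in isolation; the same object can be handed to RiotClient's cache parameter (see core/client.py below):

import asyncio
from riotskillissue.core.cache import MemoryCache

async def main() -> None:
    cache = MemoryCache()

    # Store a value for 2 seconds, then read it back.
    await cache.set("summoner:na1:abc", {"summonerLevel": 412}, ttl=2)
    print(await cache.get("summoner:na1:abc"))   # {'summonerLevel': 412}

    # After the TTL expires the entry is evicted on the next get().
    await asyncio.sleep(2.1)
    print(await cache.get("summoner:na1:abc"))   # None

asyncio.run(main())
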
riotskillissue/core/client.py
ADDED
@@ -0,0 +1,47 @@
from typing import Optional, Type, TypeVar
from types import TracebackType

from riotskillissue.core.config import RiotClientConfig
from riotskillissue.core.http import HttpClient

from riotskillissue.api.client_mixin import GeneratedClientMixin

from riotskillissue.core.cache import AbstractCache

class RiotClient(GeneratedClientMixin):
    """
    Main entry point for the Riot Games API.

    Usage:
        async with RiotClient(api_key="...") as client:
            await client.summoner.get_by_puuid(...)
    """
    def __init__(self, api_key: Optional[str] = None, config: Optional[RiotClientConfig] = None, cache: Optional[AbstractCache] = None, hooks: Optional[dict] = None):
        if config is None:
            if api_key:
                # Create config from key
                config = RiotClientConfig(api_key=api_key)
            else:
                # Load from env
                config = RiotClientConfig.from_env()

        self.config = config
        self.http = HttpClient(config, cache=cache, hooks=hooks)

        # Static Data
        from riotskillissue.static import DataDragonClient
        self.static = DataDragonClient(cache=cache)

        # Initialize generated APIs
        super().__init__(self.http)

    async def __aenter__(self) -> "RiotClient":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType]
    ) -> None:
        await self.http.close()
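A minimal usage sketch, assuming a valid key in RIOT_API_KEY; the account and summoner attributes and their method names are taken from how cli.py above uses the generated mixin:

import asyncio
from riotskillissue.core.client import RiotClient
from riotskillissue.core.cache import MemoryCache

async def main() -> None:
    # With neither api_key nor config passed, the config is loaded from the environment.
    async with RiotClient(cache=MemoryCache()) as client:
        account = await client.account.get_by_riot_id(
            region="americas", gameName="GameName", tagLine="TAG"
        )
        summ = await client.summoner.get_by_puuid(region="na1", encryptedPUUID=account.puuid)
        print(summ.summonerLevel)

asyncio.run(main())
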
riotskillissue/core/config.py
ADDED
@@ -0,0 +1,19 @@
import os
from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class RiotClientConfig:
    api_key: str
    redis_url: Optional[str] = None
    max_retries: int = 3
    connect_timeout: float = 5.0
    read_timeout: float = 10.0

    @classmethod
    def from_env(cls) -> "RiotClientConfig":
        return cls(
            api_key=os.environ.get("RIOT_API_KEY", ""),
            redis_url=os.environ.get("RIOT_REDIS_URL"),
            max_retries=int(os.environ.get("RIOT_MAX_RETRIES", "3")),
        )
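For illustration, the two ways a config can be built; note that from_env currently reads only the API key, Redis URL, and retry count, so the timeouts keep their defaults:

import os
from riotskillissue.core.config import RiotClientConfig

# Explicit construction (placeholder key).
cfg = RiotClientConfig(api_key="RGAPI-placeholder", max_retries=5, read_timeout=15.0)

# Environment-based construction.
os.environ["RIOT_API_KEY"] = "RGAPI-placeholder"
os.environ["RIOT_MAX_RETRIES"] = "5"
cfg = RiotClientConfig.from_env()
print(cfg.max_retries, cfg.connect_timeout)  # 5 5.0
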
riotskillissue/core/http.py
ADDED
@@ -0,0 +1,144 @@
import asyncio
import logging
from contextlib import asynccontextmanager
from typing import Optional, Any, AsyncGenerator

import httpx
from tenacity import retry, wait_exponential, retry_if_exception_type, stop_after_attempt

from riotskillissue.core.config import RiotClientConfig
from riotskillissue.core.types import Region, Platform
from riotskillissue.core.ratelimit import AbstractRateLimiter, MemoryRateLimiter, RedisRateLimiter
from riotskillissue.core.cache import AbstractCache, NoOpCache

logger = logging.getLogger(__name__)

class RiotAPIError(Exception):
    def __init__(self, status: int, message: str, response: httpx.Response):
        self.status = status
        self.message = message
        self.response = response
        super().__init__(f"[{status}] {message}")

class RateLimitError(RiotAPIError):
    def __init__(self, response: httpx.Response, retry_after: float):
        super().__init__(429, f"Rate limited. Retry after {retry_after}s", response)
        self.retry_after = retry_after

class ServerError(RiotAPIError):
    pass

class HttpClient:
    def __init__(self, config: RiotClientConfig, rate_limiter: Optional[AbstractRateLimiter] = None, cache: Optional[AbstractCache] = None, hooks: Optional[dict] = None):
        self.config = config
        self.cache = cache or NoOpCache()
        self.hooks = hooks or {}
        self._client = httpx.AsyncClient(
            headers={"X-Riot-Token": config.api_key},
            timeout=httpx.Timeout(
                config.read_timeout,
                connect=config.connect_timeout
            ),
        )
        if rate_limiter:
            self.limiter = rate_limiter
        elif config.redis_url:
            self.limiter = RedisRateLimiter(config.redis_url)
        else:
            self.limiter = MemoryRateLimiter()

    async def close(self) -> None:
        await self._client.aclose()

    async def request(
        self,
        method: str,
        url: str,
        region_or_platform: str,
        **kwargs: Any
    ) -> httpx.Response:
        """
        Executes a request with rate limiting and retries.
        """
        # 1. Cache check (GET only)
        if method.upper() == "GET":
            # Simple cache key: URL + stringified params
            params = kwargs.get("params", {})
            param_key = sorted(params.items()) if params else ""
            cache_key = f"{method}:{url}:{region_or_platform}:{param_key}"

            cached = await self.cache.get(cache_key)
            if cached:
                status, headers, content = cached
                return httpx.Response(status_code=status, headers=headers, content=content)

        # Hook: onRequest
        if "request" in self.hooks:
            await self.hooks["request"](method, url, kwargs)

        # 2. Execute with retry
        response = await self._execute_with_retry(method, url, region_or_platform, **kwargs)

        # Hook: onResponse
        if "response" in self.hooks:
            await self.hooks["response"](response)

        # 3. Cache set (200 responses only, default TTL 60s)
        if method.upper() == "GET" and response.status_code == 200:
            # Rebuild the cache key to be safe in case kwargs were modified.
            params = kwargs.get("params", {})
            param_key = sorted(params.items()) if params else ""
            cache_key = f"{method}:{url}:{region_or_platform}:{param_key}"

            await self.cache.set(cache_key, (response.status_code, dict(response.headers), response.content), ttl=60)

        return response

    @retry(
        wait=wait_exponential(multiplier=1, min=1, max=10),
        stop=stop_after_attempt(3),  # uses config.max_retries in real usage
        retry=retry_if_exception_type((httpx.NetworkError, httpx.TimeoutException, httpx.RemoteProtocolError, ServerError)),
        reraise=True
    )
    async def _execute_with_retry(self, method: str, url: str, key: str, **kwargs: Any) -> httpx.Response:
        # TODO: Inject rate limit acquisition here once we have full method context.
        # For now, simplistic.

        # Construct full URL if needed
        if not url.startswith("https://"):
            # e.g. https://na1.api.riotgames.com/lol/summoner/v4/...
            host = f"https://{key}.api.riotgames.com"
            full_url = f"{host}{url}"
        else:
            full_url = url

        try:
            response = await self._client.request(method, full_url, **kwargs)
        except httpx.RequestError as e:
            logger.warning(f"Network error accessing {full_url}: {e}")
            raise

        # Handle specific status codes
        if response.status_code == 429:
            retry_after = float(response.headers.get("Retry-After", "1"))
            logger.warning(f"Rate limited (429) on {key}. Wait {retry_after}s.")
            # Riot distinguishes app/method rate limits (with headers) from service
            # rules (429 without them). Raise so the caller or an outer loop decides;
            # typically the caller sleeps for retry_after and retries.
            raise RateLimitError(response, retry_after)

        if response.status_code >= 500:
            logger.warning(f"Server error {response.status_code} on {key}")
            raise ServerError(response.status_code, "Server Error", response)

        if not response.is_success:
            # 400s, 401s, 403s, 404s
            raise RiotAPIError(response.status_code, response.text, response)

        # Update rate limits from response headers (not yet wired up)
        # app_limits = response.headers.get("X-App-Rate-Limit")
        # app_counts = response.headers.get("X-App-Rate-Limit-Count")
        # await self.limiter.update(key, app_counts, app_limits)

        return response
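A sketch of the request/response hooks accepted by HttpClient and forwarded by RiotClient; the keys "request" and "response" are the ones request() looks up above, and the endpoint call mirrors the one in cli.py (the API key and Riot ID are placeholders):

import asyncio
import httpx
from riotskillissue.core.client import RiotClient

async def log_request(method: str, url: str, kwargs: dict) -> None:
    # Called before the request is executed.
    print(f"-> {method} {url}")

async def log_response(response: httpx.Response) -> None:
    # Called after the response is received (and before caching).
    print(f"<- {response.status_code} {response.url}")

async def main() -> None:
    hooks = {"request": log_request, "response": log_response}
    async with RiotClient(api_key="RGAPI-placeholder", hooks=hooks) as client:
        await client.account.get_by_riot_id(region="americas", gameName="GameName", tagLine="TAG")

asyncio.run(main())
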
riotskillissue/core/pagination.py
ADDED
@@ -0,0 +1,63 @@
from typing import TypeVar, AsyncIterator, Callable, Protocol, Any, List
import asyncio

T = TypeVar("T")

class PaginatedMethod(Protocol):
    async def __call__(self, *, start: int, count: int, **kwargs: Any) -> List[Any]: ...

async def paginate(
    method: Callable[..., Any],
    *,
    start: int = 0,
    count: int = 100,  # Default page size
    max_results: float = float("inf"),
    **kwargs: Any
) -> AsyncIterator[T]:
    """
    Async iterator for paginated endpoints using start/count.

    Usage:
        async for match_id in paginate(client.match.get_ids_by_puuid, puuid="...", count=100):
            print(match_id)

    Args:
        method: The API method to call.
        start: Initial offset.
        count: Items per page (passed to the method as 'count').
        max_results: Total items to yield before stopping.
        **kwargs: Arguments passed to the method (e.g. puuid, region).
    """

    current_start = start
    yielded = 0

    while yielded < max_results:
        # Determine batch size: don't fetch more than needed if max_results is near.
        remaining = max_results - yielded
        batch_size = min(count, remaining)

        # Call the API. Assumptions:
        # 1. The method accepts 'start' and 'count'.
        # 2. The method returns a list.
        results = await method(start=current_start, count=batch_size, **kwargs)

        if not results:
            break

        for item in results:
            yield item
            yielded += 1
            if yielded >= max_results:
                return

        # Prepare next page
        current_start += len(results)

        # If the API returned fewer items than requested, the list is probably
        # exhausted. Some Riot endpoints can return short pages when filtering,
        # so this is only a heuristic; an empty page is the definitive stop.
        if len(results) < batch_size:
            break
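A hedged sketch of driving paginate() against a match-history style endpoint; get_ids_by_puuid is the method named in the docstring above and is assumed here to accept region/puuid alongside start/count (the key and PUUID are placeholders):

import asyncio
from riotskillissue.core.client import RiotClient
from riotskillissue.core.pagination import paginate

async def main() -> None:
    async with RiotClient(api_key="RGAPI-placeholder") as client:
        # Walk up to 250 match IDs, 100 per request, stopping early on an empty page.
        async for match_id in paginate(
            client.match.get_ids_by_puuid,
            region="americas",
            puuid="player-puuid",
            count=100,
            max_results=250,
        ):
            print(match_id)

asyncio.run(main())
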
riotskillissue/core/ratelimit.py
ADDED
@@ -0,0 +1,185 @@
import asyncio
import logging
import time
from abc import ABC, abstractmethod
from typing import Optional, List, Tuple

try:
    import redis.asyncio as redis
except ImportError:
    redis = None  # type: ignore

logger = logging.getLogger(__name__)

class RateLimitBucket:
    """Represents a single rate limit bucket (e.g., 20 requests per 1 second)."""
    def __init__(self, limit: int, window: int):
        self.limit = limit
        self.window = window

    def __repr__(self) -> str:
        return f"{self.limit}:{self.window}"

def parse_rate_limits(header_value: str) -> List[RateLimitBucket]:
    """Parses Riot limit headers like '20:1,100:120'."""
    if not header_value:
        return []
    buckets = []
    for part in header_value.split(','):
        try:
            limit, window = map(int, part.split(':'))
            buckets.append(RateLimitBucket(limit, window))
        except ValueError:
            pass
    return buckets

class AbstractRateLimiter(ABC):
    @abstractmethod
    async def acquire(self, key: str, limits: List[RateLimitBucket]) -> None:
        """Wait until a request can be made."""
        pass

    @abstractmethod
    async def update(self, key: str, counts: str, limits: Optional[str] = None) -> None:
        """Update state based on response headers."""
        pass

class MemoryRateLimiter(AbstractRateLimiter):
    def __init__(self) -> None:
        # key -> {window_size: [request timestamps]}
        self._buckets: dict[str, dict[int, list[float]]] = {}
        self._lock = asyncio.Lock()

    async def acquire(self, key: str, limits: List[RateLimitBucket]) -> None:
        async with self._lock:
            now = time.time()
            max_wait = 0.0

            # Check all buckets for this key
            key_buckets = self._buckets.setdefault(key, {})

            for bucket in limits:
                window_requests = key_buckets.setdefault(bucket.window, [])

                # Prune requests that have left the window
                cutoff = now - bucket.window
                while window_requests and window_requests[0] <= cutoff:
                    window_requests.pop(0)

                # Update list after prune
                key_buckets[bucket.window] = window_requests

                if len(window_requests) >= bucket.limit:
                    # Wait until the oldest request expires: (oldest + window) - now
                    oldest = window_requests[0]
                    wait_time = (oldest + bucket.window) - now
                    if wait_time > max_wait:
                        max_wait = wait_time

            if max_wait > 0:
                logger.debug(f"Rate limit hit for {key}, waiting {max_wait:.2f}s")
                await asyncio.sleep(max_wait)
                # After sleeping we simply take the freed slot; a stricter
                # implementation would re-check the windows here.

            # Reserve a spot in every bucket
            now = time.time()  # update time after a possible sleep
            for bucket in limits:
                self._buckets[key][bucket.window].append(now)

    async def update(self, key: str, counts: str, limits: Optional[str] = None) -> None:
        # The in-memory limiter is single-process and self-contained, so it trusts
        # its own counts rather than syncing with response headers.
        pass

class RedisRateLimiter(AbstractRateLimiter):
    def __init__(self, redis_url: str) -> None:
        if redis is None:
            raise ImportError("redis package is required for RedisRateLimiter")
        self._redis = redis.from_url(redis_url)

        # Lua script for an atomic sliding window (result: "0" = allowed, >0 = seconds to wait)
        # ARGV[1] = current_time
        # ARGV[2] = count of buckets (N)
        # ARGV[3..3+N-1] = limits
        # ARGV[3+N..3+2N-1] = windows
        # KEYS[1..N] = keys for each bucket
        self._acquire_script = self._redis.register_script("""
            local now = tonumber(ARGV[1])
            local n_buckets = tonumber(ARGV[2])

            -- Check all buckets first
            for i = 1, n_buckets do
                local limit = tonumber(ARGV[2 + i])
                local window = tonumber(ARGV[2 + n_buckets + i])
                local key = KEYS[i]

                -- Cleanup old members
                local clear_before = now - window
                redis.call('ZREMRANGEBYSCORE', key, 0, clear_before)

                -- Count current
                local count = redis.call('ZCARD', key)

                if count >= limit then
                    -- Find oldest to determine wait
                    local oldest = redis.call('ZRANGE', key, 0, 0, 'WITHSCORES')
                    local wait = 1.0 -- default fallback
                    if oldest and oldest[2] then
                        wait = (tonumber(oldest[2]) + window) - now
                    end
                    if wait < 0 then wait = 0 end
                    return tostring(wait) -- Return wait time (string for safety)
                end
            end

            -- Consume
            for i = 1, n_buckets do
                local key = KEYS[i]
                local window = tonumber(ARGV[2 + n_buckets + i])

                redis.call('ZADD', key, now, now)
                redis.call('EXPIRE', key, window + 1)
            end

            return "0"
        """)

    async def acquire(self, key: str, limits: List[RateLimitBucket]) -> None:
        if not limits:
            return

        now = time.time()

        # Prepare keys and args. Each bucket definition gets its own key so
        # overlapping windows do not collide: riot:rl:<key>:<window>
        keys = [f"riot:rl:{key}:{b.window}" for b in limits]
        limit_args = [b.limit for b in limits]
        window_args = [b.window for b in limits]

        args = [now, len(limits)] + limit_args + window_args

        # Run script
        res = await self._acquire_script(keys=keys, args=args)
        wait_time = float(res)

        if wait_time > 0:
            logger.debug(f"Rate limit hit for {key}, waiting {wait_time:.2f}s (Redis)")
            await asyncio.sleep(wait_time)
            # Retry
            await self.acquire(key, limits)

    async def update(self, key: str, counts: str, limits: Optional[str] = None) -> None:
        # Syncing with X-App-Rate-Limit-Count is deliberately skipped: the Lua
        # script already enforces the limits, and the distinct timestamps of
        # requests made by other processes cannot be reconstructed from a count
        # header anyway. Only needed when sharing quota with apps outside this limiter.
        pass
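A small sketch showing how the header parser and the in-memory limiter fit together, using the header format from the parse_rate_limits docstring; with a 20-per-second bucket, requests beyond the 20th each second sleep until a slot frees up:

import asyncio
from riotskillissue.core.ratelimit import MemoryRateLimiter, parse_rate_limits

async def main() -> None:
    limiter = MemoryRateLimiter()
    # e.g. an X-App-Rate-Limit header: 20 requests per 1s and 100 per 120s.
    limits = parse_rate_limits("20:1,100:120")

    async def call(i: int) -> None:
        # Sleeps once a bucket is full, then records the request timestamp.
        await limiter.acquire("na1", limits)
        print(f"request {i} allowed")

    await asyncio.gather(*(call(i) for i in range(25)))

asyncio.run(main())
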