wayfinder-paths 0.1.23__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of wayfinder-paths may be problematic — click here for more details.
- wayfinder_paths/adapters/balance_adapter/adapter.py +250 -0
- wayfinder_paths/adapters/balance_adapter/manifest.yaml +8 -0
- wayfinder_paths/adapters/balance_adapter/test_adapter.py +0 -11
- wayfinder_paths/adapters/boros_adapter/__init__.py +17 -0
- wayfinder_paths/adapters/boros_adapter/adapter.py +1574 -0
- wayfinder_paths/adapters/boros_adapter/client.py +476 -0
- wayfinder_paths/adapters/boros_adapter/manifest.yaml +10 -0
- wayfinder_paths/adapters/boros_adapter/parsers.py +88 -0
- wayfinder_paths/adapters/boros_adapter/test_adapter.py +460 -0
- wayfinder_paths/adapters/boros_adapter/test_golden.py +156 -0
- wayfinder_paths/adapters/boros_adapter/types.py +70 -0
- wayfinder_paths/adapters/boros_adapter/utils.py +85 -0
- wayfinder_paths/adapters/brap_adapter/adapter.py +1 -1
- wayfinder_paths/adapters/brap_adapter/manifest.yaml +9 -0
- wayfinder_paths/adapters/hyperlend_adapter/adapter.py +161 -26
- wayfinder_paths/adapters/hyperlend_adapter/manifest.yaml +9 -0
- wayfinder_paths/adapters/hyperlend_adapter/test_adapter.py +77 -13
- wayfinder_paths/adapters/hyperliquid_adapter/__init__.py +2 -9
- wayfinder_paths/adapters/hyperliquid_adapter/adapter.py +585 -61
- wayfinder_paths/adapters/hyperliquid_adapter/executor.py +47 -68
- wayfinder_paths/adapters/hyperliquid_adapter/manifest.yaml +14 -0
- wayfinder_paths/adapters/hyperliquid_adapter/paired_filler.py +2 -3
- wayfinder_paths/adapters/hyperliquid_adapter/test_adapter.py +17 -21
- wayfinder_paths/adapters/hyperliquid_adapter/test_adapter_live.py +3 -6
- wayfinder_paths/adapters/hyperliquid_adapter/test_executor.py +4 -8
- wayfinder_paths/adapters/hyperliquid_adapter/test_utils.py +2 -2
- wayfinder_paths/adapters/ledger_adapter/manifest.yaml +7 -0
- wayfinder_paths/adapters/ledger_adapter/test_adapter.py +1 -2
- wayfinder_paths/adapters/moonwell_adapter/adapter.py +592 -400
- wayfinder_paths/adapters/moonwell_adapter/manifest.yaml +14 -0
- wayfinder_paths/adapters/moonwell_adapter/test_adapter.py +126 -219
- wayfinder_paths/adapters/multicall_adapter/__init__.py +7 -0
- wayfinder_paths/adapters/multicall_adapter/adapter.py +166 -0
- wayfinder_paths/adapters/multicall_adapter/manifest.yaml +5 -0
- wayfinder_paths/adapters/multicall_adapter/test_adapter.py +97 -0
- wayfinder_paths/adapters/pendle_adapter/README.md +102 -0
- wayfinder_paths/adapters/pendle_adapter/__init__.py +7 -0
- wayfinder_paths/adapters/pendle_adapter/adapter.py +1992 -0
- wayfinder_paths/adapters/pendle_adapter/examples.json +11 -0
- wayfinder_paths/adapters/pendle_adapter/manifest.yaml +21 -0
- wayfinder_paths/adapters/pendle_adapter/test_adapter.py +666 -0
- wayfinder_paths/adapters/pool_adapter/manifest.yaml +6 -0
- wayfinder_paths/adapters/token_adapter/examples.json +0 -4
- wayfinder_paths/adapters/token_adapter/manifest.yaml +7 -0
- wayfinder_paths/conftest.py +24 -17
- wayfinder_paths/core/adapters/BaseAdapter.py +0 -25
- wayfinder_paths/core/adapters/models.py +17 -7
- wayfinder_paths/core/clients/BRAPClient.py +1 -1
- wayfinder_paths/core/clients/TokenClient.py +47 -1
- wayfinder_paths/core/clients/WayfinderClient.py +1 -2
- wayfinder_paths/core/clients/protocols.py +21 -22
- wayfinder_paths/core/clients/test_ledger_client.py +448 -0
- wayfinder_paths/core/config.py +12 -0
- wayfinder_paths/core/constants/__init__.py +15 -0
- wayfinder_paths/core/constants/base.py +6 -1
- wayfinder_paths/core/constants/contracts.py +39 -26
- wayfinder_paths/core/constants/erc20_abi.py +0 -1
- wayfinder_paths/core/constants/hyperlend_abi.py +0 -4
- wayfinder_paths/core/constants/hyperliquid.py +16 -0
- wayfinder_paths/core/constants/moonwell_abi.py +0 -15
- wayfinder_paths/core/engine/manifest.py +66 -0
- wayfinder_paths/core/strategies/Strategy.py +0 -61
- wayfinder_paths/core/strategies/__init__.py +10 -1
- wayfinder_paths/core/strategies/opa_loop.py +167 -0
- wayfinder_paths/core/utils/test_transaction.py +289 -0
- wayfinder_paths/core/utils/transaction.py +44 -1
- wayfinder_paths/core/utils/web3.py +3 -0
- wayfinder_paths/mcp/__init__.py +5 -0
- wayfinder_paths/mcp/preview.py +185 -0
- wayfinder_paths/mcp/scripting.py +84 -0
- wayfinder_paths/mcp/server.py +52 -0
- wayfinder_paths/mcp/state/profile_store.py +195 -0
- wayfinder_paths/mcp/state/store.py +89 -0
- wayfinder_paths/mcp/test_scripting.py +267 -0
- wayfinder_paths/mcp/tools/__init__.py +0 -0
- wayfinder_paths/mcp/tools/balances.py +290 -0
- wayfinder_paths/mcp/tools/discovery.py +158 -0
- wayfinder_paths/mcp/tools/execute.py +770 -0
- wayfinder_paths/mcp/tools/hyperliquid.py +931 -0
- wayfinder_paths/mcp/tools/quotes.py +288 -0
- wayfinder_paths/mcp/tools/run_script.py +286 -0
- wayfinder_paths/mcp/tools/strategies.py +188 -0
- wayfinder_paths/mcp/tools/tokens.py +46 -0
- wayfinder_paths/mcp/tools/wallets.py +354 -0
- wayfinder_paths/mcp/utils.py +129 -0
- wayfinder_paths/policies/hyperliquid.py +1 -1
- wayfinder_paths/policies/lifi.py +18 -0
- wayfinder_paths/policies/util.py +8 -2
- wayfinder_paths/strategies/basis_trading_strategy/strategy.py +28 -119
- wayfinder_paths/strategies/basis_trading_strategy/test_strategy.py +24 -53
- wayfinder_paths/strategies/boros_hype_strategy/__init__.py +3 -0
- wayfinder_paths/strategies/boros_hype_strategy/boros_ops_mixin.py +450 -0
- wayfinder_paths/strategies/boros_hype_strategy/constants.py +255 -0
- wayfinder_paths/strategies/boros_hype_strategy/examples.json +37 -0
- wayfinder_paths/strategies/boros_hype_strategy/hyperevm_ops_mixin.py +114 -0
- wayfinder_paths/strategies/boros_hype_strategy/hyperliquid_ops_mixin.py +642 -0
- wayfinder_paths/strategies/boros_hype_strategy/manifest.yaml +36 -0
- wayfinder_paths/strategies/boros_hype_strategy/planner.py +460 -0
- wayfinder_paths/strategies/boros_hype_strategy/risk_ops_mixin.py +886 -0
- wayfinder_paths/strategies/boros_hype_strategy/snapshot_mixin.py +494 -0
- wayfinder_paths/strategies/boros_hype_strategy/strategy.py +1194 -0
- wayfinder_paths/strategies/boros_hype_strategy/test_planner_golden.py +374 -0
- wayfinder_paths/strategies/boros_hype_strategy/test_strategy.py +202 -0
- wayfinder_paths/strategies/boros_hype_strategy/types.py +365 -0
- wayfinder_paths/strategies/boros_hype_strategy/withdraw_mixin.py +997 -0
- wayfinder_paths/strategies/hyperlend_stable_yield_strategy/strategy.py +3 -12
- wayfinder_paths/strategies/hyperlend_stable_yield_strategy/test_strategy.py +7 -29
- wayfinder_paths/strategies/moonwell_wsteth_loop_strategy/strategy.py +63 -40
- wayfinder_paths/strategies/moonwell_wsteth_loop_strategy/test_strategy.py +5 -15
- wayfinder_paths/strategies/stablecoin_yield_strategy/strategy.py +0 -34
- wayfinder_paths/strategies/stablecoin_yield_strategy/test_strategy.py +11 -34
- wayfinder_paths/tests/test_mcp_quote_swap.py +165 -0
- wayfinder_paths/tests/test_test_coverage.py +1 -4
- wayfinder_paths-0.1.24.dist-info/METADATA +378 -0
- wayfinder_paths-0.1.24.dist-info/RECORD +185 -0
- {wayfinder_paths-0.1.23.dist-info → wayfinder_paths-0.1.24.dist-info}/WHEEL +1 -1
- wayfinder_paths/scripts/create_strategy.py +0 -139
- wayfinder_paths/scripts/make_wallets.py +0 -142
- wayfinder_paths-0.1.23.dist-info/METADATA +0 -354
- wayfinder_paths-0.1.23.dist-info/RECORD +0 -120
- /wayfinder_paths/{scripts → mcp/state}/__init__.py +0 -0
- {wayfinder_paths-0.1.23.dist-info → wayfinder_paths-0.1.24.dist-info}/LICENSE +0 -0
|
@@ -0,0 +1,1992 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from collections.abc import Awaitable, Callable, Sequence
|
|
5
|
+
from datetime import UTC, datetime
|
|
6
|
+
from typing import Any, Literal
|
|
7
|
+
|
|
8
|
+
import httpx
|
|
9
|
+
from eth_utils import to_checksum_address
|
|
10
|
+
|
|
11
|
+
from wayfinder_paths.adapters.multicall_adapter.adapter import MulticallAdapter
|
|
12
|
+
from wayfinder_paths.core.adapters.BaseAdapter import BaseAdapter
|
|
13
|
+
from wayfinder_paths.core.constants.contracts import TOKENS_REQUIRING_APPROVAL_RESET
|
|
14
|
+
from wayfinder_paths.core.constants.erc20_abi import ERC20_ABI
|
|
15
|
+
from wayfinder_paths.core.utils.tokens import (
|
|
16
|
+
build_approve_transaction,
|
|
17
|
+
get_token_allowance,
|
|
18
|
+
get_token_balance,
|
|
19
|
+
)
|
|
20
|
+
from wayfinder_paths.core.utils.transaction import send_transaction
|
|
21
|
+
from wayfinder_paths.core.utils.web3 import web3_from_chain_id
|
|
22
|
+
|
|
23
|
+
# Available fields for historical data endpoint
PENDLE_HISTORY_FIELDS = [
    "timestamp",
    "baseApy",
    "impliedApy",
    "lastEpochVotes",
    "lpPrice",
    "lpRewardApy",
    "maxApy",
    "pendleApy",
    "ptPrice",
    "swapFeeApy",
    "syPrice",
    "totalPt",
    "totalSupply",
    "totalSy",
    "totalTvl",
    "tradingVolume",
    "tvl",
    "underlyingApy",
    "underlyingInterestApy",
    "underlyingRewardApy",
    "voterApr",
    "ytFloatingApy",
    "ytPrice",
]

# Default fields to fetch for historical data (comma-joined, as the API expects)
DEFAULT_HISTORY_FIELDS = (
    "ptPrice,ytPrice,impliedApy,underlyingApy,tvl,totalTvl,lpPrice,syPrice"
)

# Convenience mapping for target chains.
# - Arbitrum One: 42161
# - Base: 8453
# - HyperEVM (Hyperliquid EVM mainnet): 999
PENDLE_CHAIN_IDS: dict[str, int] = {
    "arbitrum": 42161,
    "base": 8453,
    "hyperevm": 999,
}

# Raw-content root hosting Pendle's published per-chain deployment JSON files.
PENDLE_DEFAULT_DEPLOYMENTS_BASE_URL = "https://raw.githubusercontent.com/pendle-finance/pendle-core-v2-public/main/deployments"

# Minimal ABI subset of Pendle's RouterStatic contract: the four view
# functions used by this adapter for off-chain LP/PT -> SY/asset rate checks.
PENDLE_ROUTER_STATIC_ABI: list[dict[str, Any]] = [
    {
        "inputs": [{"internalType": "address", "name": "market", "type": "address"}],
        "name": "getLpToSyRate",
        "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "address", "name": "market", "type": "address"}],
        "name": "getPtToSyRate",
        "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "address", "name": "market", "type": "address"}],
        "name": "getLpToAssetRate",
        "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "stateMutability": "view",
        "type": "function",
    },
    {
        "inputs": [{"internalType": "address", "name": "market", "type": "address"}],
        "name": "getPtToAssetRate",
        "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}],
        "stateMutability": "view",
        "type": "function",
    },
]

# A chain may be passed as an int chainId or a string alias / numeric string.
ChainLike = int | str
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _now_utc() -> datetime:
|
|
102
|
+
return datetime.now(UTC)
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _parse_iso8601(s: str) -> datetime:
|
|
106
|
+
# Handles "2024-03-28T00:00:00.000Z"
|
|
107
|
+
return datetime.fromisoformat(s.replace("Z", "+00:00"))
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _split_pendle_id(pendle_id_or_address: str) -> tuple[int | None, str]:
|
|
111
|
+
"""
|
|
112
|
+
Pendle APIs sometimes return "id" fields like "42161-0xabc...".
|
|
113
|
+
This returns (chain_id, address). If it's already an address, chain_id is None.
|
|
114
|
+
"""
|
|
115
|
+
if not pendle_id_or_address:
|
|
116
|
+
return None, pendle_id_or_address
|
|
117
|
+
if "-" not in pendle_id_or_address:
|
|
118
|
+
return None, pendle_id_or_address
|
|
119
|
+
chain_str, addr = pendle_id_or_address.split("-", 1)
|
|
120
|
+
try:
|
|
121
|
+
return int(chain_str), addr
|
|
122
|
+
except ValueError:
|
|
123
|
+
return None, addr
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def _as_address(pendle_id_or_address: str) -> str:
    """Strip any "chainId-" prefix from a Pendle id, keeping only the address."""
    _chain_id, address = _split_pendle_id(pendle_id_or_address)
    return address
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def _as_chain_id(chain: ChainLike) -> int:
    """Normalize *chain* to an integer chainId.

    Accepts an int (returned as-is), a known alias from PENDLE_CHAIN_IDS
    (case-insensitive), or a numeric string like "42161".

    Raises:
        ValueError: When the string is neither a known alias nor numeric.
    """
    if isinstance(chain, int):
        return chain
    normalized = chain.strip().lower()
    mapped = PENDLE_CHAIN_IDS.get(normalized)
    if mapped is not None:
        return mapped
    # Allow passing "42161" as a string
    try:
        return int(normalized)
    except ValueError as exc:
        raise ValueError(
            f"Unknown chain '{chain}'. Use int chainId or one of {sorted(PENDLE_CHAIN_IDS)}"
        ) from exc
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _compact_params(params: dict[str, Any]) -> dict[str, Any]:
|
|
146
|
+
return {k: v for k, v in params.items() if v is not None}
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
async def _gather_limited(
|
|
150
|
+
coro_factories: Sequence[Callable[[], Awaitable[Any]]],
|
|
151
|
+
*,
|
|
152
|
+
concurrency: int = 8,
|
|
153
|
+
) -> list[Any]:
|
|
154
|
+
"""
|
|
155
|
+
Run coroutine factories with a concurrency limit.
|
|
156
|
+
Each entry of `coro_factories` is a zero-arg callable that returns an awaitable.
|
|
157
|
+
"""
|
|
158
|
+
|
|
159
|
+
sem = asyncio.Semaphore(concurrency)
|
|
160
|
+
results: list[Any] = [None] * len(coro_factories)
|
|
161
|
+
|
|
162
|
+
async def runner(i: int, fn: Callable[[], Awaitable[Any]]) -> None:
|
|
163
|
+
async with sem:
|
|
164
|
+
results[i] = await fn()
|
|
165
|
+
|
|
166
|
+
await asyncio.gather(*(runner(i, fn) for i, fn in enumerate(coro_factories)))
|
|
167
|
+
return results
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
class PendleAdapter(BaseAdapter):
    """
    Thin Pendle adapter around the Pendle API + Hosted SDK endpoints.

    This adapter is designed for:
    - Discovery: list active PT/YT markets and normalize metrics
    - Execution planning: build ready-to-send swap transactions (tx + approvals)
    """

    # Discriminator consumed by the adapter registry/engine.
    adapter_type: str = "PENDLE"

    # Used as the "infinite" ERC-20 approval amount (see _ensure_allowance).
    MAX_UINT256 = 2**256 - 1
|
|
182
|
+
|
|
183
|
+
    def __init__(
        self,
        config: dict[str, Any] | None = None,
        *,
        base_url: str | None = None,
        client: httpx.AsyncClient | None = None,
        timeout: float = 30.0,
        strategy_wallet_signing_callback: Callable | None = None,
    ) -> None:
        """Initialize the adapter.

        Args:
            config: Adapter config; the "pendle_adapter" and "strategy_wallet"
                sections are read when present.
            base_url: Pendle API base URL; an explicit kwarg beats config,
                which beats the public default.
            client: Optional shared httpx.AsyncClient. When omitted, requests
                create a short-lived client per attempt (see _request_raw).
            timeout: Request timeout in seconds; config "timeout" overrides.
            strategy_wallet_signing_callback: Callback used to sign
                transactions in execution flows (see _send_tx).
        """
        super().__init__("pendle_adapter", config)
        cfg = config or {}
        adapter_cfg = cfg.get("pendle_adapter") or {}

        default_base = "https://api-v2.pendle.finance/core"
        # Precedence: explicit kwarg > config entry > public default.
        resolved_base = base_url or adapter_cfg.get("base_url") or default_base
        self.base_url = str(resolved_base).rstrip("/")
        self.client = client
        self.timeout = float(adapter_cfg.get("timeout", timeout))

        # NOTE(review): _owns_client is never set True anywhere visible in
        # this module, which makes close() a no-op — confirm intent.
        self._owns_client = False
        self.strategy_wallet_signing_callback = strategy_wallet_signing_callback
        self.strategy_wallet = cfg.get("strategy_wallet") or {}
        self.max_retries = int(adapter_cfg.get("max_retries", 3))
        self.retry_backoff_seconds = float(
            adapter_cfg.get("retry_backoff_seconds", 0.75)
        )
        self.deployments_base_url = str(
            adapter_cfg.get("deployments_base_url")
            or PENDLE_DEFAULT_DEPLOYMENTS_BASE_URL
        ).rstrip("/")
        # Per-chain cache of deployment JSON (see fetch_core_deployments).
        self._deployments_cache: dict[int, dict[str, Any]] = {}
|
|
214
|
+
|
|
215
|
+
async def close(self) -> None:
|
|
216
|
+
if self._owns_client and self.client is not None:
|
|
217
|
+
await self.client.aclose()
|
|
218
|
+
self.client = None
|
|
219
|
+
self._owns_client = False
|
|
220
|
+
|
|
221
|
+
# ---------------------------
|
|
222
|
+
# Execution helpers
|
|
223
|
+
# ---------------------------
|
|
224
|
+
|
|
225
|
+
def _strategy_address(self) -> str:
|
|
226
|
+
addr = None
|
|
227
|
+
if isinstance(self.strategy_wallet, dict):
|
|
228
|
+
addr = self.strategy_wallet.get("address")
|
|
229
|
+
elif isinstance(self.strategy_wallet, str):
|
|
230
|
+
addr = self.strategy_wallet
|
|
231
|
+
if not addr:
|
|
232
|
+
raise ValueError("strategy_wallet address is required for Pendle execution")
|
|
233
|
+
return to_checksum_address(addr)
|
|
234
|
+
|
|
235
|
+
async def _send_tx(self, tx: dict[str, Any]) -> tuple[bool, Any]:
|
|
236
|
+
if self.strategy_wallet_signing_callback is None:
|
|
237
|
+
raise ValueError(
|
|
238
|
+
"strategy_wallet_signing_callback is required for tx execution"
|
|
239
|
+
)
|
|
240
|
+
txn_hash = await send_transaction(tx, self.strategy_wallet_signing_callback)
|
|
241
|
+
return True, txn_hash
|
|
242
|
+
|
|
243
|
+
    async def _ensure_allowance(
        self,
        *,
        chain_id: int,
        token_address: str,
        owner: str,
        spender: str,
        amount: int,
    ) -> tuple[bool, Any]:
        """Ensure *spender* may transfer at least *amount* of *owner*'s token.

        Checks the current on-chain allowance first and returns early when it
        already covers *amount*. Otherwise approves MAX_UINT256 (infinite
        approval), resetting to 0 first for tokens that require it.

        Returns:
            (True, result) on success; (False, {"error": ..., "token": ...})
            when an approval transaction fails.
        """
        token_checksum = to_checksum_address(token_address)
        owner_checksum = to_checksum_address(owner)
        spender_checksum = to_checksum_address(spender)

        allowance = await get_token_allowance(
            token_checksum,
            chain_id,
            owner_checksum,
            spender_checksum,
        )
        if allowance >= amount:
            # Nothing to do; no transaction is sent.
            return True, {"status": "already_approved"}

        if (int(chain_id), token_checksum) in TOKENS_REQUIRING_APPROVAL_RESET:
            # Some tokens (e.g., USDT) require allowance to be set to 0 before
            # being increased.
            if int(allowance) > 0:
                clear_tx = await build_approve_transaction(
                    from_address=owner_checksum,
                    chain_id=chain_id,
                    token_address=token_checksum,
                    spender_address=spender_checksum,
                    amount=0,
                )
                try:
                    await self._send_tx(clear_tx)
                except Exception as exc:  # noqa: BLE001
                    return False, {"error": str(exc), "token": token_address}

        # Infinite approval so subsequent swaps skip this path entirely.
        approve_tx = await build_approve_transaction(
            from_address=owner_checksum,
            chain_id=chain_id,
            token_address=token_checksum,
            spender_address=spender_checksum,
            amount=self.MAX_UINT256,
        )
        try:
            return await self._send_tx(approve_tx)
        except Exception as exc:
            return False, {"error": str(exc), "token": token_address}
|
|
292
|
+
|
|
293
|
+
# ---------------------------
|
|
294
|
+
# Multicall helpers
|
|
295
|
+
# ---------------------------
|
|
296
|
+
|
|
297
|
+
@staticmethod
|
|
298
|
+
def _chunks(seq: list[Any], n: int) -> list[list[Any]]:
|
|
299
|
+
return [seq[i : i + n] for i in range(0, len(seq), n)]
|
|
300
|
+
|
|
301
|
+
    async def _multicall_uint256_chunked(
        self,
        *,
        multicall: MulticallAdapter,
        calls: list[Any],
        chunk_size: int,
    ) -> list[int | None]:
        """
        Execute multicall and decode each return as uint256.

        If a chunk reverts, fall back to executing calls one-by-one so we can salvage
        partial results (returning None for failed calls).
        """
        out: list[int | None] = []
        # Clamp chunk_size to at least 1 so a bad config cannot stall the loop.
        for chunk in self._chunks(calls, max(1, int(chunk_size))):
            if not chunk:
                continue
            try:
                res = await multicall.aggregate(chunk)
                out.extend([multicall.decode_uint256(b) for b in res.return_data])
            except Exception:  # noqa: BLE001 - fall back to individual calls
                # Re-run each call in isolation so one reverting call does not
                # discard the results of the others in the chunk.
                for call in chunk:
                    try:
                        r = await multicall.aggregate([call])
                        if r.return_data:
                            out.append(multicall.decode_uint256(r.return_data[0]))
                        else:
                            out.append(None)
                    except Exception:  # noqa: BLE001
                        out.append(None)
        return out
|
|
332
|
+
|
|
333
|
+
@staticmethod
|
|
334
|
+
def _rate_limit_from_headers(headers: httpx.Headers) -> dict[str, int | None]:
|
|
335
|
+
def _get_int(name: str) -> int | None:
|
|
336
|
+
value = headers.get(name)
|
|
337
|
+
if value is None:
|
|
338
|
+
return None
|
|
339
|
+
try:
|
|
340
|
+
return int(value)
|
|
341
|
+
except (TypeError, ValueError):
|
|
342
|
+
return None
|
|
343
|
+
|
|
344
|
+
return {
|
|
345
|
+
"ratelimitLimit": _get_int("x-ratelimit-limit"),
|
|
346
|
+
"ratelimitRemaining": _get_int("x-ratelimit-remaining"),
|
|
347
|
+
"ratelimitReset": _get_int("x-ratelimit-reset"),
|
|
348
|
+
"ratelimitWeeklyLimit": _get_int("x-ratelimit-weekly-limit"),
|
|
349
|
+
"ratelimitWeeklyRemaining": _get_int("x-ratelimit-weekly-remaining"),
|
|
350
|
+
"ratelimitWeeklyReset": _get_int("x-ratelimit-weekly-reset"),
|
|
351
|
+
"computingUnit": _get_int("x-computing-unit"),
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
@staticmethod
|
|
355
|
+
def _decode_response_payload(response: httpx.Response) -> Any:
|
|
356
|
+
try:
|
|
357
|
+
return response.json()
|
|
358
|
+
except Exception: # noqa: BLE001
|
|
359
|
+
return response.text
|
|
360
|
+
|
|
361
|
+
def _attach_meta(self, payload: Any, response: httpx.Response) -> Any:
|
|
362
|
+
rate_limit = self._rate_limit_from_headers(response.headers)
|
|
363
|
+
if isinstance(payload, dict):
|
|
364
|
+
if "rateLimit" not in payload:
|
|
365
|
+
payload["rateLimit"] = rate_limit
|
|
366
|
+
else:
|
|
367
|
+
payload["_rateLimit"] = rate_limit
|
|
368
|
+
return payload
|
|
369
|
+
return {"data": payload, "rateLimit": rate_limit}
|
|
370
|
+
|
|
371
|
+
    async def _request_raw(
        self,
        method: Literal["GET", "POST"],
        path: str,
        *,
        params: dict[str, Any] | None = None,
        json: dict[str, Any] | None = None,
    ) -> httpx.Response:
        """Issue an HTTP request against the Pendle API with transport retries.

        Retries up to max_retries times with exponential backoff on
        httpx.RequestError (connect/read failures). HTTP error statuses are
        NOT retried here — the response is returned and callers decide
        (see _get/_post, which call raise_for_status).
        """
        url = f"{self.base_url}{path}"
        last_exc: Exception | None = None

        for attempt in range(1, max(1, self.max_retries) + 1):
            try:
                if self.client is not None:
                    # Shared client configured by the caller; reuse it.
                    return await self.client.request(
                        method,
                        url,
                        params=params,
                        json=json,
                        timeout=self.timeout,
                    )
                # No shared client: open a fresh one per attempt.
                async with httpx.AsyncClient() as client:
                    return await client.request(
                        method,
                        url,
                        params=params,
                        json=json,
                        timeout=self.timeout,
                    )
            except httpx.RequestError as exc:
                last_exc = exc
                if attempt >= max(1, self.max_retries):
                    raise
                # Exponential backoff: base * 2^(attempt - 1).
                await asyncio.sleep(self.retry_backoff_seconds * (2 ** (attempt - 1)))

        if last_exc is not None:
            raise last_exc
        # The loop either returns or raises; this line is defensive only.
        raise RuntimeError("unreachable")
|
|
409
|
+
|
|
410
|
+
async def _get(self, path: str, params: dict[str, Any] | None = None) -> Any:
|
|
411
|
+
response = await self._request_raw("GET", path, params=params)
|
|
412
|
+
response.raise_for_status()
|
|
413
|
+
payload = self._decode_response_payload(response)
|
|
414
|
+
return self._attach_meta(payload, response)
|
|
415
|
+
|
|
416
|
+
async def _post(self, path: str, json: dict[str, Any]) -> Any:
|
|
417
|
+
response = await self._request_raw("POST", path, json=json)
|
|
418
|
+
response.raise_for_status()
|
|
419
|
+
payload = self._decode_response_payload(response)
|
|
420
|
+
return self._attach_meta(payload, response)
|
|
421
|
+
|
|
422
|
+
# ---------------------------
|
|
423
|
+
# Pendle API endpoints
|
|
424
|
+
# ---------------------------
|
|
425
|
+
|
|
426
|
+
    async def fetch_markets(
        self,
        chain_id: int | None = None,
        is_active: bool | None = None,
        ids: list[str] | None = None,
    ) -> dict[str, Any]:
        """
        Fetch whitelisted markets with metadata.

        Endpoint: `{base_url}/v1/markets/all` (default `base_url` ends with `/core`)
        { "markets": [ ... ] }

        Args:
            chain_id: Restrict results to one chain when given.
            is_active: Filter by active status; serialized as "true"/"false".
            ids: Restrict to specific market ids (joined comma-separated).
        """
        params: dict[str, Any] = {}
        if chain_id is not None:
            params["chainId"] = int(chain_id)
        if is_active is not None:
            # The API expects lowercase "true"/"false" strings.
            params["isActive"] = str(bool(is_active)).lower()
        if ids:
            params["ids"] = ",".join(ids)
        data = await self._get("/v1/markets/all", params=params or None)
        return data if isinstance(data, dict) else {"data": data}
|
|
447
|
+
|
|
448
|
+
async def fetch_market_snapshot(
|
|
449
|
+
self,
|
|
450
|
+
chain_id: int,
|
|
451
|
+
market_address: str,
|
|
452
|
+
timestamp: str | None = None,
|
|
453
|
+
) -> dict[str, Any]:
|
|
454
|
+
params = {"timestamp": timestamp} if timestamp else None
|
|
455
|
+
data = await self._get(
|
|
456
|
+
f"/v2/{int(chain_id)}/markets/{market_address}/data", params=params
|
|
457
|
+
)
|
|
458
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
459
|
+
|
|
460
|
+
    async def fetch_market_history(
        self,
        chain_id: int,
        market_address: str,
        time_frame: Literal["hour", "day", "week"] = "day",
        timestamp_start: str | None = None,
        timestamp_end: str | None = None,
        fields: str | None = None,
    ) -> dict[str, Any]:
        """Fetch historical time-series metrics for one market.

        Endpoint: /v2/{chainId}/markets/{market}/historical-data

        Args:
            time_frame: Sampling interval — "hour", "day" or "week".
            timestamp_start / timestamp_end: Optional window bounds.
            fields: Comma-separated field list (see PENDLE_HISTORY_FIELDS);
                defaults to DEFAULT_HISTORY_FIELDS.
        """
        params: dict[str, Any] = {
            "time_frame": time_frame,
            "fields": fields or DEFAULT_HISTORY_FIELDS,
        }
        if timestamp_start:
            params["timestamp_start"] = timestamp_start
        if timestamp_end:
            params["timestamp_end"] = timestamp_end

        data = await self._get(
            f"/v2/{int(chain_id)}/markets/{market_address}/historical-data",
            params=params,
        )
        return data if isinstance(data, dict) else {"data": data}
|
|
483
|
+
|
|
484
|
+
async def fetch_ohlcv_prices(
|
|
485
|
+
self,
|
|
486
|
+
chain_id: int,
|
|
487
|
+
token_address: str,
|
|
488
|
+
start: str | None = None,
|
|
489
|
+
end: str | None = None,
|
|
490
|
+
interval: str | None = None,
|
|
491
|
+
) -> dict[str, Any]:
|
|
492
|
+
params = _compact_params({"start": start, "end": end, "interval": interval})
|
|
493
|
+
data = await self._get(
|
|
494
|
+
f"/v4/{int(chain_id)}/prices/{token_address}/ohlcv", params=params or None
|
|
495
|
+
)
|
|
496
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
497
|
+
|
|
498
|
+
async def fetch_asset_prices(self) -> dict[str, Any]:
|
|
499
|
+
data = await self._get("/v1/prices/assets")
|
|
500
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
501
|
+
|
|
502
|
+
async def fetch_swapping_prices(
|
|
503
|
+
self, chain_id: int, market_address: str
|
|
504
|
+
) -> dict[str, Any]:
|
|
505
|
+
data = await self._get(
|
|
506
|
+
f"/v1/sdk/{int(chain_id)}/markets/{market_address}/swapping-prices"
|
|
507
|
+
)
|
|
508
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
509
|
+
|
|
510
|
+
# ---------------------------
|
|
511
|
+
# Helpful SDK & discovery
|
|
512
|
+
# ---------------------------
|
|
513
|
+
|
|
514
|
+
async def fetch_supported_chain_ids(self) -> dict[str, Any]:
|
|
515
|
+
data = await self._get("/v1/chains")
|
|
516
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
517
|
+
|
|
518
|
+
async def fetch_supported_aggregators(self, chain: ChainLike) -> dict[str, Any]:
|
|
519
|
+
chain_id = _as_chain_id(chain)
|
|
520
|
+
data = await self._get(f"/v1/sdk/{chain_id}/supported-aggregators")
|
|
521
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
522
|
+
|
|
523
|
+
    async def fetch_positions_database(
        self,
        *,
        user: str,
        filter_usd: float | None = None,
    ) -> dict[str, Any]:
        """
        Fast, indexed user positions across chains (claimables cached ~24h).

        Endpoint: /v1/dashboard/positions/database/{user}

        Args:
            user: Wallet address to look up.
            filter_usd: When given, forwarded as the "filterUsd" query param.
        """
        params: dict[str, Any] = {}
        if filter_usd is not None:
            params["filterUsd"] = float(filter_usd)
        data = await self._get(
            f"/v1/dashboard/positions/database/{user}", params=params or None
        )
        return data if isinstance(data, dict) else {"data": data}
|
|
541
|
+
|
|
542
|
+
# ---------------------------------------
|
|
543
|
+
# Limit Orders
|
|
544
|
+
# ---------------------------------------
|
|
545
|
+
|
|
546
|
+
    async def fetch_taker_limit_orders(
        self,
        *,
        chain: ChainLike,
        yt: str,
        order_type: int,
        skip: int | None = None,
        limit: int | None = None,
        sort_by: str | None = None,
        sort_order: Literal["asc", "desc"] | None = None,
    ) -> dict[str, Any]:
        """Fetch limit orders available to takers for one YT market.

        Endpoint: /v1/limit-orders/takers/limit-orders

        Args:
            chain: Chain id or alias (see PENDLE_CHAIN_IDS).
            yt: YT token address identifying the market.
            order_type: Pendle numeric order-type code, sent as "type".
            skip / limit: Optional pagination controls.
            sort_by / sort_order: Optional server-side ordering.
        """
        params: dict[str, Any] = {
            "chainId": _as_chain_id(chain),
            "yt": yt,
            "type": int(order_type),
        }
        if skip is not None:
            params["skip"] = int(skip)
        if limit is not None:
            params["limit"] = int(limit)
        if sort_by is not None:
            params["sortBy"] = str(sort_by)
        if sort_order is not None:
            params["sortOrder"] = sort_order

        data = await self._get("/v1/limit-orders/takers/limit-orders", params=params)
        return data if isinstance(data, dict) else {"data": data}
|
|
573
|
+
|
|
574
|
+
async def generate_maker_limit_order_data(
|
|
575
|
+
self,
|
|
576
|
+
*,
|
|
577
|
+
payload: dict[str, Any],
|
|
578
|
+
) -> dict[str, Any]:
|
|
579
|
+
data = await self._post(
|
|
580
|
+
"/v1/limit-orders/makers/generate-limit-order-data", json=payload
|
|
581
|
+
)
|
|
582
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
583
|
+
|
|
584
|
+
async def post_maker_limit_order(
|
|
585
|
+
self,
|
|
586
|
+
*,
|
|
587
|
+
payload: dict[str, Any],
|
|
588
|
+
) -> dict[str, Any]:
|
|
589
|
+
data = await self._post("/v1/limit-orders/makers/limit-orders", json=payload)
|
|
590
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
591
|
+
|
|
592
|
+
async def fetch_maker_limit_orders(
|
|
593
|
+
self,
|
|
594
|
+
*,
|
|
595
|
+
chain: ChainLike,
|
|
596
|
+
maker: str,
|
|
597
|
+
yt: str | None = None,
|
|
598
|
+
order_type: int | None = None,
|
|
599
|
+
is_active: bool | None = None,
|
|
600
|
+
) -> dict[str, Any]:
|
|
601
|
+
params: dict[str, Any] = {
|
|
602
|
+
"chainId": _as_chain_id(chain),
|
|
603
|
+
"maker": maker,
|
|
604
|
+
}
|
|
605
|
+
if yt is not None:
|
|
606
|
+
params["yt"] = yt
|
|
607
|
+
if order_type is not None:
|
|
608
|
+
params["type"] = int(order_type)
|
|
609
|
+
if is_active is not None:
|
|
610
|
+
params["isActive"] = self._bool_q(bool(is_active))
|
|
611
|
+
data = await self._get("/v1/limit-orders/makers/limit-orders", params=params)
|
|
612
|
+
return data if isinstance(data, dict) else {"data": data}
|
|
613
|
+
|
|
614
|
+
# ---------------------------------------
|
|
615
|
+
# Deployments (address discovery)
|
|
616
|
+
# ---------------------------------------
|
|
617
|
+
|
|
618
|
+
async def fetch_core_deployments(
    self,
    *,
    chain: ChainLike,
    force_refresh: bool = False,
) -> dict[str, Any]:
    """Fetch (and cache) Pendle's core deployment manifest for *chain*.

    The manifest is a JSON document served at
    ``{deployments_base_url}/{chainId}-core.json``; results are cached per
    chain id in ``self._deployments_cache``. Pass ``force_refresh=True`` to
    bypass the cache and refetch.

    Raises:
        ValueError: if the decoded payload is not a dict.
        httpx.HTTPStatusError: if the manifest request fails (raise_for_status).
    """
    chain_id = _as_chain_id(chain)
    # Serve from the per-chain cache unless the caller wants a refetch.
    if not force_refresh and chain_id in self._deployments_cache:
        return self._deployments_cache[chain_id]

    url = f"{self.deployments_base_url}/{chain_id}-core.json"
    # One-shot client: scoped to this request so connections are closed promptly.
    async with httpx.AsyncClient(timeout=self.timeout) as client:
        resp = await client.get(url)
        resp.raise_for_status()
        data = self._decode_response_payload(resp)

    if not isinstance(data, dict):
        raise ValueError(f"Unexpected deployments payload for chain {chain_id}")

    self._deployments_cache[chain_id] = data
    return data
|
|
639
|
+
|
|
640
|
+
async def get_router_static_address(self, *, chain: ChainLike) -> str:
    """Return the checksummed RouterStatic address from Pendle's deployments."""
    manifest = await self.fetch_core_deployments(chain=chain)
    candidate = manifest.get("routerStatic")
    if isinstance(candidate, str) and candidate:
        return to_checksum_address(candidate)
    raise ValueError("routerStatic not found in Pendle deployments")
|
|
646
|
+
|
|
647
|
+
async def get_limit_router_address(self, *, chain: ChainLike) -> str:
    """Return the checksummed limit-order router address from Pendle's deployments."""
    manifest = await self.fetch_core_deployments(chain=chain)
    candidate = manifest.get("limitRouter")
    if isinstance(candidate, str) and candidate:
        return to_checksum_address(candidate)
    raise ValueError("limitRouter not found in Pendle deployments")
|
|
653
|
+
|
|
654
|
+
# ---------------------------------------
|
|
655
|
+
# RouterStatic (off-chain spot-rate checks)
|
|
656
|
+
# ---------------------------------------
|
|
657
|
+
|
|
658
|
+
async def router_static_rates(
    self,
    *,
    chain: ChainLike,
    market: str,
) -> dict[str, Any]:
    """Read spot conversion rates for *market* from Pendle's RouterStatic contract.

    Performs four read-only contract calls (LP->SY, PT->SY, LP->asset,
    PT->asset) and returns the raw integer rates as reported by the contract
    (fixed-point scaling is not normalized here — presumably 1e18-scaled;
    confirm against the RouterStatic ABI before relying on the scale).
    """
    chain_id = _as_chain_id(chain)
    router_static = await self.get_router_static_address(chain=chain_id)
    market_checksum = to_checksum_address(market)

    # Scoped web3 connection for the chain; all four calls share it.
    async with web3_from_chain_id(chain_id) as web3:
        contract = web3.eth.contract(
            address=to_checksum_address(router_static),
            abi=PENDLE_ROUTER_STATIC_ABI,
        )
        lp_to_sy = await contract.functions.getLpToSyRate(market_checksum).call()
        pt_to_sy = await contract.functions.getPtToSyRate(market_checksum).call()
        lp_to_asset = await contract.functions.getLpToAssetRate(
            market_checksum
        ).call()
        pt_to_asset = await contract.functions.getPtToAssetRate(
            market_checksum
        ).call()

    return {
        "chainId": int(chain_id),
        "routerStatic": router_static,
        "market": market_checksum,
        "rates": {
            "lpToSy": int(lp_to_sy),
            "ptToSy": int(pt_to_sy),
            "lpToAsset": int(lp_to_asset),
            "ptToAsset": int(pt_to_asset),
        },
    }
|
|
693
|
+
|
|
694
|
+
async def sdk_swap_v2(
    self,
    *,
    chain: ChainLike,
    market_address: str,
    receiver: str | None,
    slippage: float,
    token_in: str,
    token_out: str,
    amount_in: str,
    enable_aggregator: bool = False,
    aggregators: Sequence[str] | str | None = None,
    additional_data: Sequence[str] | str | None = None,
    need_scale: bool | None = None,
) -> dict[str, Any]:
    """
    Build calldata to swap tokenIn -> tokenOut via Pendle Hosted SDK.
    Uses /v2/sdk/{chainId}/markets/{market}/swap (GET with query params).

    Returns a payload that typically includes:
      - tx: { to, data, value, from }
      - tokenApprovals: [{ token, amount }, ...]
      - data: { amountOut, priceImpact, impliedApy?, effectiveApy? } (depending on additionalData)
    """
    chain_id = _as_chain_id(chain)

    # API wants comma-separated strings for aggregators + additionalData.
    # Coerce each element through str() so a sequence of non-strings cannot
    # crash ",".join().
    if isinstance(aggregators, (list, tuple)):
        aggregators_q = ",".join(str(a) for a in aggregators)
    else:
        aggregators_q = aggregators

    if isinstance(additional_data, (list, tuple)):
        additional_data_q = ",".join(str(a) for a in additional_data)
    else:
        additional_data_q = additional_data

    params = _compact_params(
        {
            "receiver": receiver,
            "slippage": slippage,
            # Use the shared _bool_q helper so boolean query formatting stays
            # consistent across the client (always lowercase "true"/"false").
            "enableAggregator": self._bool_q(bool(enable_aggregator)),
            "aggregators": aggregators_q,
            "tokenIn": token_in,
            "tokenOut": token_out,
            "amountIn": amount_in,
            "additionalData": additional_data_q,
            # Only serialize needScale when the caller set it explicitly;
            # None is dropped by _compact_params.
            "needScale": self._bool_q(bool(need_scale))
            if need_scale is not None
            else None,
        }
    )

    data = await self._get(
        f"/v2/sdk/{chain_id}/markets/{market_address}/swap", params=params
    )
    return data if isinstance(data, dict) else {"data": data}
|
|
751
|
+
|
|
752
|
+
# ---------------------------------------
|
|
753
|
+
# Hosted SDK: Universal Convert
|
|
754
|
+
# ---------------------------------------
|
|
755
|
+
|
|
756
|
+
@staticmethod
|
|
757
|
+
def _bool_q(v: bool) -> str:
|
|
758
|
+
return "true" if v else "false"
|
|
759
|
+
|
|
760
|
+
@staticmethod
|
|
761
|
+
def _coerce_int(value: Any, *, default: int = 0) -> int:
|
|
762
|
+
if value is None:
|
|
763
|
+
return default
|
|
764
|
+
if isinstance(value, bool):
|
|
765
|
+
return int(value)
|
|
766
|
+
if isinstance(value, int):
|
|
767
|
+
return value
|
|
768
|
+
if isinstance(value, str):
|
|
769
|
+
try:
|
|
770
|
+
if value.startswith("0x"):
|
|
771
|
+
return int(value, 16)
|
|
772
|
+
return int(value)
|
|
773
|
+
except ValueError:
|
|
774
|
+
return default
|
|
775
|
+
return default
|
|
776
|
+
|
|
777
|
+
@classmethod
def _select_best_convert_route(cls, convert: dict[str, Any]) -> dict[str, Any]:
    """Pick the convert route whose summed output amounts are largest.

    Routes without usable outputs score 0; non-dict entries are ignored.
    Raises ValueError when the response carries no routes at all.
    """
    routes = convert.get("routes") or []
    if not isinstance(routes, list) or not routes:
        raise ValueError("Pendle convert response missing routes")

    def _score(route: dict[str, Any]) -> int:
        outputs = route.get("outputs") or []
        if not isinstance(outputs, list) or not outputs:
            return 0
        return sum(
            cls._coerce_int(entry.get("amount"), default=0)
            for entry in outputs
            if isinstance(entry, dict)
        )

    return max((r for r in routes if isinstance(r, dict)), key=_score)
|
|
795
|
+
|
|
796
|
+
@staticmethod
|
|
797
|
+
def _extract_convert_approvals(
|
|
798
|
+
convert: dict[str, Any],
|
|
799
|
+
*,
|
|
800
|
+
route: dict[str, Any] | None = None,
|
|
801
|
+
) -> list[dict[str, Any]]:
|
|
802
|
+
approvals = (
|
|
803
|
+
convert.get("requiredApprovals")
|
|
804
|
+
or convert.get("tokenApprovals")
|
|
805
|
+
or (route.get("requiredApprovals") if isinstance(route, dict) else None)
|
|
806
|
+
or (route.get("tokenApprovals") if isinstance(route, dict) else None)
|
|
807
|
+
or []
|
|
808
|
+
)
|
|
809
|
+
if not isinstance(approvals, list):
|
|
810
|
+
return []
|
|
811
|
+
|
|
812
|
+
out: list[dict[str, Any]] = []
|
|
813
|
+
for approval in approvals:
|
|
814
|
+
if not isinstance(approval, dict):
|
|
815
|
+
continue
|
|
816
|
+
token = approval.get("token")
|
|
817
|
+
amount = approval.get("amount")
|
|
818
|
+
if not (isinstance(token, str) and token and amount is not None):
|
|
819
|
+
continue
|
|
820
|
+
out.append({"token": token, "amount": str(amount)})
|
|
821
|
+
return out
|
|
822
|
+
|
|
823
|
+
async def sdk_convert_v2(
    self,
    *,
    chain: ChainLike,
    slippage: float,
    inputs: Sequence[dict[str, str]],
    outputs: Sequence[str],
    receiver: str | None = None,
    enable_aggregator: bool = False,
    aggregators: Sequence[str] | str | None = None,
    additional_data: Sequence[str] | str | None = None,
    need_scale: bool | None = None,
    use_limit_order: bool | None = True,
    redeem_rewards: bool | None = False,
    prefer_post: bool = True,
) -> dict[str, Any]:
    """
    Universal Convert endpoint (swap, mint/redeem, LP add/remove, roll, etc).

    Prefer POST (OpenAPI), but Pendle currently serves GET for some setups, so we
    fallback to GET when POST returns 404.

    Raises:
        ValueError: on slippage outside [0, 1], or empty/malformed inputs/outputs.
    """
    chain_id = _as_chain_id(chain)

    if receiver is None:
        # Always set receiver, per Pendle recommendation.
        receiver = self._strategy_address()

    # --- Validate and normalize caller arguments -------------------------
    s = float(slippage)
    if not (0.0 <= s <= 1.0):
        raise ValueError("slippage must be between 0 and 1 (inclusive)")
    if not inputs:
        raise ValueError("inputs is required")
    if not outputs:
        raise ValueError("outputs is required")

    # Normalize each input to {"token": str, "amount": str}; fail loudly on
    # missing fields so a typo doesn't become a silent empty request.
    inputs_norm: list[dict[str, str]] = []
    for i, item in enumerate(inputs):
        token = item.get("token")
        amount = item.get("amount")
        if not (isinstance(token, str) and token):
            raise ValueError(f"inputs[{i}].token is required")
        if amount is None:
            raise ValueError(f"inputs[{i}].amount is required")
        inputs_norm.append({"token": token, "amount": str(amount)})

    # Drop blank output entries; re-check emptiness afterwards.
    outputs_norm = [str(o) for o in outputs if str(o).strip()]
    if not outputs_norm:
        raise ValueError("outputs is required")

    # Normalize aggregators/additionalData for both POST and GET shapes.
    if isinstance(aggregators, str):
        aggregators_list = [a.strip() for a in aggregators.split(",") if a.strip()]
    else:
        aggregators_list = list(aggregators) if aggregators is not None else None

    if isinstance(additional_data, str):
        additional_data_csv = additional_data
    elif additional_data is None:
        additional_data_csv = None
    else:
        additional_data_csv = ",".join([str(a) for a in additional_data if str(a)])

    # --- Preferred shape: JSON POST body ---------------------------------
    if prefer_post:
        body = _compact_params(
            {
                "slippage": s,
                "inputs": inputs_norm,
                "outputs": outputs_norm,
                "receiver": receiver,
                "enableAggregator": bool(enable_aggregator),
                "aggregators": aggregators_list,
                "additionalData": additional_data_csv,
                "needScale": need_scale,
                "useLimitOrder": use_limit_order,
                "redeemRewards": redeem_rewards,
            }
        )
        resp = await self._request_raw(
            "POST", f"/v2/sdk/{chain_id}/convert", json=body
        )
        # 404 means this deployment only serves GET — fall through below.
        if resp.status_code != 404:
            resp.raise_for_status()
            payload = self._decode_response_payload(resp)
            attached = self._attach_meta(payload, resp)
            return attached if isinstance(attached, dict) else {"data": attached}

    # --- Fallback shape: GET with CSV query params -----------------------
    params: dict[str, Any] = {
        "receiver": receiver,
        "slippage": s,
        "tokensIn": ",".join([i["token"] for i in inputs_norm]),
        "amountsIn": ",".join([i["amount"] for i in inputs_norm]),
        "tokensOut": ",".join(outputs_norm),
        "enableAggregator": self._bool_q(bool(enable_aggregator)),
    }
    if aggregators_list:
        params["aggregators"] = ",".join([str(a) for a in aggregators_list])
    if additional_data_csv:
        params["additionalData"] = additional_data_csv
    # Tri-state flags: only serialized when the caller set them.
    if need_scale is not None:
        params["needScale"] = self._bool_q(bool(need_scale))
    if use_limit_order is not None:
        params["useLimitOrder"] = self._bool_q(bool(use_limit_order))
    if redeem_rewards is not None:
        params["redeemRewards"] = self._bool_q(bool(redeem_rewards))

    data = await self._get(f"/v2/sdk/{chain_id}/convert", params=params)
    return data if isinstance(data, dict) else {"data": data}
|
|
931
|
+
|
|
932
|
+
def build_convert_plan(
    self,
    *,
    chain: ChainLike,
    convert_response: dict[str, Any],
) -> dict[str, Any]:
    """
    Turn a Hosted SDK convert response into a ready-to-send tx + approvals.

    Notes:
    - Choose the best route by maximizing summed output amounts.
    - Always trust the returned tx.to for execution (Pendle warns it may change).

    Raises:
        ValueError: when the response is not a dict, has no routes, or the
            selected route lacks a well-formed tx (to/from/data strings).
    """
    chain_id = _as_chain_id(chain)

    if not isinstance(convert_response, dict):
        raise ValueError("convert_response must be a dict")

    best_route = self._select_best_convert_route(convert_response)
    tx = best_route.get("tx")
    if not isinstance(tx, dict):
        raise ValueError("Pendle convert route missing tx")

    # Validate the minimal tx shape before checksumming anything.
    to_addr = tx.get("to")
    from_addr = tx.get("from")
    data = tx.get("data")
    if not (
        isinstance(to_addr, str)
        and isinstance(from_addr, str)
        and isinstance(data, str)
    ):
        raise ValueError("Pendle convert tx missing to/from/data")

    return {
        "chainId": int(chain_id),
        "action": convert_response.get("action"),
        "route": best_route,
        # Approvals may live at the response or route level; helper checks both.
        "approvals": self._extract_convert_approvals(
            convert_response, route=best_route
        ),
        "outputs": best_route.get("outputs")
        if isinstance(best_route.get("outputs"), list)
        else [],
        "tx": {
            "chainId": int(chain_id),
            "from": to_checksum_address(from_addr),
            "to": to_checksum_address(to_addr),
            "data": data,
            # value can arrive as int, decimal string, or 0x-hex string.
            "value": self._coerce_int(tx.get("value"), default=0),
        },
        "raw": convert_response,
    }
|
|
984
|
+
|
|
985
|
+
# ---------------------------------------
|
|
986
|
+
# Market discovery: PT/YT markets
|
|
987
|
+
# ---------------------------------------
|
|
988
|
+
|
|
989
|
+
async def list_active_pt_yt_markets(
    self,
    *,
    chains: Sequence[ChainLike] | None = None,
    chain: ChainLike | None = None,
    min_liquidity_usd: float = 0.0,
    min_volume_usd_24h: float = 0.0,
    min_days_to_expiry: float = 0.0,
    sort_by: Literal[
        "fixed_apy", "liquidity", "volume", "underlying_apy", "expiry"
    ] = "fixed_apy",
    descending: bool = True,
) -> list[dict[str, Any]]:
    """
    Fetch active markets and return a normalized list with:
    - marketAddress, ptAddress, ytAddress, syAddress, underlyingAddress
    - fixedApy (impliedApy), underlyingApy, floatingApy (underlyingApy - impliedApy)
    - liquidityUsd, volumeUsd24h, totalTvlUsd, expiry, daysToExpiry

    NOTE: "fixed_apy" uses `impliedApy` from /v1/markets/all market.details.

    Markets missing/garbling expiry, metric, or chainId fields are skipped
    rather than raising. Pass exactly one of `chain` / `chains`; the default
    chain set is [42161, 8453, 999] (Arbitrum, Base, and — presumably —
    HyperEVM; confirm the 999 chain id against the project's chain registry).
    """
    if chain is not None and chains is not None:
        raise ValueError("Pass either chain=... or chains=[...], not both.")
    if chain is not None:
        chains = [chain]
    if chains is None:
        chains = [42161, 8453, 999]

    chain_ids = [_as_chain_id(c) for c in chains]

    async def fetch_one(cid: int) -> dict[str, Any]:
        return await self.fetch_markets(chain_id=cid, is_active=True)

    # cid=cid default binds each chain id at lambda creation time
    # (avoids the late-binding-closure pitfall).
    markets_responses = await _gather_limited(
        [lambda cid=cid: fetch_one(cid) for cid in chain_ids], concurrency=4
    )

    rows: list[dict[str, Any]] = []
    now = _now_utc()

    for resp in markets_responses:
        for m in (resp.get("markets") or []) if isinstance(resp, dict) else []:
            details = m.get("details", {}) or {}
            expiry_s = m.get("expiry")
            if not expiry_s:
                continue

            try:
                expiry_dt = _parse_iso8601(str(expiry_s))
            except Exception:
                continue

            days_to_expiry = (expiry_dt - now).total_seconds() / 86400.0

            try:
                liquidity = float(details.get("liquidity", 0.0) or 0.0)
                volume = float(details.get("tradingVolume", 0.0) or 0.0)
                total_tvl = float(details.get("totalTvl", 0.0) or 0.0)
            except Exception:
                # If a market has unexpected formatting, skip it.
                continue

            implied_apy = float(details.get("impliedApy", 0.0) or 0.0)
            underlying_apy = float(details.get("underlyingApy", 0.0) or 0.0)
            floating_apy = underlying_apy - implied_apy

            # Apply caller filters before building the row.
            if liquidity < min_liquidity_usd:
                continue
            if volume < min_volume_usd_24h:
                continue
            if days_to_expiry < min_days_to_expiry:
                continue

            chain_id_val = m.get("chainId")
            try:
                chain_id_int = (
                    int(chain_id_val) if chain_id_val is not None else None
                )
            except Exception:
                chain_id_int = None
            if chain_id_int is None:
                continue

            row: dict[str, Any] = {
                "chainId": chain_id_int,
                "marketName": m.get("name"),
                "marketAddress": _as_address(str(m.get("address", ""))),
                "expiry": expiry_s,
                "daysToExpiry": days_to_expiry,
                "ptAddress": _as_address(str(m.get("pt", ""))),
                "ytAddress": _as_address(str(m.get("yt", ""))),
                "syAddress": _as_address(str(m.get("sy", ""))),
                "underlyingAddress": _as_address(str(m.get("underlyingAsset", ""))),
                # Key metrics
                "fixedApy": implied_apy,
                "underlyingApy": underlying_apy,
                "floatingApy": floating_apy,
                "liquidityUsd": liquidity,
                "volumeUsd24h": volume,
                "totalTvlUsd": total_tvl,
                # Extra details if you want them for decision making
                "swapFeeApy": float(details.get("swapFeeApy", 0.0) or 0.0),
                "pendleApy": float(details.get("pendleApy", 0.0) or 0.0),
                "aggregatedApy": float(details.get("aggregatedApy", 0.0) or 0.0),
                "maxBoostedApy": float(details.get("maxBoostedApy", 0.0) or 0.0),
            }
            rows.append(row)

    def sort_key(r: dict[str, Any]) -> Any:
        # Map the Literal selector onto the normalized row key;
        # unknown values fall back to fixedApy.
        if sort_by == "fixed_apy":
            return r["fixedApy"]
        if sort_by == "liquidity":
            return r["liquidityUsd"]
        if sort_by == "volume":
            return r["volumeUsd24h"]
        if sort_by == "underlying_apy":
            return r["underlyingApy"]
        if sort_by == "expiry":
            return r["daysToExpiry"]
        return r["fixedApy"]

    rows.sort(key=sort_key, reverse=descending)
    return rows
|
|
1112
|
+
|
|
1113
|
+
# ---------------------------------------
|
|
1114
|
+
# Decision + execution: best PT swap
|
|
1115
|
+
# ---------------------------------------
|
|
1116
|
+
|
|
1117
|
+
async def build_best_pt_swap_tx(
    self,
    *,
    chain: ChainLike,
    token_in: str,
    amount_in: str,
    receiver: str,
    slippage: float = 0.01,
    enable_aggregator: bool = True,
    aggregators: Sequence[str] | str | None = None,
    # filters
    min_liquidity_usd: float = 250_000.0,
    min_volume_usd_24h: float = 25_000.0,
    min_days_to_expiry: float = 7.0,
    # performance / rate-limit controls
    max_markets_to_quote: int = 10,
    quote_concurrency: int = 6,
    # selection preference
    prefer: Literal["effective_apy", "fixed_apy"] = "effective_apy",
) -> dict[str, Any]:
    """
    1) Fetch active markets on chain
    2) Filter by liquidity/volume/expiry window
    3) Take top N by fixedApy (impliedApy)
    4) Quote swap token_in -> PT for each candidate market via Hosted SDK swap endpoint
       requesting additionalData: impliedApy,effectiveApy
    5) Pick best by effectiveApy (default), return full swap response incl tx + approvals

    Returns a dict with "ok": True plus selectedMarket/quote/tx/tokenApprovals,
    or "ok": False with a "reason" string when nothing is selectable.
    """
    chain_id = _as_chain_id(chain)

    markets = await self.list_active_pt_yt_markets(
        chain=chain_id,
        min_liquidity_usd=min_liquidity_usd,
        min_volume_usd_24h=min_volume_usd_24h,
        min_days_to_expiry=min_days_to_expiry,
        sort_by="fixed_apy",
        descending=True,
    )

    if not markets:
        # Soft failure: report back the filters so the caller can relax them.
        return {
            "ok": False,
            "reason": "No markets matched filters",
            "chainId": chain_id,
            "filters": {
                "min_liquidity_usd": min_liquidity_usd,
                "min_volume_usd_24h": min_volume_usd_24h,
                "min_days_to_expiry": min_days_to_expiry,
            },
        }

    # Only quote the top-N markets to respect API rate limits.
    candidates = markets[: max(1, int(max_markets_to_quote))]

    async def quote_one(m: dict[str, Any]) -> dict[str, Any]:
        swap = await self.sdk_swap_v2(
            chain=chain_id,
            market_address=m["marketAddress"],
            receiver=receiver,
            slippage=slippage,
            token_in=token_in,
            token_out=m["ptAddress"],
            amount_in=amount_in,
            enable_aggregator=enable_aggregator,
            aggregators=aggregators,
            additional_data=["impliedApy", "effectiveApy"],
        )
        return {"market": m, "swap": swap}

    # m=m default binds each candidate at lambda creation (late-binding pitfall).
    quote_jobs: list[Callable[[], Awaitable[Any]]] = [
        (lambda m=m: quote_one(m)) for m in candidates
    ]

    try:
        quoted = await _gather_limited(
            quote_jobs, concurrency=int(quote_concurrency)
        )
    except Exception as exc:
        return {
            "ok": False,
            "reason": "Quote failed",
            "chainId": chain_id,
            "error": repr(exc),
        }

    def extract_effective_apy(bundle: dict[str, Any]) -> float | None:
        # effectiveApy lives under swap.data; None when absent or non-numeric.
        data = (bundle.get("swap") or {}).get("data") or {}
        val = data.get("effectiveApy")
        if val is None:
            return None
        try:
            return float(val)
        except Exception:
            return None

    def extract_implied_after(bundle: dict[str, Any]) -> float | None:
        data = (bundle.get("swap") or {}).get("data") or {}
        imp = data.get("impliedApy")
        # impliedApy can be {before, after}
        if isinstance(imp, dict):
            after = imp.get("after")
            if after is None:
                return None
            try:
                return float(after)
            except Exception:
                return None
        return None

    def extract_price_impact(bundle: dict[str, Any]) -> float:
        data = (bundle.get("swap") or {}).get("data") or {}
        try:
            return float(data.get("priceImpact", 0.0) or 0.0)
        except Exception:
            return 0.0

    # Keep only quotes that came back with an executable tx.
    valid: list[dict[str, Any]] = []
    for b in quoted:
        swap = b.get("swap") or {}
        tx = swap.get("tx")
        if isinstance(tx, dict) and tx.get("to") and tx.get("data") is not None:
            valid.append(b)

    if not valid:
        return {
            "ok": False,
            "reason": "No valid swap quotes (tx missing). Check token_in existence/decimals and enable_aggregator.",
            "chainId": chain_id,
        }

    def score(bundle: dict[str, Any]) -> tuple[float, float, float, float]:
        m = bundle["market"]
        eff = extract_effective_apy(bundle)
        imp_after = extract_implied_after(bundle)
        fixed = float(m.get("fixedApy", 0.0) or 0.0)

        # Fallback chain for the primary metric:
        # effectiveApy -> impliedApy.after -> pre-quote fixedApy.
        if prefer == "effective_apy":
            primary = (
                eff
                if eff is not None
                else (imp_after if imp_after is not None else fixed)
            )
        else:
            primary = fixed

        pi = extract_price_impact(bundle)
        liq = float(m.get("liquidityUsd", 0.0) or 0.0)
        vol = float(m.get("volumeUsd24h", 0.0) or 0.0)

        # Max primary, min price impact, max liquidity, max volume
        return (primary, -pi, liq, vol)

    best = max(valid, key=score)
    best_market = best["market"]
    best_swap = best["swap"]
    best_data = best_swap.get("data") or {}

    return {
        "ok": True,
        "chainId": chain_id,
        "selectedMarket": best_market,
        "quote": {
            "amountOut": best_data.get("amountOut"),
            "priceImpact": best_data.get("priceImpact"),
            "impliedApy": best_data.get("impliedApy"),
            "effectiveApy": best_data.get("effectiveApy"),
        },
        "tx": best_swap.get("tx"),
        "tokenApprovals": best_swap.get("tokenApprovals", []),
        "raw": best_swap,
        # Per-candidate summary so callers can audit the selection.
        "evaluated": [
            {
                "marketAddress": b["market"]["marketAddress"],
                "ptAddress": b["market"]["ptAddress"],
                "fixedApy": b["market"]["fixedApy"],
                "liquidityUsd": b["market"]["liquidityUsd"],
                "volumeUsd24h": b["market"]["volumeUsd24h"],
                "daysToExpiry": b["market"]["daysToExpiry"],
                "effectiveApy": extract_effective_apy(b),
                "impliedApyAfter": extract_implied_after(b),
                "priceImpact": extract_price_impact(b),
            }
            for b in valid
        ],
    }
|
|
1301
|
+
|
|
1302
|
+
async def build_best_pt_convert_tx(
|
|
1303
|
+
self,
|
|
1304
|
+
*,
|
|
1305
|
+
chain: ChainLike,
|
|
1306
|
+
token_in: str,
|
|
1307
|
+
amount_in: str,
|
|
1308
|
+
receiver: str,
|
|
1309
|
+
slippage: float = 0.01,
|
|
1310
|
+
enable_aggregator: bool = True,
|
|
1311
|
+
aggregators: Sequence[str] | str | None = None,
|
|
1312
|
+
additional_data: Sequence[str] | str | None = (
|
|
1313
|
+
"impliedApy",
|
|
1314
|
+
"effectiveApy",
|
|
1315
|
+
"priceImpact",
|
|
1316
|
+
),
|
|
1317
|
+
# filters
|
|
1318
|
+
min_liquidity_usd: float = 250_000.0,
|
|
1319
|
+
min_volume_usd_24h: float = 25_000.0,
|
|
1320
|
+
min_days_to_expiry: float = 7.0,
|
|
1321
|
+
# rate-limit controls
|
|
1322
|
+
max_markets_to_quote: int = 10,
|
|
1323
|
+
min_ratelimit_remaining: int = 1,
|
|
1324
|
+
# selection preference
|
|
1325
|
+
prefer: Literal["effective_apy", "fixed_apy"] = "effective_apy",
|
|
1326
|
+
) -> dict[str, Any]:
|
|
1327
|
+
"""
|
|
1328
|
+
Like build_best_pt_swap_tx(), but uses the universal convert endpoint.
|
|
1329
|
+
|
|
1330
|
+
This avoids hardcoding market routers and supports mint/redeem/roll flows.
|
|
1331
|
+
"""
|
|
1332
|
+
chain_id = _as_chain_id(chain)
|
|
1333
|
+
|
|
1334
|
+
markets = await self.list_active_pt_yt_markets(
|
|
1335
|
+
chain=chain_id,
|
|
1336
|
+
min_liquidity_usd=min_liquidity_usd,
|
|
1337
|
+
min_volume_usd_24h=min_volume_usd_24h,
|
|
1338
|
+
min_days_to_expiry=min_days_to_expiry,
|
|
1339
|
+
sort_by="fixed_apy",
|
|
1340
|
+
descending=True,
|
|
1341
|
+
)
|
|
1342
|
+
|
|
1343
|
+
if not markets:
|
|
1344
|
+
return {
|
|
1345
|
+
"ok": False,
|
|
1346
|
+
"reason": "No markets matched filters",
|
|
1347
|
+
"chainId": chain_id,
|
|
1348
|
+
}
|
|
1349
|
+
|
|
1350
|
+
candidates = markets[: max(1, int(max_markets_to_quote))]
|
|
1351
|
+
|
|
1352
|
+
def extract_effective_apy(bundle: dict[str, Any]) -> float | None:
|
|
1353
|
+
data = ((bundle.get("plan") or {}).get("route") or {}).get("data") or {}
|
|
1354
|
+
val = data.get("effectiveApy")
|
|
1355
|
+
if val is None:
|
|
1356
|
+
return None
|
|
1357
|
+
try:
|
|
1358
|
+
return float(val)
|
|
1359
|
+
except Exception:
|
|
1360
|
+
return None
|
|
1361
|
+
|
|
1362
|
+
def extract_implied_after(bundle: dict[str, Any]) -> float | None:
|
|
1363
|
+
data = ((bundle.get("plan") or {}).get("route") or {}).get("data") or {}
|
|
1364
|
+
imp = data.get("impliedApy")
|
|
1365
|
+
if isinstance(imp, dict):
|
|
1366
|
+
after = imp.get("after")
|
|
1367
|
+
if after is None:
|
|
1368
|
+
return None
|
|
1369
|
+
try:
|
|
1370
|
+
return float(after)
|
|
1371
|
+
except Exception:
|
|
1372
|
+
return None
|
|
1373
|
+
return None
|
|
1374
|
+
|
|
1375
|
+
def extract_price_impact(bundle: dict[str, Any]) -> float:
|
|
1376
|
+
data = ((bundle.get("plan") or {}).get("route") or {}).get("data") or {}
|
|
1377
|
+
try:
|
|
1378
|
+
return float(data.get("priceImpact", 0.0) or 0.0)
|
|
1379
|
+
except Exception:
|
|
1380
|
+
return 0.0
|
|
1381
|
+
|
|
1382
|
+
valid: list[dict[str, Any]] = []
|
|
1383
|
+
last_rate_limit: dict[str, Any] | None = None
|
|
1384
|
+
|
|
1385
|
+
for m in candidates:
|
|
1386
|
+
if (
|
|
1387
|
+
last_rate_limit is not None
|
|
1388
|
+
and isinstance(last_rate_limit.get("ratelimitRemaining"), int)
|
|
1389
|
+
and last_rate_limit["ratelimitRemaining"]
|
|
1390
|
+
<= int(min_ratelimit_remaining)
|
|
1391
|
+
):
|
|
1392
|
+
break
|
|
1393
|
+
|
|
1394
|
+
try:
|
|
1395
|
+
convert_resp = await self.sdk_convert_v2(
|
|
1396
|
+
chain=chain_id,
|
|
1397
|
+
slippage=slippage,
|
|
1398
|
+
receiver=receiver,
|
|
1399
|
+
inputs=[{"token": token_in, "amount": str(amount_in)}],
|
|
1400
|
+
outputs=[m["ptAddress"]],
|
|
1401
|
+
enable_aggregator=enable_aggregator,
|
|
1402
|
+
aggregators=aggregators,
|
|
1403
|
+
additional_data=additional_data,
|
|
1404
|
+
)
|
|
1405
|
+
plan = self.build_convert_plan(
|
|
1406
|
+
chain=chain_id, convert_response=convert_resp
|
|
1407
|
+
)
|
|
1408
|
+
except Exception:
|
|
1409
|
+
continue
|
|
1410
|
+
|
|
1411
|
+
last_rate_limit = (
|
|
1412
|
+
convert_resp.get("rateLimit")
|
|
1413
|
+
if isinstance(convert_resp, dict)
|
|
1414
|
+
else None
|
|
1415
|
+
)
|
|
1416
|
+
|
|
1417
|
+
tx = plan.get("tx") if isinstance(plan, dict) else None
|
|
1418
|
+
if not (
|
|
1419
|
+
isinstance(tx, dict) and tx.get("to") and tx.get("data") is not None
|
|
1420
|
+
):
|
|
1421
|
+
continue
|
|
1422
|
+
|
|
1423
|
+
valid.append({"market": m, "plan": plan, "raw": convert_resp})
|
|
1424
|
+
|
|
1425
|
+
if not valid:
|
|
1426
|
+
return {
|
|
1427
|
+
"ok": False,
|
|
1428
|
+
"reason": "No valid convert quotes (tx missing). Check token_in existence/decimals and enable_aggregator.",
|
|
1429
|
+
"chainId": chain_id,
|
|
1430
|
+
"rateLimit": last_rate_limit,
|
|
1431
|
+
}
|
|
1432
|
+
|
|
1433
|
+
def score(bundle: dict[str, Any]) -> tuple[float, float, float, float]:
|
|
1434
|
+
m = bundle["market"]
|
|
1435
|
+
eff = extract_effective_apy(bundle)
|
|
1436
|
+
imp_after = extract_implied_after(bundle)
|
|
1437
|
+
fixed = float(m.get("fixedApy", 0.0) or 0.0)
|
|
1438
|
+
|
|
1439
|
+
if prefer == "effective_apy":
|
|
1440
|
+
primary = (
|
|
1441
|
+
eff
|
|
1442
|
+
if eff is not None
|
|
1443
|
+
else (imp_after if imp_after is not None else fixed)
|
|
1444
|
+
)
|
|
1445
|
+
else:
|
|
1446
|
+
primary = fixed
|
|
1447
|
+
|
|
1448
|
+
pi = extract_price_impact(bundle)
|
|
1449
|
+
liq = float(m.get("liquidityUsd", 0.0) or 0.0)
|
|
1450
|
+
vol = float(m.get("volumeUsd24h", 0.0) or 0.0)
|
|
1451
|
+
return (primary, -pi, liq, vol)
|
|
1452
|
+
|
|
1453
|
+
best = max(valid, key=score)
|
|
1454
|
+
best_market = best["market"]
|
|
1455
|
+
best_plan = best["plan"]
|
|
1456
|
+
best_route_data = (best_plan.get("route") or {}).get("data") or {}
|
|
1457
|
+
best_outputs = best_plan.get("outputs") or []
|
|
1458
|
+
amount_out = (
|
|
1459
|
+
best_outputs[0].get("amount")
|
|
1460
|
+
if isinstance(best_outputs, list) and best_outputs
|
|
1461
|
+
else None
|
|
1462
|
+
)
|
|
1463
|
+
|
|
1464
|
+
return {
|
|
1465
|
+
"ok": True,
|
|
1466
|
+
"chainId": chain_id,
|
|
1467
|
+
"selectedMarket": best_market,
|
|
1468
|
+
"quote": {
|
|
1469
|
+
"amountOut": amount_out,
|
|
1470
|
+
"priceImpact": best_route_data.get("priceImpact"),
|
|
1471
|
+
"impliedApy": best_route_data.get("impliedApy"),
|
|
1472
|
+
"effectiveApy": best_route_data.get("effectiveApy"),
|
|
1473
|
+
},
|
|
1474
|
+
"tx": best_plan.get("tx"),
|
|
1475
|
+
"requiredApprovals": best_plan.get("approvals", []),
|
|
1476
|
+
# Backwards-friendly alias
|
|
1477
|
+
"tokenApprovals": best_plan.get("approvals", []),
|
|
1478
|
+
"raw": best.get("raw"),
|
|
1479
|
+
"rateLimit": (best.get("raw") or {}).get("rateLimit")
|
|
1480
|
+
if isinstance(best.get("raw"), dict)
|
|
1481
|
+
else None,
|
|
1482
|
+
"evaluated": [
|
|
1483
|
+
{
|
|
1484
|
+
"marketAddress": b["market"]["marketAddress"],
|
|
1485
|
+
"ptAddress": b["market"]["ptAddress"],
|
|
1486
|
+
"fixedApy": b["market"]["fixedApy"],
|
|
1487
|
+
"liquidityUsd": b["market"]["liquidityUsd"],
|
|
1488
|
+
"volumeUsd24h": b["market"]["volumeUsd24h"],
|
|
1489
|
+
"daysToExpiry": b["market"]["daysToExpiry"],
|
|
1490
|
+
"effectiveApy": extract_effective_apy(b),
|
|
1491
|
+
"impliedApyAfter": extract_implied_after(b),
|
|
1492
|
+
"priceImpact": extract_price_impact(b),
|
|
1493
|
+
}
|
|
1494
|
+
for b in valid
|
|
1495
|
+
],
|
|
1496
|
+
}
|
|
1497
|
+
|
|
1498
|
+
async def build_best_pt_swap_tx_multi_chain(
    self,
    *,
    chains: Sequence[ChainLike] = ("arbitrum", "hyperevm", "base"),
    token_in_by_chain: dict[int, str],
    amount_in_by_chain: dict[int, str],
    receiver_by_chain: dict[int, str],
    slippage: float = 0.01,
    enable_aggregator: bool = True,
    aggregators: Sequence[str] | str | None = None,
    min_liquidity_usd: float = 250_000.0,
    min_volume_usd_24h: float = 25_000.0,
    min_days_to_expiry: float = 7.0,
    max_markets_to_quote: int = 10,
    quote_concurrency: int = 6,
    prefer: Literal["effective_apy", "fixed_apy"] = "effective_apy",
    chain_concurrency: int = 3,
) -> dict[int, dict[str, Any]]:
    """
    Convenience: run best-PT selection per chain.

    You must supply token/amount/receiver per chain (tokens live on that chain).

    Args:
        chains: Chain ids or names to evaluate.
        token_in_by_chain: Input token address per numeric chain id.
        amount_in_by_chain: Raw input amount (string) per numeric chain id.
        receiver_by_chain: Receiver address per numeric chain id.
            NOTE: a missing key for any resolved chain id raises KeyError.
        slippage / enable_aggregator / aggregators / min_liquidity_usd /
            min_volume_usd_24h / min_days_to_expiry / max_markets_to_quote /
            quote_concurrency / prefer: forwarded unchanged to
            ``build_best_pt_swap_tx`` for every chain.
        chain_concurrency: Maximum number of chains processed in parallel.
            Generalizes the previously hard-coded limit; default (3) keeps
            the original behavior.

    Returns:
        Mapping of chain id -> per-chain result dict from
        ``build_best_pt_swap_tx``.
    """
    chain_ids = [_as_chain_id(c) for c in chains]

    async def run_one(cid: int) -> dict[str, Any]:
        # Forward every tuning knob to the single-chain selector.
        return await self.build_best_pt_swap_tx(
            chain=cid,
            token_in=token_in_by_chain[cid],
            amount_in=amount_in_by_chain[cid],
            receiver=receiver_by_chain[cid],
            slippage=slippage,
            enable_aggregator=enable_aggregator,
            aggregators=aggregators,
            min_liquidity_usd=min_liquidity_usd,
            min_volume_usd_24h=min_volume_usd_24h,
            min_days_to_expiry=min_days_to_expiry,
            max_markets_to_quote=max_markets_to_quote,
            quote_concurrency=quote_concurrency,
            prefer=prefer,
        )

    # `cid=cid` binds each chain id at lambda-creation time, avoiding the
    # late-binding closure pitfall.
    results = await _gather_limited(
        [lambda cid=cid: run_one(cid) for cid in chain_ids],
        concurrency=int(chain_concurrency),
    )
    # _gather_limited preserves input order, so a positional zip pairs each
    # chain id with its own result.
    return dict(zip(chain_ids, results, strict=False))
|
|
1542
|
+
|
|
1543
|
+
async def get_full_user_state(
    self,
    *,
    chain: ChainLike,
    account: str,
    include_inactive: bool = True,
    include_sy: bool = True,
    include_zero_positions: bool = False,
    multicall_chunk_size: int = 400,
    include_prices: bool = False,
    price_concurrency: int = 8,
) -> tuple[bool, dict[str, Any] | str]:
    """
    Pendle "full user state" snapshot via on-chain ERC20 balance scan.

    Flow:
      1) Fetch markets from Pendle API (market/pt/yt/sy addresses + expiry metadata)
      2) Multicall ERC20.balanceOf(account) + ERC20.decimals() for PT/YT/LP/(SY)
      3) Optionally fetch market snapshots (API) for markets with positions

    Args:
        chain: Chain id or name; resolved via ``_as_chain_id``.
        account: Wallet address whose balances are scanned.
        include_inactive: When True, expired/inactive markets are scanned too
            (``is_active=None`` is passed to the markets API).
        include_sy: Also scan SY token balances (adds 2 calls per market).
        include_zero_positions: Keep markets with all-zero balances in the result.
        multicall_chunk_size: Max calls per multicall batch.
        include_prices: Attach a ``marketSnapshot`` (API) to each position.
        price_concurrency: Concurrency limit for snapshot fetches.

    Returns:
        ``(True, state_dict)`` on success, ``(False, error_string)`` on any
        failure (the whole body is wrapped in one broad try/except).
    """
    chain_id = _as_chain_id(chain)

    try:
        # Step 1: market metadata from the Pendle API.
        markets_resp = await self.fetch_markets(
            chain_id=chain_id,
            # None means "no filter" so inactive markets are included.
            is_active=None if include_inactive else True,
        )
        markets = markets_resp.get("markets") or []

        now = _now_utc()
        # Normalize API records into a flat, checksummed shape.
        normalized: list[dict[str, Any]] = []
        for m in markets:
            expiry_s = m.get("expiry")
            try:
                expiry_dt = _parse_iso8601(str(expiry_s)) if expiry_s else None
            except Exception:  # noqa: BLE001
                # Unparseable expiry -> treat as unknown rather than failing.
                expiry_dt = None
            days_to_expiry = (
                (expiry_dt - now).total_seconds() / 86400.0 if expiry_dt else None
            )

            normalized.append(
                {
                    "chainId": int(m.get("chainId") or chain_id),
                    "marketName": m.get("name"),
                    "marketAddress": _as_address(str(m.get("address", ""))),
                    "pt": _as_address(str(m.get("pt", ""))),
                    "yt": _as_address(str(m.get("yt", ""))),
                    "sy": _as_address(str(m.get("sy", ""))),
                    "underlying": _as_address(str(m.get("underlyingAsset", ""))),
                    "expiry": expiry_s,
                    "daysToExpiry": days_to_expiry,
                    # Tri-state: True/False when the API says, None when absent.
                    "active": bool(m.get("isActive"))
                    if m.get("isActive") is not None
                    else None,
                }
            )

        # Step 2: build one big multicall of balanceOf + decimals pairs.
        async with web3_from_chain_id(chain_id) as web3:
            user_ck = web3.to_checksum_address(account)
            multicall = MulticallAdapter(chain_id=chain_id, web3=web3)

            # call_specs[i] describes calls[i]:
            # (market index, token kind, token address, "bal"|"dec").
            call_specs: list[tuple[int, str, str, str]] = []
            calls: list[Any] = []

            def add_token_calls(midx: int, kind: str, token: str) -> None:
                # Queue a balanceOf + decimals pair for one token; no-op for
                # markets that lack the token address.
                if not token:
                    return
                token_ck = web3.to_checksum_address(token)
                erc20 = web3.eth.contract(address=token_ck, abi=ERC20_ABI)

                calls.append(
                    multicall.build_call(
                        token_ck,
                        erc20.encode_abi("balanceOf", args=[user_ck]),
                    )
                )
                call_specs.append((midx, kind, token_ck, "bal"))

                calls.append(
                    multicall.build_call(
                        token_ck,
                        erc20.encode_abi("decimals", args=[]),
                    )
                )
                call_specs.append((midx, kind, token_ck, "dec"))

            for i, m in enumerate(normalized):
                add_token_calls(i, "pt", m["pt"])
                add_token_calls(i, "yt", m["yt"])
                # LP token address is the market contract itself.
                add_token_calls(i, "lp", m["marketAddress"])
                if include_sy:
                    add_token_calls(i, "sy", m["sy"])

            # Execute in chunks; `decoded` is positionally aligned with
            # `calls` (and therefore with `call_specs`).
            decoded = await self._multicall_uint256_chunked(
                multicall=multicall,
                calls=calls,
                chunk_size=multicall_chunk_size,
            )

        # Fresh balances dict per market (dict(m, balances={}) copies m).
        per_market: list[dict[str, Any]] = [
            dict(m, balances={}) for m in normalized
        ]

        # Fold decoded results back onto their markets by positional spec.
        # strict=False tolerates a short `decoded` (partial multicall result).
        for spec, val in zip(call_specs, decoded, strict=False):
            midx, kind, token, which = spec
            if midx >= len(per_market):
                continue
            bucket = per_market[midx]["balances"].setdefault(
                kind,
                {
                    "address": token,
                    "raw": 0,
                    "decimals": None,
                },
            )
            if which == "bal":
                bucket["raw"] = int(val or 0)
            else:
                bucket["decimals"] = int(val) if val is not None else None

        # Step 3: keep only markets where the user holds something,
        # unless include_zero_positions asks for everything.
        positions: list[dict[str, Any]] = []
        for m in per_market:
            balances = m.get("balances") or {}
            has_any = False
            for kind in ("pt", "yt", "lp", "sy"):
                if kind in balances and int(balances[kind].get("raw") or 0) > 0:
                    has_any = True
                    break
            if not include_zero_positions and not has_any:
                continue
            positions.append(m)

        # Optional enrichment: per-market API snapshot (best-effort).
        if include_prices and positions:

            async def fetch_one(pos: dict[str, Any]) -> dict[str, Any]:
                cid = int(pos.get("chainId") or chain_id)
                market_address = str(pos.get("marketAddress") or "").strip()
                if not market_address:
                    return {}
                try:
                    return await self.fetch_market_snapshot(
                        chain_id=cid, market_address=market_address
                    )
                except Exception:  # noqa: BLE001
                    # Snapshot failures never fail the whole state call.
                    return {}

            # `pos=pos` binds each position at lambda creation (late-binding
            # closure pitfall); _gather_limited preserves order.
            snapshots = await _gather_limited(
                [lambda pos=pos: fetch_one(pos) for pos in positions],
                concurrency=int(price_concurrency),
            )
            for pos, snap in zip(positions, snapshots, strict=False):
                if snap:
                    pos["marketSnapshot"] = snap

        return (
            True,
            {
                "protocol": "pendle",
                "source": "onchain_scan_multicall",
                "chainId": int(chain_id),
                "account": account,
                "positions": positions,
            },
        )
    except Exception as exc:  # noqa: BLE001
        # Any failure collapses to (False, message) per this adapter's
        # tuple-result convention.
        return False, str(exc)
|
|
1710
|
+
|
|
1711
|
+
# ---------------------------------------
|
|
1712
|
+
# Execute swap
|
|
1713
|
+
# ---------------------------------------
|
|
1714
|
+
|
|
1715
|
+
async def execute_swap(
    self,
    *,
    chain: ChainLike,
    market_address: str,
    token_in: str,
    token_out: str,
    amount_in: str,
    receiver: str | None = None,
    slippage: float = 0.01,
    enable_aggregator: bool = False,
    aggregators: Sequence[str] | str | None = None,
) -> tuple[bool, dict[str, Any]]:
    """
    Execute a Pendle swap: get quote, handle approvals, broadcast tx.

    This is a generic execution method that can swap into any token:
    - PT (Principal Token) - for fixed yield
    - YT (Yield Token) - for floating yield
    - SY (Standardized Yield) - underlying wrapper
    - Or any other token the market supports

    Args:
        chain: Chain ID or name (e.g., 42161 or "arbitrum")
        market_address: Pendle market address
        token_in: Input token address (ERC20)
        token_out: Output token address (PT, YT, SY, etc.)
        amount_in: Amount in raw base units (string)
        receiver: Optional receiver address (defaults to strategy wallet)
        slippage: Slippage tolerance as decimal (0.01 = 1%)
        enable_aggregator: Enable DEX aggregators for routing
        aggregators: Specific aggregators to use

    Returns:
        tuple[bool, dict]: (success, details_dict); failure dicts carry a
        "stage" key ("quote" | "approval" | "broadcast").
    """
    chain_id = _as_chain_id(chain)
    sender = self._strategy_address()
    actual_receiver = receiver or sender

    # Step 1: Get quote via sdk_swap_v2
    quote_result = await self.sdk_swap_v2(
        chain=chain_id,
        market_address=market_address,
        receiver=actual_receiver,
        slippage=slippage,
        token_in=token_in,
        token_out=token_out,
        amount_in=amount_in,
        enable_aggregator=enable_aggregator,
        aggregators=aggregators,
        additional_data=["impliedApy", "effectiveApy"],
    )

    tx_data = quote_result.get("tx")
    if not tx_data or not tx_data.get("to"):
        return False, {
            "error": "Quote returned invalid tx",
            "stage": "quote",
            "raw": quote_result,
        }

    # Step 2: Handle token approvals
    token_approvals = quote_result.get("tokenApprovals") or []
    # The router/contract the quote tx targets is the spender to approve.
    spender = to_checksum_address(tx_data["to"])

    for approval in token_approvals:
        token = approval.get("token")
        amount = approval.get("amount")
        if not token or not amount:
            # Malformed approval entries are skipped rather than fatal.
            continue
        approved, result = await self._ensure_allowance(
            chain_id=chain_id,
            token_address=token,
            owner=sender,
            spender=spender,
            amount=int(amount),
        )
        if not approved:
            return False, {
                "error": f"Approval failed for {token}",
                "stage": "approval",
                "details": result,
            }

    # Step 3: Broadcast swap tx
    swap_tx = {
        "chainId": chain_id,
        "from": to_checksum_address(sender),
        "to": to_checksum_address(tx_data["to"]),
        "data": tx_data["data"],
        "value": int(tx_data.get("value") or 0),
    }

    try:
        success, tx_hash = await self._send_tx(swap_tx)
    except Exception as exc:
        return False, {
            "error": str(exc),
            "stage": "broadcast",
            "quote": quote_result,
        }

    # Bug fix: the `success` flag from _send_tx was previously unpacked but
    # never checked, so a non-raising broadcast failure was reported as
    # overall success. Assumes _send_tx returns (ok, tx_hash_or_detail) —
    # if it always raises on failure this check is a harmless no-op.
    if not success:
        return False, {
            "error": "Broadcast reported failure",
            "stage": "broadcast",
            "tx_hash": tx_hash,
            "quote": quote_result,
        }

    return True, {
        "tx_hash": tx_hash,
        "chainId": chain_id,
        "quote": quote_result.get("data"),
        "tokenApprovals": token_approvals,
    }
|
|
1824
|
+
|
|
1825
|
+
# ---------------------------------------
|
|
1826
|
+
# Execute universal convert
|
|
1827
|
+
# ---------------------------------------
|
|
1828
|
+
|
|
1829
|
+
async def execute_convert(
    self,
    *,
    chain: ChainLike,
    slippage: float,
    inputs: Sequence[dict[str, str]],
    outputs: Sequence[str],
    receiver: str | None = None,
    enable_aggregator: bool = False,
    aggregators: Sequence[str] | str | None = None,
    additional_data: Sequence[str] | str | None = None,
    need_scale: bool | None = None,
    use_limit_order: bool | None = True,
    redeem_rewards: bool | None = False,
    rebuild_after_approval: bool = True,
) -> tuple[bool, dict[str, Any]]:
    """
    Execute a Pendle Hosted SDK convert: build tx, handle approvals, broadcast.

    This is the recommended universal entrypoint for Pendle actions:
    swaps, LP add/remove, mint/redeem, roll, etc.

    Args:
        chain: Chain id or name; resolved via ``_as_chain_id``.
        slippage: Slippage tolerance as a decimal fraction.
        inputs: Items of ``{"token": addr, "amount": raw_units_str}``.
        outputs: Output token/market addresses for the convert.
        receiver: Output receiver; defaults to the strategy wallet.
        enable_aggregator / aggregators / additional_data / need_scale /
            use_limit_order / redeem_rewards: forwarded to ``sdk_convert_v2``.
        rebuild_after_approval: Re-quote after granting approvals so the
            broadcast tx is not a "preview route" built without allowance.

    Returns:
        ``(True, details)`` on success; ``(False, details)`` on failure,
        where details["stage"] is one of
        "preflight" | "quote" | "approval" | "rebuild" | "broadcast".
    """
    chain_id = _as_chain_id(chain)
    sender = self._strategy_address()
    actual_receiver = receiver or sender

    # Preflight balances for each input token.
    try:
        for i, item in enumerate(inputs):
            token = item.get("token")
            amount_s = item.get("amount")
            if not (isinstance(token, str) and token):
                return False, {
                    "stage": "preflight",
                    "error": f"inputs[{i}].token is required",
                }
            if amount_s is None:
                return False, {
                    "stage": "preflight",
                    "error": f"inputs[{i}].amount is required",
                }
            # str() first so both int and string amounts are accepted.
            amount = int(str(amount_s))
            bal = await get_token_balance(token, chain_id, sender)
            if int(bal) < int(amount):
                return False, {
                    "stage": "preflight",
                    "error": "Insufficient balance",
                    "token": token,
                    "need": amount,
                    "have": int(bal),
                }
    except Exception as exc:  # noqa: BLE001
        return False, {"stage": "preflight", "error": str(exc)}

    # Build convert plan
    try:
        convert_resp = await self.sdk_convert_v2(
            chain=chain_id,
            slippage=slippage,
            receiver=actual_receiver,
            inputs=inputs,
            outputs=outputs,
            enable_aggregator=enable_aggregator,
            aggregators=aggregators,
            additional_data=additional_data,
            need_scale=need_scale,
            use_limit_order=use_limit_order,
            redeem_rewards=redeem_rewards,
        )
        plan = self.build_convert_plan(
            chain=chain_id, convert_response=convert_resp
        )
    except Exception as exc:  # noqa: BLE001
        return False, {"stage": "quote", "error": str(exc)}

    # The contract the planned tx targets is the spender to approve.
    spender = plan["tx"]["to"]
    approvals = plan.get("approvals") or []

    # Approvals
    for approval in approvals:
        token = approval.get("token")
        amount = approval.get("amount")
        if not (isinstance(token, str) and token and amount is not None):
            # Skip malformed approval entries rather than failing.
            continue
        try:
            approved, result = await self._ensure_allowance(
                chain_id=chain_id,
                token_address=token,
                owner=sender,
                spender=spender,
                amount=int(str(amount)),
            )
        except Exception as exc:  # noqa: BLE001
            return False, {
                "stage": "approval",
                "error": str(exc),
                "token": token,
            }
        if not approved:
            return False, {
                "stage": "approval",
                "error": f"Approval failed for {token}",
                "details": result,
            }

    # Optional re-build after approvals to avoid "preview routes".
    if approvals and rebuild_after_approval:
        try:
            # Same request as above; rebinds convert_resp and plan so the
            # broadcast uses a tx built with allowances in place.
            convert_resp = await self.sdk_convert_v2(
                chain=chain_id,
                slippage=slippage,
                receiver=actual_receiver,
                inputs=inputs,
                outputs=outputs,
                enable_aggregator=enable_aggregator,
                aggregators=aggregators,
                additional_data=additional_data,
                need_scale=need_scale,
                use_limit_order=use_limit_order,
                redeem_rewards=redeem_rewards,
            )
            plan = self.build_convert_plan(
                chain=chain_id, convert_response=convert_resp
            )
            # Keep the already-granted approvals if the rebuilt plan
            # reports none.
            approvals = plan.get("approvals") or approvals
        except Exception as exc:  # noqa: BLE001
            return False, {"stage": "rebuild", "error": str(exc)}

    # Broadcast tx (exactly as returned)
    try:
        _, tx_hash = await self._send_tx(plan["tx"])
    except Exception as exc:  # noqa: BLE001
        return False, {
            "stage": "broadcast",
            "error": str(exc),
            "tx": plan.get("tx"),
        }

    # Post-check balances for output tokens.
    post_balances: dict[str, int] = {}
    for out in plan.get("outputs") or []:
        if not isinstance(out, dict):
            continue
        token = out.get("token")
        if not isinstance(token, str):
            continue
        try:
            post_balances[token] = int(
                await get_token_balance(token, chain_id, actual_receiver)
            )
        except Exception:  # noqa: BLE001
            # Post-check is informational only; never fail the convert here.
            continue

    return True, {
        "tx_hash": tx_hash,
        "chainId": chain_id,
        "action": plan.get("action"),
        "approvals": approvals,
        "outputs": plan.get("outputs"),
        "postBalances": post_balances,
        "rateLimit": (convert_resp or {}).get("rateLimit")
        if isinstance(convert_resp, dict)
        else None,
    }
|