openhands-sdk 1.10.0__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openhands/sdk/agent/agent.py +60 -27
- openhands/sdk/agent/base.py +1 -1
- openhands/sdk/context/condenser/base.py +36 -3
- openhands/sdk/context/condenser/llm_summarizing_condenser.py +65 -1
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +2 -1
- openhands/sdk/context/skills/skill.py +2 -25
- openhands/sdk/conversation/conversation.py +5 -0
- openhands/sdk/conversation/impl/local_conversation.py +19 -13
- openhands/sdk/conversation/impl/remote_conversation.py +10 -0
- openhands/sdk/conversation/stuck_detector.py +18 -9
- openhands/sdk/llm/__init__.py +16 -0
- openhands/sdk/llm/auth/__init__.py +28 -0
- openhands/sdk/llm/auth/credentials.py +157 -0
- openhands/sdk/llm/auth/openai.py +762 -0
- openhands/sdk/llm/llm.py +175 -20
- openhands/sdk/llm/options/responses_options.py +8 -7
- openhands/sdk/llm/utils/model_features.py +2 -0
- openhands/sdk/secret/secrets.py +13 -1
- openhands/sdk/workspace/remote/base.py +8 -3
- openhands/sdk/workspace/remote/remote_workspace_mixin.py +40 -7
- {openhands_sdk-1.10.0.dist-info → openhands_sdk-1.11.0.dist-info}/METADATA +1 -1
- {openhands_sdk-1.10.0.dist-info → openhands_sdk-1.11.0.dist-info}/RECORD +24 -21
- {openhands_sdk-1.10.0.dist-info → openhands_sdk-1.11.0.dist-info}/WHEEL +0 -0
- {openhands_sdk-1.10.0.dist-info → openhands_sdk-1.11.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,762 @@
|
|
|
1
|
+
"""OpenAI subscription-based authentication via OAuth.
|
|
2
|
+
|
|
3
|
+
This module implements OAuth PKCE flow for authenticating with OpenAI's ChatGPT
|
|
4
|
+
service, allowing users with ChatGPT Plus/Pro subscriptions to use Codex models
|
|
5
|
+
without consuming API credits.
|
|
6
|
+
|
|
7
|
+
Uses authlib for OAuth handling and aiohttp for the callback server.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import asyncio
|
|
13
|
+
import platform
|
|
14
|
+
import sys
|
|
15
|
+
import threading
|
|
16
|
+
import time
|
|
17
|
+
import webbrowser
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import TYPE_CHECKING, Any, Literal
|
|
20
|
+
from urllib.parse import urlencode
|
|
21
|
+
|
|
22
|
+
from aiohttp import web
|
|
23
|
+
from authlib.common.security import generate_token
|
|
24
|
+
from authlib.jose import JsonWebKey, jwt
|
|
25
|
+
from authlib.jose.errors import JoseError
|
|
26
|
+
from authlib.oauth2.rfc7636 import create_s256_code_challenge
|
|
27
|
+
from httpx import AsyncClient, Client
|
|
28
|
+
|
|
29
|
+
from openhands.sdk.llm.auth.credentials import (
|
|
30
|
+
CredentialStore,
|
|
31
|
+
OAuthCredentials,
|
|
32
|
+
get_credentials_dir,
|
|
33
|
+
)
|
|
34
|
+
from openhands.sdk.logger import get_logger
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
if TYPE_CHECKING:
|
|
38
|
+
from openhands.sdk.llm.llm import LLM
|
|
39
|
+
|
|
40
|
+
# Supported vendors for subscription-based authentication.
|
|
41
|
+
# Add new vendors here as they become supported.
|
|
42
|
+
SupportedVendor = Literal["openai"]
|
|
43
|
+
|
|
44
|
+
logger = get_logger(__name__)
|
|
45
|
+
|
|
46
|
+
# =========================================================================
# Consent banner constants
# =========================================================================

# User-facing legal text shown before every ChatGPT sign-in attempt.
# NOTE: this string is program output — do not reword it casually.
CONSENT_BANNER = """\
Signing in with ChatGPT uses your ChatGPT account. By continuing, you confirm \
you are a ChatGPT End User and are subject to OpenAI's Terms of Use.
https://openai.com/policies/terms-of-use/
"""

# Name of the marker file (created under the credentials dir) that records
# a prior consent acknowledgment, allowing non-interactive reuse.
CONSENT_MARKER_FILENAME = ".chatgpt_consent_acknowledged"
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def _get_consent_marker_path() -> Path:
    """Return the location of the consent acknowledgment marker file."""
    return get_credentials_dir() / CONSENT_MARKER_FILENAME
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _has_acknowledged_consent() -> bool:
    """Report whether the consent disclaimer was acknowledged on a prior run."""
    marker = _get_consent_marker_path()
    return marker.exists()
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _mark_consent_acknowledged() -> None:
    """Persist an empty marker file recording the user's consent."""
    marker = _get_consent_marker_path()
    # The credentials directory may not exist yet on a fresh install.
    marker.parent.mkdir(parents=True, exist_ok=True)
    marker.touch()
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def _display_consent_and_confirm() -> bool:
    """Show the consent banner and obtain the user's confirmation.

    Returns:
        True if user confirms, False otherwise.

    Raises:
        RuntimeError: If running in non-interactive mode without prior consent.
    """
    first_time = not _has_acknowledged_consent()

    # The banner is printed on every sign-in attempt, not only the first one.
    separator = "=" * 70
    print("\n" + separator)
    print(CONSENT_BANNER)
    print(separator + "\n")

    if not sys.stdin.isatty():
        # No TTY: we cannot prompt, so we rely on a prior acknowledgment.
        if first_time:
            raise RuntimeError(
                "Cannot proceed with ChatGPT sign-in: running in non-interactive mode "
                "and consent has not been previously acknowledged. Please run "
                "interactively first to acknowledge the terms."
            )
        logger.info("Non-interactive mode: using previously acknowledged consent")
        return True

    # Interactive mode: ask explicitly; anything but y/yes is a decline.
    try:
        answer = input("Do you want to continue? [y/N]: ").strip().lower()
    except (EOFError, KeyboardInterrupt):
        print()  # Newline after ^C
        return False

    if answer not in ("y", "yes"):
        return False
    if first_time:
        _mark_consent_acknowledged()
    return True
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
# OAuth configuration for OpenAI Codex
# This is a public client ID for OpenAI's OAuth flow (safe to commit)
CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
# Authorization server base URL; all OAuth endpoints hang off this issuer.
ISSUER = "https://auth.openai.com"
# Published key set used to verify JWT access-token signatures.
JWKS_URL = f"{ISSUER}/.well-known/jwks.json"
# Codex Responses endpoint; create_llm derives its base_url from this.
CODEX_API_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses"
# Local port for the OAuth redirect callback server.
DEFAULT_OAUTH_PORT = 1455
OAUTH_TIMEOUT_SECONDS = 300  # 5 minutes
JWKS_CACHE_TTL_SECONDS = 3600  # 1 hour

# Models available via ChatGPT subscription (not API)
OPENAI_CODEX_MODELS = frozenset(
    {
        "gpt-5.1-codex-max",
        "gpt-5.1-codex-mini",
        "gpt-5.2",
        "gpt-5.2-codex",
    }
)
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
# Thread-safe JWKS cache
|
|
139
|
+
# Thread-safe JWKS cache
class _JWKSCache:
    """Thread-safe cache for OpenAI's JWKS (JSON Web Key Set)."""

    def __init__(self) -> None:
        self._keys: dict[str, Any] = {}
        self._fetched_at: float = 0
        self._lock = threading.Lock()

    def get_key_set(self) -> Any:
        """Return the JWKS as a key set, refetching when stale or empty.

        Returns:
            KeySet for verifying JWT signatures.

        Raises:
            RuntimeError: If JWKS cannot be fetched.
        """
        with self._lock:
            age = time.time() - self._fetched_at
            if not self._keys or age > JWKS_CACHE_TTL_SECONDS:
                self._fetch_jwks()
            return JsonWebKey.import_key_set(self._keys)

    def _fetch_jwks(self) -> None:
        """Download the key set from OpenAI's well-known endpoint."""
        try:
            with Client(timeout=10) as http:
                resp = http.get(JWKS_URL)
                resp.raise_for_status()
                self._keys = resp.json()
                self._fetched_at = time.time()
                logger.debug(
                    f"Fetched JWKS from OpenAI: {len(self._keys.get('keys', []))} keys"
                )
        except Exception as e:
            raise RuntimeError(f"Failed to fetch OpenAI JWKS: {e}") from e

    def clear(self) -> None:
        """Reset the cache to its empty state (useful for testing)."""
        with self._lock:
            self._keys = {}
            self._fetched_at = 0


# Module-level singleton: all callers share one key set and one lock.
_jwks_cache = _JWKSCache()
|
|
185
|
+
|
|
186
|
+
def _generate_pkce() -> tuple[str, str]:
    """Create a PKCE (verifier, S256 challenge) pair using authlib."""
    verifier = generate_token(43)
    return verifier, create_s256_code_challenge(verifier)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def _extract_chatgpt_account_id(access_token: str) -> str | None:
    """Extract chatgpt_account_id from a JWT after verifying its signature.

    The token is checked against OpenAI's published JWKS before any claim is
    read, so a token injected through OAuth callback interception cannot
    smuggle in a forged account id.

    Args:
        access_token: The JWT access token from OAuth flow

    Returns:
        The chatgpt_account_id if found and signature is valid, None otherwise
    """
    try:
        # Fetch JWKS and verify JWT signature
        key_set = _jwks_cache.get_key_set()
        claims = jwt.decode(access_token, key_set)

        # Validate standard claims (issuer)
        claims.validate()

        # The account id lives under a vendor-namespaced claim.
        auth_claims = claims.get("https://api.openai.com/auth", {})
        account_id = auth_claims.get("chatgpt_account_id")

        if not account_id:
            logger.warning("chatgpt_account_id not found in JWT payload")
            return None
        logger.debug(f"Extracted chatgpt_account_id: {account_id}")
        return account_id

    except JoseError as e:
        logger.warning(f"JWT signature verification failed: {e}")
        return None
    except RuntimeError as e:
        # JWKS fetch failed - log but don't crash
        logger.warning(f"Could not verify JWT: {e}")
        return None
    except Exception as e:
        logger.warning(f"Failed to decode JWT: {e}")
        return None
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def _build_authorize_url(redirect_uri: str, code_challenge: str, state: str) -> str:
    """Construct the authorization URL for the OAuth PKCE flow."""
    query = urlencode(
        {
            "response_type": "code",
            "client_id": CLIENT_ID,
            "redirect_uri": redirect_uri,
            "scope": "openid profile email offline_access",
            "code_challenge": code_challenge,
            "code_challenge_method": "S256",
            "id_token_add_organizations": "true",
            "codex_cli_simplified_flow": "true",
            "state": state,
            "originator": "openhands",
        }
    )
    return f"{ISSUER}/oauth/authorize?{query}"
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
async def _exchange_code_for_tokens(
    code: str, redirect_uri: str, code_verifier: str
) -> dict[str, Any]:
    """Exchange authorization code for tokens.

    Args:
        code: Authorization code delivered to the OAuth callback.
        redirect_uri: Redirect URI used in the authorization request.
        code_verifier: PKCE verifier matching the challenge sent earlier.

    Returns:
        The token endpoint's JSON payload (access_token, refresh_token, ...).

    Raises:
        RuntimeError: If the token endpoint returns a non-success status.
    """
    async with AsyncClient() as client:
        response = await client.post(
            f"{ISSUER}/oauth/token",
            data={
                "grant_type": "authorization_code",
                "code": code,
                "redirect_uri": redirect_uri,
                "client_id": CLIENT_ID,
                "code_verifier": code_verifier,
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        if not response.is_success:
            # Include the response body: a bare status code hides the OAuth
            # error detail (e.g. invalid_grant) and makes failures opaque.
            raise RuntimeError(
                f"Token exchange failed: {response.status_code} {response.text}"
            )
        return response.json()
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
async def _refresh_access_token(refresh_token: str) -> dict[str, Any]:
    """Refresh the access token using a refresh token.

    Args:
        refresh_token: The long-lived refresh token from a prior login.

    Returns:
        The token endpoint's JSON payload with the new access token.

    Raises:
        RuntimeError: If the token endpoint returns a non-success status.
    """
    async with AsyncClient() as client:
        response = await client.post(
            f"{ISSUER}/oauth/token",
            data={
                "grant_type": "refresh_token",
                "refresh_token": refresh_token,
                "client_id": CLIENT_ID,
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        if not response.is_success:
            # Include the response body: a bare status code hides the OAuth
            # error detail (e.g. expired/revoked refresh token).
            raise RuntimeError(
                f"Token refresh failed: {response.status_code} {response.text}"
            )
        return response.json()
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
# HTML templates for OAuth callback
# Rendered by the local aiohttp server after the browser redirect; the
# success page auto-closes itself after two seconds.
_HTML_SUCCESS = """<!DOCTYPE html>
<html>
<head>
<title>OpenHands - Authorization Successful</title>
<style>
body { font-family: system-ui, sans-serif; display: flex;
justify-content: center; align-items: center; height: 100vh;
margin: 0; background: #1a1a2e; color: #eee; }
.container { text-align: center; padding: 2rem; }
h1 { color: #4ade80; }
p { color: #aaa; }
</style>
</head>
<body>
<div class="container">
<h1>Authorization Successful</h1>
<p>You can close this window and return to OpenHands.</p>
</div>
<script>setTimeout(() => window.close(), 2000);</script>
</body>
</html>"""

# Error page; "{error}" is filled via str.format with the failure detail.
_HTML_ERROR = """<!DOCTYPE html>
<html>
<head>
<title>OpenHands - Authorization Failed</title>
<style>
body { font-family: system-ui, sans-serif; display: flex;
justify-content: center; align-items: center; height: 100vh;
margin: 0; background: #1a1a2e; color: #eee; }
.container { text-align: center; padding: 2rem; }
h1 { color: #f87171; }
p { color: #aaa; }
.error { color: #fca5a5; font-family: monospace; margin-top: 1rem;
padding: 1rem; background: rgba(248,113,113,0.1);
border-radius: 0.5rem; }
</style>
</head>
<body>
<div class="container">
<h1>Authorization Failed</h1>
<p>An error occurred during authorization.</p>
<div class="error">{error}</div>
</div>
</body>
</html>"""
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
class OpenAISubscriptionAuth:
    """Handle OAuth authentication for OpenAI ChatGPT subscription access."""

    def __init__(
        self,
        credential_store: CredentialStore | None = None,
        oauth_port: int = DEFAULT_OAUTH_PORT,
    ):
        """Initialize the OpenAI subscription auth handler.

        Args:
            credential_store: Optional custom credential store.
            oauth_port: Port for the local OAuth callback server.
        """
        self._credential_store = credential_store or CredentialStore()
        self._oauth_port = oauth_port

    @property
    def vendor(self) -> str:
        """Get the vendor name."""
        return "openai"

    def get_credentials(self) -> OAuthCredentials | None:
        """Get stored credentials if they exist."""
        return self._credential_store.get(self.vendor)

    def has_valid_credentials(self) -> bool:
        """Check if valid (non-expired) credentials exist."""
        creds = self.get_credentials()
        return creds is not None and not creds.is_expired()

    async def refresh_if_needed(self) -> OAuthCredentials | None:
        """Refresh credentials if they are expired.

        Returns:
            Updated credentials, or None if no credentials exist.

        Raises:
            RuntimeError: If token refresh fails.
        """
        creds = self.get_credentials()
        if creds is None:
            return None

        # Still valid: hand back the stored credentials untouched.
        if not creds.is_expired():
            return creds

        logger.info("Refreshing OpenAI access token")
        tokens = await _refresh_access_token(creds.refresh_token)
        # The token endpoint may rotate the refresh token; pass through
        # whatever it returned (None keeps the old one, per the store).
        updated = self._credential_store.update_tokens(
            vendor=self.vendor,
            access_token=tokens["access_token"],
            refresh_token=tokens.get("refresh_token"),
            expires_in=tokens.get("expires_in", 3600),
        )
        return updated

    async def login(self, open_browser: bool = True) -> OAuthCredentials:
        """Perform OAuth login flow.

        This starts a local HTTP server to handle the OAuth callback,
        opens the browser for user authentication, and waits for the
        callback with the authorization code.

        Args:
            open_browser: Whether to automatically open the browser.

        Returns:
            The obtained OAuth credentials.

        Raises:
            RuntimeError: If the OAuth flow fails or times out.
        """
        code_verifier, code_challenge = _generate_pkce()
        state = generate_token(32)
        # Must be http://localhost on the configured port to match the
        # OAuth app's registered redirect.
        redirect_uri = f"http://localhost:{self._oauth_port}/auth/callback"
        auth_url = _build_authorize_url(redirect_uri, code_challenge, state)

        # Future to receive callback result
        callback_future: asyncio.Future[dict[str, Any]] = asyncio.Future()

        # Create aiohttp app for callback
        app = web.Application()

        async def handle_callback(request: web.Request) -> web.Response:
            # Each branch both resolves the future (once) and renders a
            # page so the browser tab shows a sensible result.
            params = request.query

            if "error" in params:
                error_msg = params.get("error_description", params["error"])
                if not callback_future.done():
                    callback_future.set_exception(RuntimeError(error_msg))
                return web.Response(
                    text=_HTML_ERROR.format(error=error_msg),
                    content_type="text/html",
                )

            code = params.get("code")
            if not code:
                error_msg = "Missing authorization code"
                if not callback_future.done():
                    callback_future.set_exception(RuntimeError(error_msg))
                return web.Response(
                    text=_HTML_ERROR.format(error=error_msg),
                    content_type="text/html",
                    status=400,
                )

            # The state echo must match what we generated; a mismatch means
            # the callback did not originate from our authorize request.
            if params.get("state") != state:
                error_msg = "Invalid state - potential CSRF attack"
                if not callback_future.done():
                    callback_future.set_exception(RuntimeError(error_msg))
                return web.Response(
                    text=_HTML_ERROR.format(error=error_msg),
                    content_type="text/html",
                    status=400,
                )

            try:
                tokens = await _exchange_code_for_tokens(
                    code, redirect_uri, code_verifier
                )
                if not callback_future.done():
                    callback_future.set_result(tokens)
                return web.Response(text=_HTML_SUCCESS, content_type="text/html")
            except Exception as e:
                if not callback_future.done():
                    callback_future.set_exception(e)
                return web.Response(
                    text=_HTML_ERROR.format(error=str(e)),
                    content_type="text/html",
                    status=500,
                )

        app.router.add_get("/auth/callback", handle_callback)

        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, "localhost", self._oauth_port)

        try:
            try:
                await site.start()
            except OSError as exc:
                # Translate the common bind failure into actionable advice.
                if "address already in use" in str(exc).lower():
                    raise RuntimeError(
                        "OAuth callback server port "
                        f"{self._oauth_port} is already in use. "
                        "Please free the port or set a different one via "
                        "OPENHANDS_OAUTH_PORT."
                    ) from exc
                raise

            logger.debug(f"OAuth callback server started on port {self._oauth_port}")

            if open_browser:
                logger.info("Opening browser for OpenAI authentication...")
                webbrowser.open(auth_url)
            else:
                logger.info(
                    f"Please open the following URL in your browser:\n{auth_url}"
                )

            try:
                tokens = await asyncio.wait_for(
                    callback_future, timeout=OAUTH_TIMEOUT_SECONDS
                )
            except TimeoutError:
                raise RuntimeError(
                    "OAuth callback timeout - authorization took too long"
                )

            # expires_at is stored in milliseconds since the epoch.
            expires_at = int(time.time() * 1000) + (
                tokens.get("expires_in", 3600) * 1000
            )
            credentials = OAuthCredentials(
                vendor=self.vendor,
                access_token=tokens["access_token"],
                refresh_token=tokens["refresh_token"],
                expires_at=expires_at,
            )
            self._credential_store.save(credentials)
            logger.info("OpenAI OAuth login successful")
            return credentials

        finally:
            # Always tear down the callback server, even on failure/timeout.
            await runner.cleanup()

    def logout(self) -> bool:
        """Remove stored credentials.

        Returns:
            True if credentials were removed, False if none existed.
        """
        return self._credential_store.delete(self.vendor)

    def create_llm(
        self,
        model: str = "gpt-5.2-codex",
        credentials: OAuthCredentials | None = None,
        instructions: str | None = None,
        **llm_kwargs: Any,
    ) -> LLM:
        """Create an LLM instance configured for Codex subscription access.

        Args:
            model: The model to use (must be in OPENAI_CODEX_MODELS).
            credentials: OAuth credentials to use. If None, uses stored credentials.
            instructions: Optional instructions for the Codex model.
            **llm_kwargs: Additional arguments to pass to LLM constructor.

        Returns:
            An LLM instance configured for Codex access.

        Raises:
            ValueError: If the model is not supported or no credentials available.
        """
        # Local import avoids a circular dependency with openhands.sdk.llm.llm.
        from openhands.sdk.llm.llm import LLM

        if model not in OPENAI_CODEX_MODELS:
            raise ValueError(
                f"Model '{model}' is not supported for subscription access. "
                f"Supported models: {', '.join(sorted(OPENAI_CODEX_MODELS))}"
            )

        creds = credentials or self.get_credentials()
        if creds is None:
            raise ValueError(
                "No credentials available. Call login() first or provide credentials."
            )

        account_id = _extract_chatgpt_account_id(creds.access_token)
        if not account_id:
            # Best-effort: proceed without the header rather than failing here.
            logger.warning(
                "Could not extract chatgpt_account_id from access token. "
                "API requests may fail."
            )

        # Build extra_body with Codex-specific params
        extra_body: dict[str, Any] = {"store": False}
        if instructions:
            extra_body["instructions"] = instructions
        if "litellm_extra_body" in llm_kwargs:
            extra_body.update(llm_kwargs.pop("litellm_extra_body"))

        # Build headers matching OpenAI's official Codex CLI
        extra_headers: dict[str, str] = {
            "originator": "codex_cli_rs",
            "OpenAI-Beta": "responses=experimental",
            "User-Agent": f"openhands-sdk ({platform.system()}; {platform.machine()})",
        }
        if account_id:
            extra_headers["chatgpt-account-id"] = account_id

        # Codex API requires streaming and doesn't support temperature/max_output_tokens
        llm = LLM(
            model=f"openai/{model}",
            # base_url is the endpoint with its trailing "/responses" removed.
            base_url=CODEX_API_ENDPOINT.rsplit("/", 1)[0],
            api_key=creds.access_token,
            extra_headers=extra_headers,
            litellm_extra_body=extra_body,
            temperature=None,
            max_output_tokens=None,
            stream=True,
            **llm_kwargs,
        )
        llm._is_subscription = True
        # Ensure these stay None even if model info tried to set them
        llm.max_output_tokens = None
        llm.temperature = None
        return llm
|
|
611
|
+
|
|
612
|
+
|
|
613
|
+
async def subscription_login_async(
    vendor: SupportedVendor = "openai",
    model: str = "gpt-5.2-codex",
    force_login: bool = False,
    open_browser: bool = True,
    skip_consent: bool = False,
    **llm_kwargs: Any,
) -> LLM:
    """Authenticate with a subscription and return an LLM instance.

    This is the main entry point for subscription-based LLM access.
    It handles credential caching, token refresh, and login flow.

    Args:
        vendor: The vendor/provider (currently only "openai" is supported).
        model: The model to use.
        force_login: If True, always perform a fresh login.
        open_browser: Whether to automatically open the browser for login.
        skip_consent: If True, skip the consent prompt (for programmatic use
            where consent has been obtained through other means).
        **llm_kwargs: Additional arguments to pass to LLM constructor.

    Returns:
        An LLM instance configured for subscription access.

    Raises:
        ValueError: If the vendor is not supported.
        RuntimeError: If authentication fails or user declines consent.

    Example:
        >>> import asyncio
        >>> from openhands.sdk.llm.auth import subscription_login_async
        >>> llm = asyncio.run(subscription_login_async(model="gpt-5.2-codex"))
    """
    if vendor != "openai":
        raise ValueError(
            f"Vendor '{vendor}' is not supported. Only 'openai' is supported."
        )

    auth = OpenAISubscriptionAuth()

    # Fast path: reuse stored credentials, refreshing them when expired.
    if not force_login:
        cached = await auth.refresh_if_needed()
        if cached is not None:
            logger.info("Using existing OpenAI credentials")
            return auth.create_llm(model=model, credentials=cached, **llm_kwargs)

    # A fresh login requires consent unless the caller handled it already.
    if not skip_consent and not _display_consent_and_confirm():
        raise RuntimeError("User declined to continue with ChatGPT sign-in")

    fresh = await auth.login(open_browser=open_browser)
    return auth.create_llm(model=model, credentials=fresh, **llm_kwargs)
|
|
669
|
+
|
|
670
|
+
|
|
671
|
+
def subscription_login(
    vendor: SupportedVendor = "openai",
    model: str = "gpt-5.2-codex",
    force_login: bool = False,
    open_browser: bool = True,
    skip_consent: bool = False,
    **llm_kwargs: Any,
) -> LLM:
    """Synchronous wrapper for subscription_login_async.

    See subscription_login_async for full documentation.
    """
    coro = subscription_login_async(
        vendor=vendor,
        model=model,
        force_login=force_login,
        open_browser=open_browser,
        skip_consent=skip_consent,
        **llm_kwargs,
    )
    return asyncio.run(coro)
|
|
693
|
+
|
|
694
|
+
|
|
695
|
+
# =========================================================================
|
|
696
|
+
# Message transformation utilities for subscription mode
|
|
697
|
+
# =========================================================================
|
|
698
|
+
|
|
699
|
+
DEFAULT_SYSTEM_MESSAGE = (
    "You are OpenHands agent, a helpful AI assistant that can interact "
    "with a computer to solve tasks."
)


def inject_system_prefix(
    input_items: list[dict[str, Any]], prefix_content: dict[str, Any]
) -> None:
    """Prepend *prefix_content* to the first user message, in place.

    When no user message exists, a synthetic one carrying only the prefix
    is inserted at the front of *input_items*.

    Args:
        input_items: List of input items (messages) to modify.
        prefix_content: The content dict to prepend
            (e.g., {"type": "input_text", "text": "..."}).
    """
    target = next(
        (
            item
            for item in input_items
            if item.get("type") == "message" and item.get("role") == "user"
        ),
        None,
    )

    if target is None:
        # No user message found, create a synthetic one
        input_items.insert(0, {"role": "user", "content": [prefix_content]})
        return

    existing = target.get("content")
    if not isinstance(existing, list):
        # Wrap scalar content (e.g. a plain string) so we can prepend.
        existing = [existing] if existing else []
    target["content"] = [prefix_content, *existing]
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
def transform_for_subscription(
    system_chunks: list[str], input_items: list[dict[str, Any]]
) -> tuple[str, list[dict[str, Any]]]:
    """Adapt messages for the Codex subscription transport.

    Codex subscription endpoints reject complex/long `instructions`, so:
    1. A minimal default instruction string is used instead.
    2. System prompts are folded into the first user message.
    3. Message items are normalized to match OpenCode's Codex client.

    Args:
        system_chunks: List of system prompt strings to merge.
        input_items: List of input items (messages) to transform.

    Returns:
        A tuple of (instructions, normalized_input_items).
    """
    # Fold all system prompts into a single prefix on the first user turn.
    if system_chunks:
        combined = "\n\n---\n\n".join(system_chunks)
        inject_system_prefix(
            input_items,
            {
                "type": "input_text",
                "text": f"Context (system prompt):\n{combined}\n\n",
            },
        )

    # Normalize: {"type": "message", ...} -> {"role": ..., "content": ...}
    normalized: list[dict[str, Any]] = []
    for entry in input_items:
        if entry.get("type") == "message":
            normalized.append(
                {"role": entry.get("role"), "content": entry.get("content") or []}
            )
        else:
            normalized.append(entry)
    return DEFAULT_SYSTEM_MESSAGE, normalized