@wangxt0223/codex-switcher 0.5.1 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/README.en.md +13 -15
- package/README.md +17 -42
- package/docs/macos-manual-checklist.md +8 -7
- package/docs/upgrade.md +1 -1
- package/package.json +7 -3
- package/plugins/codex-switcher/README.en.md +55 -82
- package/plugins/codex-switcher/README.md +53 -80
- package/plugins/codex-switcher/scripts/codex-sw +13 -1
- package/plugins/codex-switcher/scripts/codex-switcher +1147 -413
- package/plugins/codex-switcher/scripts/profile-metrics.py +391 -0
- package/plugins/codex-switcher/scripts/test-switcher.sh +116 -108
|
@@ -0,0 +1,391 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import argparse
|
|
3
|
+
import base64
|
|
4
|
+
import datetime as dt
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
import subprocess
|
|
8
|
+
import sys
|
|
9
|
+
import time
|
|
10
|
+
from typing import Any, Dict, Optional
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Backend endpoint queried (via curl) for per-account usage / rate-limit data.
USAGE_ENDPOINT = "https://chatgpt.com/backend-api/wham/usage"
# Browser-like User-Agent sent with the usage request.
DEFAULT_USER_AGENT = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
    "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
)
# Placeholder printed for output fields with no data.
DASH = "-"
# Plan names this script recognizes; anything else normalizes to "unknown".
KNOWN_PLANS = {"free", "plus", "pro", "team", "business", "enterprise", "edu"}
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def parse_args() -> argparse.Namespace:
    """Parse the command-line options used by the metrics collector."""
    cli = argparse.ArgumentParser(description="Collect account usage metrics for codex-switcher list output.")
    # The three path/name flags are all mandatory.
    for flag in ("--account-name", "--auth-file", "--data-path"):
        cli.add_argument(flag, required=True)
    cli.add_argument("--timeout-seconds", type=int, default=4)
    return cli.parse_args()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def clamp_percent(value: float) -> int:
    """Clamp *value* into [0, 100] and round to the nearest whole percent."""
    bounded = min(100.0, max(0.0, value))
    return int(round(bounded))
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def sanitize_field(value: str) -> str:
    """Make *value* safe for tab-separated output: tabs and newlines become spaces, ends trimmed."""
    cleaned = value
    for separator in ("\t", "\n"):
        cleaned = cleaned.replace(separator, " ")
    return cleaned.strip()
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def decode_jwt_payload(token: str) -> Dict[str, Any]:
    """Best-effort decode of a JWT's payload (claims) segment.

    Returns the claims as a dict, or {} on any malformed input.
    No signature verification is performed.
    """
    if not token or "." not in token:
        return {}
    segments = token.split(".")
    if len(segments) < 2:
        return {}
    body = segments[1]
    # Restore the base64 padding the JWT encoding strips.
    pad = "=" * ((4 - len(body) % 4) % 4)
    try:
        claims = json.loads(base64.urlsafe_b64decode(body + pad).decode("utf-8"))
    except Exception:
        return {}
    return claims if isinstance(claims, dict) else {}
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def parse_timestamp(value: Any) -> Optional[int]:
    """Coerce an epoch number, digit string, or ISO-8601 string to epoch seconds.

    Millisecond-scale epochs are divided down to seconds; booleans, blanks,
    and anything unparseable yield None.
    """
    # bool is an int subclass, so reject it explicitly first.
    if value is None or isinstance(value, bool):
        return None
    if isinstance(value, (int, float)):
        epoch = int(value)
        if epoch > 10_000_000_000:
            # Values this large are epoch milliseconds.
            epoch = int(epoch / 1000)
        return epoch
    if isinstance(value, str):
        text = value.strip()
        if not text:
            return None
        if text.isdigit():
            return parse_timestamp(int(text))
        # datetime.fromisoformat does not accept a trailing "Z" on older Pythons.
        if text.endswith("Z"):
            text = text[:-1] + "+00:00"
        try:
            return int(dt.datetime.fromisoformat(text).timestamp())
        except Exception:
            return None
    return None
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def normalize_plan(plan: Any) -> str:
    """Normalize a plan identifier to one of KNOWN_PLANS, else "unknown".

    Accepts the raw plan name or a "chatgpt_"-prefixed variant, in any case.
    """
    if not isinstance(plan, str):
        return "unknown"
    name = plan.strip().lower()
    if name in KNOWN_PLANS:
        return name
    prefix = "chatgpt_"
    if name.startswith(prefix):
        stripped = name[len(prefix):]
        if stripped in KNOWN_PLANS:
            return stripped
    return "unknown"
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def load_json(path: str) -> Dict[str, Any]:
    """Read a JSON object from *path*; return {} if unreadable or not a dict."""
    try:
        with open(path, "r", encoding="utf-8") as handle:
            data = json.load(handle)
    except Exception:
        return {}
    return data if isinstance(data, dict) else {}
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def parse_window(window: Any) -> Optional[Dict[str, Any]]:
    """Convert a raw rate-limit window dict to {minutes, remaining_percent, reset_epoch}.

    Returns None when the input is not a dict or is missing a usable
    used_percent or window-length field.
    """
    if not isinstance(window, dict):
        return None

    used_percent = window.get("used_percent")
    if not isinstance(used_percent, (int, float)):
        return None

    length_minutes = window.get("window_minutes")
    if not isinstance(length_minutes, (int, float)):
        # Fall back to a seconds-based length field when minutes are absent.
        length_seconds = window.get("limit_window_seconds")
        if isinstance(length_seconds, (int, float)) and length_seconds > 0:
            length_minutes = int(round(float(length_seconds) / 60.0))
    if not isinstance(length_minutes, (int, float)):
        return None

    # Prefer "resets_at" whenever the key is present, otherwise "reset_at".
    raw_reset = window.get("resets_at") if "resets_at" in window else window.get("reset_at")
    return {
        "minutes": int(length_minutes),
        "remaining_percent": 100.0 - float(used_percent),
        "reset_epoch": parse_timestamp(raw_reset),
    }
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def pick_window(windows: Dict[int, Dict[str, Any]], target: int) -> Optional[Dict[str, Any]]:
    """Select the window whose length matches *target* minutes.

    Falls back to the nearest window when it is within tolerance:
    +/-30 min for the 5h (300-minute) target, +/-720 min for the weekly
    (10080-minute) target.  Returns None otherwise.
    """
    if target in windows:
        return windows[target]
    if not windows:
        return None
    closest = min(windows, key=lambda minutes: abs(minutes - target))
    gap = abs(closest - target)
    if (target == 300 and gap <= 30) or (target == 10080 and gap <= 720):
        return windows[closest]
    return None
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def extract_windows_from_usage_blob(blob: Dict[str, Any]) -> Dict[int, Dict[str, Any]]:
    """Pull every parseable rate-limit window out of a usage payload.

    Looks inside the "rate_limit"/"rate_limits" sub-dicts and then the blob
    itself, checking the well-known window keys plus any "windows" list.
    Returns a mapping of window length in minutes -> parsed window; later
    matches overwrite earlier ones for the same length.
    """
    found: Dict[int, Dict[str, Any]] = {}
    containers = [blob.get(k) for k in ("rate_limit", "rate_limits")]
    containers = [c for c in containers if isinstance(c, dict)]
    containers.append(blob)

    for source in containers:
        for field in ("primary_window", "secondary_window", "primary", "secondary"):
            window = parse_window(source.get(field))
            if window:
                found[window["minutes"]] = window
        listed = source.get("windows")
        if isinstance(listed, list):
            for entry in listed:
                window = parse_window(entry)
                if window:
                    found[window["minutes"]] = window
    return found
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def request_usage(access_token: str, account_id: str, timeout_seconds: int) -> Optional[Dict[str, Any]]:
    """Fetch the usage payload from USAGE_ENDPOINT by shelling out to curl.

    Returns the decoded JSON object, or None when there is no token, curl
    fails or returns nothing, or the response is not a JSON object.
    """
    if not access_token:
        return None
    command = [
        "curl",
        "-fsS",
        "--connect-timeout", str(max(1, timeout_seconds)),
        "--max-time", str(max(2, timeout_seconds + 2)),
        "-H", f"Authorization: Bearer {access_token}",
        "-H", "Accept: application/json",
        "-H", f"User-Agent: {DEFAULT_USER_AGENT}",
    ]
    if account_id:
        command += ["-H", f"ChatGPT-Account-Id: {account_id}"]
    command.append(USAGE_ENDPOINT)
    try:
        proc = subprocess.run(
            command,
            check=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
    except Exception:
        return None
    if proc.returncode != 0 or not proc.stdout.strip():
        return None
    try:
        data = json.loads(proc.stdout)
    except Exception:
        return None
    return data if isinstance(data, dict) else None
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def format_usage(window: Optional[Dict[str, Any]]) -> str:
    """Render a window as "NN%" remaining, with a local "(HH:MM)" reset time when known."""
    if not window:
        return DASH
    remaining = clamp_percent(float(window["remaining_percent"]))
    reset = window.get("reset_epoch")
    if not isinstance(reset, int):
        return f"{remaining}%"
    # Reset time is shown in the machine's local timezone.
    when = dt.datetime.fromtimestamp(reset).strftime("%H:%M")
    return f"{remaining}% ({when})"
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def format_relative(last_epoch: Optional[int], now_epoch: int) -> str:
    """Render how long ago *last_epoch* was, relative to *now_epoch*.

    Future timestamps are clamped to "just now"; a missing timestamp
    renders as the dash placeholder.
    """
    if not isinstance(last_epoch, int):
        return DASH
    elapsed = max(0, now_epoch - last_epoch)
    if elapsed < 60:
        return "just now"
    if elapsed < 3600:
        return f"{elapsed // 60}m ago"
    if elapsed < 86400:
        return f"{elapsed // 3600}h ago"
    return f"{elapsed // 86400}d ago"
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def collect_local_metrics(data_path: str) -> Dict[str, Any]:
    """Scan local session logs under *data_path*/sessions for rate-limit data.

    Walks every .jsonl file, parsing lines that mention "rate_limits" and
    keeping the record with the newest timestamp.  Always returns a dict
    with at least {"source": "local"}; on success it also carries
    "windows", "plan_type", and "last_activity_epoch".
    """
    sessions_dir = os.path.join(data_path, "sessions")
    if not os.path.isdir(sessions_dir):
        return {"source": "local"}

    # Best (most recent) rate-limit record found so far across all files.
    best: Optional[Dict[str, Any]] = None
    for root, _, files in os.walk(sessions_dir):
        for file_name in files:
            if not file_name.endswith(".jsonl"):
                continue
            file_path = os.path.join(root, file_name)
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    for line in f:
                        # Cheap substring filter before paying for JSON parsing.
                        if '"rate_limits"' not in line:
                            continue
                        obj = json.loads(line)
                        if not isinstance(obj, dict):
                            continue
                        payload = obj.get("payload")
                        if not isinstance(payload, dict):
                            continue
                        rate_limits = payload.get("rate_limits")
                        if not isinstance(rate_limits, dict):
                            continue

                        # Re-wrap so the extractor sees the shape it expects.
                        windows = extract_windows_from_usage_blob({"rate_limits": rate_limits})
                        if not windows:
                            continue

                        # Timestamp may live on the event or inside its payload.
                        timestamp = parse_timestamp(obj.get("timestamp")) or parse_timestamp(payload.get("timestamp"))
                        plan = normalize_plan(rate_limits.get("plan_type"))
                        candidate = {
                            "windows": windows,
                            "plan_type": plan,
                            "last_activity_epoch": timestamp,
                            "source": "local",
                        }
                        if best is None:
                            best = candidate
                        else:
                            # Only replace when the new record is strictly newer
                            # (or the current best has no usable timestamp).
                            prev_ts = best.get("last_activity_epoch")
                            if isinstance(timestamp, int) and (not isinstance(prev_ts, int) or timestamp > prev_ts):
                                best = candidate
            except Exception:
                # Best-effort: a malformed line/file skips the rest of that file.
                continue

    if best:
        return best
    return {"source": "local"}
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def collect_api_metrics(access_token: str, account_id: str, timeout_seconds: int) -> Optional[Dict[str, Any]]:
    """Fetch usage metrics from the backend API.

    Returns a metrics dict with "windows", "plan_type",
    "last_activity_epoch", and source "api" — or None when the request
    failed or produced nothing useful, so callers can fall back to local data.
    """
    response = request_usage(access_token, account_id, timeout_seconds)
    if not response:
        return None

    windows = extract_windows_from_usage_blob(response)

    # The plan may live at the top level or nested under rate_limit(s).
    nested_plan = None
    for container_key in ("rate_limit", "rate_limits"):
        section = response.get(container_key)
        if isinstance(section, dict):
            nested_plan = section.get("plan_type")
            if nested_plan:
                break
    plan = normalize_plan(response.get("plan_type") or nested_plan)

    last_seen = parse_timestamp(
        response.get("last_activity_at")
        or response.get("last_activity")
        or response.get("updated_at")
        or response.get("timestamp")
    )

    # An entirely-empty result counts as a failure.
    if not windows and plan == "unknown" and last_seen is None:
        return None
    return {
        "windows": windows,
        "plan_type": plan,
        "last_activity_epoch": last_seen,
        "source": "api",
    }
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def main() -> int:
    """Collect metrics for one account and print a single tab-separated row.

    Output columns: display email, plan, 5h usage, weekly usage, last
    activity, and source ("api" or "local").  Always returns 0 so a
    metrics failure never breaks the caller's listing.
    """
    args = parse_args()
    now_epoch = int(time.time())
    auth_data = load_json(args.auth_file)

    tokens = auth_data.get("tokens")
    if not isinstance(tokens, dict):
        tokens = {}

    access_token = tokens.get("access_token")
    if not isinstance(access_token, str):
        access_token = ""
    account_id = tokens.get("account_id")
    if not isinstance(account_id, str):
        account_id = ""
    id_token = tokens.get("id_token")
    if not isinstance(id_token, str):
        id_token = ""

    # The id_token JWT carries the account email and (sometimes) plan type.
    claims = decode_jwt_payload(id_token)
    email = claims.get("email")
    if not isinstance(email, str):
        email = ""

    plan_from_claims = normalize_plan(claims.get("chatgpt_plan_type") or claims.get("plan_type"))
    if plan_from_claims == "unknown":
        # Fall back to plan fields stored directly in the auth file.
        plan_from_claims = normalize_plan(auth_data.get("chatgpt_plan_type") or auth_data.get("plan_type"))

    # Prefer live API metrics; fall back to locally recorded session data.
    api_metrics = collect_api_metrics(access_token, account_id, args.timeout_seconds)
    metrics = api_metrics if api_metrics is not None else collect_local_metrics(args.data_path)

    windows = metrics.get("windows")
    if not isinstance(windows, dict):
        windows = {}
    window_5h = pick_window(windows, 300)
    window_week = pick_window(windows, 10080)

    usage_5h = format_usage(window_5h)
    usage_weekly = format_usage(window_week)
    plan = normalize_plan(metrics.get("plan_type"))
    if plan == "unknown":
        plan = plan_from_claims
    # NOTE: a redundant `if plan == "unknown": plan = "unknown"` no-op that
    # followed here has been removed.

    source = metrics.get("source")
    if source not in ("api", "local"):
        source = "local"

    last_activity_epoch = metrics.get("last_activity_epoch")
    if not isinstance(last_activity_epoch, int):
        last_activity_epoch = None
    last_activity = format_relative(last_activity_epoch, now_epoch)

    # Display form pairs the switcher profile name with the account email.
    if email:
        display_email = f"({args.account_name}){email}"
    else:
        display_email = f"({args.account_name})-"

    out_fields = [
        display_email,
        plan,
        usage_5h,
        usage_weekly,
        last_activity,
        source,
    ]
    print("\t".join(sanitize_field(str(x)) for x in out_fields))
    return 0
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
# Script entry point: exit with main()'s status code when run directly.
if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -3,14 +3,18 @@ set -euo pipefail
|
|
|
3
3
|
|
|
4
4
|
ROOT="$(cd "$(dirname "$0")/.." && pwd)"
|
|
5
5
|
SW="$ROOT/scripts/codex-sw"
|
|
6
|
+
SW_LINK=""
|
|
6
7
|
|
|
7
8
|
bash -n "$SW"
|
|
8
9
|
|
|
9
10
|
TMPBASE="$(mktemp -d /tmp/codex-switcher-test.XXXXXX)"
|
|
10
11
|
STATE="$TMPBASE/state"
|
|
11
|
-
|
|
12
|
+
ENVS="$TMPBASE/envs"
|
|
13
|
+
DEFAULT_HOME="$TMPBASE/default-home"
|
|
12
14
|
BIN="$TMPBASE/bin"
|
|
13
|
-
mkdir -p "$BIN"
|
|
15
|
+
mkdir -p "$BIN" "$DEFAULT_HOME"
|
|
16
|
+
SW_LINK="$BIN/codex-sw-link"
|
|
17
|
+
ln -s "$SW" "$SW_LINK"
|
|
14
18
|
|
|
15
19
|
cleanup() {
|
|
16
20
|
pkill -f "$BIN/fake-codex-app" >/dev/null 2>&1 || true
|
|
@@ -32,7 +36,7 @@ if [[ "${1:-}" == "login" && "${2:-}" == "status" ]]; then
|
|
|
32
36
|
fi
|
|
33
37
|
if [[ "${1:-}" == "login" ]]; then
|
|
34
38
|
mkdir -p "$CODEX_HOME"
|
|
35
|
-
echo '{"auth_mode":"
|
|
39
|
+
echo '{"auth_mode":"chatgpt","tokens":{"access_token":"fake-access","id_token":"fake.jwt.sig"}}' > "$CODEX_HOME/auth.json"
|
|
36
40
|
exit 0
|
|
37
41
|
fi
|
|
38
42
|
if [[ "${1:-}" == "logout" ]]; then
|
|
@@ -58,113 +62,134 @@ exit 0
|
|
|
58
62
|
NPM
|
|
59
63
|
chmod +x "$BIN/npm"
|
|
60
64
|
|
|
65
|
+
cat > "$BIN/curl" <<'CURL'
|
|
66
|
+
#!/usr/bin/env bash
|
|
67
|
+
set -euo pipefail
|
|
68
|
+
mode="${CODEX_SWITCHER_TEST_CURL_MODE:-success}"
|
|
69
|
+
if [[ "$mode" == "success" ]]; then
|
|
70
|
+
cat <<'JSON'
|
|
71
|
+
{"rate_limit":{"plan_type":"plus","primary_window":{"used_percent":40,"limit_window_seconds":18000,"reset_at":"2099-01-01T06:30:00Z"},"secondary_window":{"used_percent":20,"limit_window_seconds":604800,"reset_at":"2099-01-03T08:00:00Z"}},"last_activity_at":"2099-01-01T04:30:00Z"}
|
|
72
|
+
JSON
|
|
73
|
+
exit 0
|
|
74
|
+
fi
|
|
75
|
+
echo "simulated curl failure" >&2
|
|
76
|
+
exit 22
|
|
77
|
+
CURL
|
|
78
|
+
chmod +x "$BIN/curl"
|
|
79
|
+
|
|
61
80
|
export PATH="$BIN:$PATH"
|
|
62
81
|
export CODEX_SWITCHER_STATE_DIR="$STATE"
|
|
63
|
-
export
|
|
82
|
+
export CODEX_SWITCHER_ENVS_DIR="$ENVS"
|
|
83
|
+
export CODEX_SWITCHER_ACCOUNTS_DIR="$STATE/env-accounts"
|
|
64
84
|
export CODEX_SWITCHER_APP_BIN="$BIN/fake-codex-app"
|
|
65
85
|
export CODEX_SWITCHER_LOCK_WAIT_SECONDS=2
|
|
66
|
-
export CODEX_SWITCHER_DEFAULT_HOME="$
|
|
86
|
+
export CODEX_SWITCHER_DEFAULT_HOME="$DEFAULT_HOME"
|
|
67
87
|
export CODEX_SWITCHER_TEST_NPM_LOG="$TMPBASE/npm-args.log"
|
|
68
88
|
export CODEX_SWITCHER_TEST_CODEX_LOG="$TMPBASE/codex-args.log"
|
|
89
|
+
export CODEX_SWITCHER_TEST_CURL_MODE="success"
|
|
69
90
|
: > "$CODEX_SWITCHER_TEST_CODEX_LOG"
|
|
70
91
|
|
|
71
|
-
|
|
72
|
-
echo '{"auth_mode":"chatgpt"}' > "$CODEX_SWITCHER_DEFAULT_HOME/auth.json"
|
|
73
|
-
echo '{"projects":["demo"]}' > "$CODEX_SWITCHER_DEFAULT_HOME/state_5.sqlite"
|
|
74
|
-
echo '{"memo":"persist"}' > "$CODEX_SWITCHER_DEFAULT_HOME/memories/demo.json"
|
|
92
|
+
echo '{"memo":"persist"}' > "$DEFAULT_HOME/shared.json"
|
|
75
93
|
|
|
76
94
|
check_out="$("$SW" check)"
|
|
77
95
|
echo "$check_out" | grep -Eq '^version: [0-9]+\.[0-9]+\.[0-9]+$'
|
|
78
96
|
echo "$check_out" | grep -q "check: ok"
|
|
97
|
+
link_check_out="$("$SW_LINK" check)"
|
|
98
|
+
echo "$link_check_out" | grep -Eq '^version: [0-9]+\.[0-9]+\.[0-9]+$'
|
|
99
|
+
echo "$link_check_out" | grep -q "check: ok"
|
|
79
100
|
init_out="$("$SW" init --dry-run)"
|
|
80
101
|
echo "$init_out" | grep -q "\[dry-run\]"
|
|
81
102
|
"$SW" upgrade
|
|
82
103
|
grep -q "i -g @wangxt0223/codex-switcher@latest --registry https://registry.npmjs.org/" "$CODEX_SWITCHER_TEST_NPM_LOG"
|
|
83
104
|
|
|
84
|
-
"$SW"
|
|
85
|
-
"$SW"
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
set -e
|
|
99
|
-
[[ "$use_no_launch_conflict_rc" -ne 0 ]]
|
|
100
|
-
grep -q "cannot pass codex args with --no-launch" /tmp/codex_sw_use_no_launch_conflict
|
|
101
|
-
|
|
102
|
-
"$SW" login personal
|
|
103
|
-
"$SW" login sync-login --sync
|
|
104
|
-
[[ -f "$PROFILES/sync-login/state_5.sqlite" ]]
|
|
105
|
-
[[ -f "$PROFILES/sync-login/memories/demo.json" ]]
|
|
106
|
-
[[ -f "$PROFILES/sync-login/auth.json" ]]
|
|
107
|
-
|
|
108
|
-
echo '{"auth_mode":"api_key","owner":"personal"}' > "$PROFILES/personal/auth.json"
|
|
109
|
-
echo '{"auth_mode":"api_key","owner":"work"}' > "$PROFILES/work/auth.json"
|
|
110
|
-
echo "from-personal-newer" > "$PROFILES/personal/history.jsonl"
|
|
111
|
-
echo "from-work-older-baseline" > "$PROFILES/work/history.jsonl"
|
|
112
|
-
"$SW" switch work --sync
|
|
113
|
-
[[ "$("$SW" current cli)" == "work" ]]
|
|
114
|
-
grep -q "from-personal-newer" "$PROFILES/work/history.jsonl"
|
|
115
|
-
grep -q '"owner":"work"' "$PROFILES/work/auth.json"
|
|
116
|
-
grep -q '"owner":"personal"' "$PROFILES/personal/auth.json"
|
|
117
|
-
|
|
118
|
-
"$SW" import-default imported
|
|
119
|
-
[[ -f "$PROFILES/imported/state_5.sqlite" ]]
|
|
120
|
-
[[ -f "$PROFILES/imported/memories/demo.json" ]]
|
|
121
|
-
[[ ! -f "$PROFILES/imported/auth.json" ]]
|
|
122
|
-
set +e
|
|
123
|
-
CODEX_HOME="$PROFILES/imported" codex login status >/tmp/codex_sw_imported_login 2>&1
|
|
124
|
-
imported_login_rc=$?
|
|
125
|
-
set -e
|
|
126
|
-
[[ "$imported_login_rc" -ne 0 ]]
|
|
127
|
-
|
|
128
|
-
"$SW" import-default imported-auth --with-auth
|
|
129
|
-
[[ -f "$PROFILES/imported-auth/auth.json" ]]
|
|
130
|
-
CODEX_HOME="$PROFILES/imported-auth" codex login status >/tmp/codex_sw_imported_auth_login 2>&1
|
|
131
|
-
[[ "$?" -eq 0 ]]
|
|
132
|
-
|
|
133
|
-
"$SW" logout work
|
|
134
|
-
|
|
135
|
-
set +e
|
|
136
|
-
"$SW" app open ghost >/tmp/codex_sw_app_open_missing 2>&1
|
|
137
|
-
app_open_missing_rc=$?
|
|
138
|
-
set -e
|
|
139
|
-
[[ "$app_open_missing_rc" -ne 0 ]]
|
|
140
|
-
grep -q "profile 'ghost' not found" /tmp/codex_sw_app_open_missing
|
|
141
|
-
|
|
142
|
-
set +e
|
|
143
|
-
"$SW" app use work >/tmp/codex_sw_app_use_unauthed 2>&1
|
|
144
|
-
app_use_unauthed_rc=$?
|
|
145
|
-
set -e
|
|
146
|
-
[[ "$app_use_unauthed_rc" -ne 0 ]]
|
|
147
|
-
grep -q "profile 'work' is not logged in" /tmp/codex_sw_app_use_unauthed
|
|
105
|
+
[[ "$("$SW" env current cli)" == "default" ]]
|
|
106
|
+
"$SW" account login personal --env default
|
|
107
|
+
"$SW" account login work --env default
|
|
108
|
+
|
|
109
|
+
make_id_token() {
|
|
110
|
+
python3 - "$1" "$2" <<'PY'
|
|
111
|
+
import base64, json, sys
|
|
112
|
+
email = sys.argv[1]
|
|
113
|
+
plan = sys.argv[2]
|
|
114
|
+
header = base64.urlsafe_b64encode(json.dumps({"alg":"none","typ":"JWT"}, separators=(",", ":")).encode()).decode().rstrip("=")
|
|
115
|
+
payload = base64.urlsafe_b64encode(json.dumps({"email":email,"chatgpt_plan_type":plan}, separators=(",", ":")).encode()).decode().rstrip("=")
|
|
116
|
+
print(f"{header}.{payload}.sig")
|
|
117
|
+
PY
|
|
118
|
+
}
|
|
148
119
|
|
|
149
|
-
"$
|
|
120
|
+
personal_id_token="$(make_id_token personal@example.com plus)"
|
|
121
|
+
work_id_token="$(make_id_token work@example.com team)"
|
|
122
|
+
|
|
123
|
+
cat > "$STATE/env-accounts/default/personal/auth.json" <<JSON
|
|
124
|
+
{"auth_mode":"chatgpt","tokens":{"access_token":"token-personal","id_token":"$personal_id_token"}}
|
|
125
|
+
JSON
|
|
126
|
+
cat > "$STATE/env-accounts/default/work/auth.json" <<JSON
|
|
127
|
+
{"auth_mode":"chatgpt","tokens":{"access_token":"token-work","id_token":"$work_id_token"}}
|
|
128
|
+
JSON
|
|
129
|
+
|
|
130
|
+
"$SW" account use personal --env default
|
|
131
|
+
grep -q "token-personal" "$DEFAULT_HOME/auth.json"
|
|
132
|
+
"$SW" account use work --env default
|
|
133
|
+
grep -q "token-work" "$DEFAULT_HOME/auth.json"
|
|
134
|
+
grep -q '{"memo":"persist"}' "$DEFAULT_HOME/shared.json"
|
|
135
|
+
"$SW" account use personal --env default --sync
|
|
136
|
+
grep -q '{"memo":"persist"}' "$DEFAULT_HOME/shared.json"
|
|
137
|
+
|
|
138
|
+
"$SW" env create project --empty
|
|
139
|
+
[[ -d "$ENVS/project/home" ]]
|
|
140
|
+
echo '{"shared":"project"}' > "$ENVS/project/home/shared.json"
|
|
141
|
+
|
|
142
|
+
corp_id_token="$(make_id_token corp@example.com business)"
|
|
143
|
+
dev_id_token="$(make_id_token dev@example.com pro)"
|
|
144
|
+
mkdir -p "$STATE/env-accounts/project/corp" "$STATE/env-accounts/project/dev"
|
|
145
|
+
cat > "$STATE/env-accounts/project/corp/auth.json" <<JSON
|
|
146
|
+
{"auth_mode":"chatgpt","tokens":{"access_token":"token-corp","id_token":"$corp_id_token"}}
|
|
147
|
+
JSON
|
|
148
|
+
cat > "$STATE/env-accounts/project/dev/auth.json" <<JSON
|
|
149
|
+
{"auth_mode":"chatgpt","tokens":{"access_token":"token-dev","id_token":"$dev_id_token"}}
|
|
150
|
+
JSON
|
|
151
|
+
|
|
152
|
+
"$SW" account use corp --env project
|
|
153
|
+
[[ "$("$SW" current cli)" == "project/corp" ]]
|
|
154
|
+
grep -q "token-corp" "$ENVS/project/home/auth.json"
|
|
155
|
+
"$SW" account use dev --env project
|
|
156
|
+
[[ "$("$SW" current cli)" == "project/dev" ]]
|
|
157
|
+
grep -q "token-dev" "$ENVS/project/home/auth.json"
|
|
158
|
+
grep -q '{"shared":"project"}' "$ENVS/project/home/shared.json"
|
|
159
|
+
|
|
160
|
+
"$SW" use corp --no-launch
|
|
161
|
+
[[ "$("$SW" current cli)" == "project/corp" ]]
|
|
162
|
+
|
|
163
|
+
"$SW" account use work --env default --target app
|
|
150
164
|
"$SW" app use work
|
|
151
|
-
[[ "$("$SW" app current)" == "work" ]]
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
"
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
grep -q "
|
|
159
|
-
grep -q "
|
|
160
|
-
|
|
161
|
-
"
|
|
162
|
-
|
|
163
|
-
"
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
grep -q "
|
|
165
|
+
[[ "$("$SW" app current)" == "default/work" ]]
|
|
166
|
+
|
|
167
|
+
"$SW" list >/tmp/codex_sw_list_api
|
|
168
|
+
grep -q "ENV" /tmp/codex_sw_list_api
|
|
169
|
+
grep -q "ACCOUNT" /tmp/codex_sw_list_api
|
|
170
|
+
grep -q "EMAIL" /tmp/codex_sw_list_api
|
|
171
|
+
grep -q "PLAN" /tmp/codex_sw_list_api
|
|
172
|
+
grep -q "5H USAGE" /tmp/codex_sw_list_api
|
|
173
|
+
grep -q "WEEKLY USAGE" /tmp/codex_sw_list_api
|
|
174
|
+
grep -q "LAST ACTIVITY" /tmp/codex_sw_list_api
|
|
175
|
+
grep -q "(personal)personal@example.com" /tmp/codex_sw_list_api
|
|
176
|
+
grep -q "(api)" /tmp/codex_sw_list_api
|
|
177
|
+
grep -q "60% (" /tmp/codex_sw_list_api
|
|
178
|
+
grep -q "80% (" /tmp/codex_sw_list_api
|
|
179
|
+
|
|
180
|
+
"$SW_LINK" list >/tmp/codex_sw_list_symlink
|
|
181
|
+
grep -q "(personal)personal@example.com" /tmp/codex_sw_list_symlink
|
|
182
|
+
grep -q "(api)" /tmp/codex_sw_list_symlink
|
|
183
|
+
|
|
184
|
+
mkdir -p "$ENVS/project/home/sessions/2026/04/12"
|
|
185
|
+
cat > "$ENVS/project/home/sessions/2026/04/12/rollout-test.jsonl" <<'JSONL'
|
|
186
|
+
{"timestamp":"2026-04-12T09:00:00Z","type":"event_msg","payload":{"type":"token_count","rate_limits":{"plan_type":"business","primary":{"used_percent":25,"window_minutes":300,"resets_at":1776004200},"secondary":{"used_percent":70,"window_minutes":10080,"resets_at":1776519000}}}}
|
|
187
|
+
JSONL
|
|
188
|
+
export CODEX_SWITCHER_TEST_CURL_MODE="fail"
|
|
189
|
+
"$SW" list >/tmp/codex_sw_list_local
|
|
190
|
+
grep -q "(local)" /tmp/codex_sw_list_local
|
|
191
|
+
grep -q "75% (" /tmp/codex_sw_list_local
|
|
192
|
+
grep -q "30% (" /tmp/codex_sw_list_local
|
|
168
193
|
|
|
169
194
|
"$SW" app status >/tmp/codex_sw_app_status_1
|
|
170
195
|
[[ "$?" -eq 0 ]]
|
|
@@ -177,24 +202,7 @@ app_status_rc=$?
|
|
|
177
202
|
set -e
|
|
178
203
|
[[ "$app_status_rc" -eq 1 ]]
|
|
179
204
|
|
|
180
|
-
printf '***bad***\n' > "$STATE/current_cli"
|
|
181
|
-
set +e
|
|
182
|
-
"$SW" status >/tmp/codex_sw_status_3
|
|
183
|
-
status_rc=$?
|
|
184
|
-
set -e
|
|
185
|
-
[[ "$status_rc" -eq 2 ]]
|
|
186
|
-
|
|
187
|
-
"$SW" recover
|
|
188
|
-
validate_cli="$("$SW" current cli)"
|
|
189
|
-
[[ -n "$validate_cli" ]]
|
|
190
|
-
|
|
191
205
|
doctor_out="$("$SW" doctor --fix)"
|
|
192
206
|
echo "$doctor_out" | grep -q "doctor --fix: completed"
|
|
193
|
-
check_out="$("$SW" check)"
|
|
194
|
-
echo "$check_out" | grep -q "check: ok"
|
|
195
|
-
|
|
196
|
-
"$SW" remove work --force
|
|
197
|
-
"$SW" list >/tmp/codex_sw_list
|
|
198
|
-
grep -q "personal" /tmp/codex_sw_list
|
|
199
207
|
|
|
200
208
|
echo "smoke-test: ok"
|