wafer-cli 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

wafer/GUIDE.md CHANGED
@@ -1,313 +1,107 @@
  # Wafer CLI Guide
 
- GPU development toolkit for LLM coding agents.
+ GPU development primitives for LLM agents.
 
- ## Quick Start
+ ## Quick Start: Cloud GPU (No Setup)
 
- Choose your path based on what GPU access you have:
-
- ### Path A: I have my own GPU (SSH access)
+ Run code on cloud GPUs instantly with workspaces:
 
  ```bash
- # 1. Set up your target
- wafer config targets init ssh \
-   --name my-gpu \
-   --host user@192.168.1.100:22 \
-   --gpu-type H100
-
- # 2. Test it works
- wafer evaluate make-template ./test-kernel
- wafer evaluate \
-   --impl ./test-kernel/kernel.py \
-   --reference ./test-kernel/reference.py \
-   --test-cases ./test-kernel/test_cases.json \
-   --target my-gpu
+ wafer login                              # One-time auth
+ wafer workspaces create dev --gpu B200   # Create workspace
+ wafer workspaces exec dev -- python -c "import torch; print(torch.cuda.get_device_name(0))"
+ wafer workspaces sync dev ./my-project   # Sync files
+ wafer workspaces exec dev -- python train.py
  ```
 
- ### Path B: I have RunPod/DigitalOcean API keys
-
- ```bash
- # 1. Set your API key
- export WAFER_RUNPOD_API_KEY=your_key_here
- # or: export WAFER_AMD_DIGITALOCEAN_API_KEY=your_key_here
-
- # 2. Create a target (auto-provisions GPUs on demand)
- wafer config targets init runpod --gpu MI300X
- # or: wafer config targets init digitalocean
+ ## Documentation Lookup
 
- # 3. Run evaluation (provisions GPU, runs, cleans up)
- wafer evaluate \
-   --impl kernel.py \
-   --reference ref.py \
-   --test-cases tests.json \
-   --target runpod-mi300x
- ```
-
- ### Path C: I just want to ask questions / analyze traces
+ Answer GPU programming questions from indexed documentation.
 
  ```bash
- # No GPU needed for these workflows
-
- # Download documentation
- wafer corpus download cuda
-
- # Ask questions
- wafer wevin -t ask-docs --corpus cuda "What is warp divergence?"
-
- # Analyze a PyTorch trace
- wafer nvidia perfetto query trace.json \
-   "SELECT name, dur/1e6 as ms FROM slice WHERE cat='kernel' ORDER BY dur DESC LIMIT 10"
- ```
-
- ## Core Commands
-
- | Command | Description |
- |---------|-------------|
- | `wafer wevin` | GPU programming assistant (delegates to specialized agent) |
- | `wafer evaluate` | Test kernel correctness and performance |
- | `wafer corpus` | Download/manage GPU documentation |
- | `wafer workspaces` | Cloud GPU environments |
-
- ## Profiling Tools
-
- ```bash
- # NVIDIA tools (under wafer nvidia)
- wafer nvidia ncu analyze profile.ncu-rep      # Nsight Compute analysis
- wafer nvidia nsys analyze profile.nsys-rep    # Nsight Systems analysis
- wafer nvidia perfetto query trace.json "SQL"  # Perfetto/PyTorch traces
- wafer nvidia tracelens report trace.json      # Performance reports
-
- # AMD tools (under wafer amd)
- wafer amd isa analyze kernel.co               # ISA analysis
- wafer amd rocprof-compute profile -- ./app    # ROCm profiling
- ```
-
- ## Common Workflows
-
- ### 1. Research GPU Concepts
-
- ```bash
- # Download documentation (one-time)
+ # Download corpus (one-time)
  wafer corpus download cuda
  wafer corpus download cutlass
+ wafer corpus download hip
 
- # Ask questions
- wafer wevin -t ask-docs --corpus cuda "How do I optimize shared memory usage?"
- wafer wevin -t ask-docs --corpus cutlass "What is a TiledMma?"
+ # Query documentation
+ wafer agent -t ask-docs --corpus cuda "What is warp divergence?"
+ wafer agent -t ask-docs --corpus cutlass "What is a TiledMma?"
  ```
 
- ### 2. Analyze a PyTorch Trace
+ ## Trace Analysis
+
+ Analyze performance traces from NCU, NSYS, or PyTorch profiler.
 
  ```bash
- # PyTorch profiler outputs Chrome trace format JSON
- # Wafer's perfetto tool can analyze these
+ # AI-assisted analysis
+ wafer agent -t trace-analyze --args trace=./profile.ncu-rep "Why is this kernel slow?"
+ wafer agent -t trace-analyze --args trace=./trace.json "What's the bottleneck?"
 
- # List available event categories
+ # Direct trace queries (PyTorch/Perfetto JSON)
  wafer nvidia perfetto tables trace.json
-
- # Find slowest GPU kernels
  wafer nvidia perfetto query trace.json \
-   "SELECT name, dur/1e6 as ms FROM slice WHERE cat='kernel' ORDER BY dur DESC LIMIT 20"
-
- # Get kernel time breakdown
- wafer nvidia perfetto query trace.json \
-   "SELECT name, SUM(dur)/1e6 as total_ms, COUNT(*) as calls
-    FROM slice WHERE cat='kernel'
-    GROUP BY name ORDER BY total_ms DESC LIMIT 20"
-
- # Or use the trace-analyze template for AI-assisted analysis
- wafer wevin -t trace-analyze --args trace=./trace.json "What's the bottleneck?"
- ```
-
- ### 3. Analyze NCU Profiles
+   "SELECT name, dur/1e6 as ms FROM slice WHERE cat='kernel' ORDER BY dur DESC LIMIT 10"
 
- ```bash
- # Basic analysis with recommendations
+ # NCU/NSYS analysis
  wafer nvidia ncu analyze profile.ncu-rep
-
- # JSON output for programmatic use
- wafer nvidia ncu analyze profile.ncu-rep --json
-
- # With SASS source correlation
- wafer nvidia ncu analyze profile.ncu-rep --include-source
+ wafer nvidia nsys analyze profile.nsys-rep
  ```
 
- ### 4. Evaluate Kernel Performance
+ ## Kernel Evaluation
 
- ```bash
- # Generate template files
- wafer evaluate make-template ./my-kernel
+ Test kernel correctness and measure speedup against a reference.
 
- # Edit the generated files:
- #   kernel.py - Your optimized implementation
- #   reference.py - Ground truth + input generator
- #   test_cases.json - Test parameters
+ ```bash
+ # Using workspaces (no target setup required):
+ wafer workspaces create dev --gpu B200
+ wafer workspaces exec --sync ./my-kernel dev -- python test_kernel.py
 
- # Run evaluation
+ # Or using configured targets (for your own hardware):
+ wafer evaluate make-template ./my-kernel
  wafer evaluate \
    --impl ./my-kernel/kernel.py \
    --reference ./my-kernel/reference.py \
    --test-cases ./my-kernel/test_cases.json \
-   --target vultr-b200
-
- # With profiling
- wafer evaluate ... --profile
+   --target <target-name>
  ```
 
- ### 5. Optimize a Kernel (AI-assisted)
+ For target setup, see `wafer config targets --help`.
+
+ ## Kernel Optimization (AI-assisted)
+
+ Iteratively optimize a kernel with evaluation feedback.
 
  ```bash
- # Use the optimize-kernel template
- wafer wevin -t optimize-kernel \
+ wafer agent -t optimize-kernel \
    --args kernel=./my_kernel.cu \
    --args target=H100 \
    "Optimize this GEMM for memory bandwidth"
  ```
 
- ## Configuration
+ ## Workspaces
 
- ### Targets (GPU Access)
+ Cloud GPU environments with no setup required.
 
- Targets define how to access GPUs. Three types:
-
- **1. SSH (your own hardware)**
- ```bash
- wafer config targets init ssh \
-   --name my-gpu \
-   --host user@hostname:22 \
-   --gpu-type H100 \
-   --ncu  # if NCU profiling is available
- ```
-
- **2. RunPod (on-demand cloud GPUs)**
  ```bash
- export WAFER_RUNPOD_API_KEY=your_key  # from runpod.io/console/user/settings
- wafer config targets init runpod --gpu MI300X  # or H100, A100
+ wafer workspaces create dev --gpu B200   # Create
+ wafer workspaces list                    # List all
+ wafer workspaces sync dev ./project      # Sync files
+ wafer workspaces exec dev -- ./run.sh    # Run commands
+ wafer workspaces ssh dev                 # Interactive SSH
+ wafer workspaces delete dev              # Cleanup
  ```
 
- **3. DigitalOcean (AMD MI300X)**
- ```bash
- export WAFER_AMD_DIGITALOCEAN_API_KEY=your_key
- wafer config targets init digitalocean
- ```
+ See `wafer workspaces --help` for details.
 
- **Managing targets:**
- ```bash
- wafer config targets list            # List all targets
- wafer config targets show my-gpu     # Show details
- wafer config targets default my-gpu  # Set default
- wafer config targets remove my-gpu   # Delete
- ```
+ ## Command Reference
 
- **Advanced: Manual TOML** (for custom configurations)
  ```bash
- wafer config targets add ~/my-target.toml
- ```
-
- Example TOML:
- ```toml
- name = "my-gpu"
- type = "baremetal"
- ssh_target = "user@hostname:22"
- ssh_key = "~/.ssh/id_ed25519"
- gpu_ids = [0]
- gpu_type = "H100"
- compute_capability = "9.0"
- ncu_available = true
- docker_image = "nvcr.io/nvidia/pytorch:25.01-py3"
- ```
-
- ### Workspaces (Cloud GPUs)
-
- Workspaces provide on-demand cloud GPU access.
-
- ```bash
- # List workspaces
- wafer workspaces list
-
- # Create a workspace
- wafer workspaces create my-workspace --gpu H100
-
- # Get SSH credentials
- wafer workspaces attach <workspace-id>
-
- # Delete when done
- wafer workspaces delete <workspace-id>
- ```
-
- ### CLI Configuration
-
- ```bash
- # Initialize config
- wafer config init
-
- # Show current config
- wafer config show
-
- # Set API environment
- wafer config set api.environment prod
- ```
-
- ## Templates
-
- Wafer includes templates for common tasks:
-
- | Template | Usage |
- |----------|-------|
- | `ask-docs` | Answer GPU programming questions from documentation |
- | `trace-analyze` | Analyze performance traces (NCU, NSYS, PyTorch) |
- | `optimize-kernel` | Iteratively optimize a kernel implementation |
-
- ```bash
- # Use a template
- wafer wevin -t <template-name> [--args key=value] "your prompt"
-
- # Examples
- wafer wevin -t ask-docs --corpus cuda "What is occupancy?"
- wafer wevin -t trace-analyze --args trace=./profile.ncu-rep "Why is this slow?"
- wafer wevin -t optimize-kernel --args kernel=./gemm.cu "Make this 2x faster"
- ```
-
- ## Getting Help
-
- ```bash
- # General help
- wafer --help
-
- # Command-specific help
- wafer evaluate --help
- wafer nvidia ncu --help
- wafer wevin --help
-
- # This guide
- wafer guide
- ```
-
- ## Quick Reference
-
- ```bash
- # Authentication
- wafer login           # Login via GitHub OAuth
- wafer logout          # Remove credentials
- wafer whoami          # Show current user
- wafer whoami --verify # Verify token is valid
-
- # Documentation
- wafer corpus list           # Show available/downloaded corpora
- wafer corpus download cuda  # Download CUDA docs
- wafer corpus path cuda      # Get path to corpus
-
- # Analysis
- wafer nvidia ncu analyze *.ncu-rep        # NCU profile analysis
- wafer nvidia nsys analyze *.nsys-rep      # NSYS timeline analysis
- wafer nvidia perfetto query f.json "SQL"  # Perfetto SQL queries
-
- # Evaluation
- wafer evaluate make-template ./dir  # Generate template files
- wafer evaluate --impl ... --reference ... --test-cases ...
-
- # AI Assistant
- wafer wevin "your question"                       # Interactive assistant
- wafer wevin -t ask-docs --corpus cuda "question"  # Docs lookup
- wafer wevin -s "single turn question"             # Non-interactive
+ wafer corpus list|download|path   # Manage documentation corpora
+ wafer workspaces                  # Cloud GPU environments (no setup)
+ wafer evaluate                    # Test kernel correctness/performance
+ wafer nvidia ncu|nsys|perfetto    # NVIDIA profiling tools
+ wafer amd isa|rocprof-compute     # AMD profiling tools
+ wafer agent -t <template>         # AI-assisted workflows
+ wafer config targets              # Configure your own GPU targets
  ```
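
Aside (editorial, not part of the diff): in the perfetto queries above, `dur` is in nanoseconds, hence the `/1e6` conversion to milliseconds. For a raw PyTorch/Chrome trace JSON the same per-kernel breakdown can be computed without the CLI; a minimal sketch, assuming the standard `traceEvents` layout where `dur` is in microseconds:

```python
# Illustrative sketch (not shipped in wafer-cli): per-kernel time totals
# from a PyTorch/Chrome trace JSON, mirroring the SQL query in the guide.
import json
from collections import Counter

with open("trace.json") as f:
    events = json.load(f).get("traceEvents", [])

totals_us: Counter[str] = Counter()
for ev in events:
    # Complete events ("X") carry a duration; Chrome traces use microseconds.
    if ev.get("cat") == "kernel" and ev.get("ph") == "X":
        totals_us[ev["name"]] += ev.get("dur", 0)

for name, us in totals_us.most_common(10):
    print(f"{us / 1e3:10.3f} ms  {name}")
```
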
wafer/analytics.py ADDED
@@ -0,0 +1,307 @@
+ """PostHog analytics for Wafer CLI.
+
+ Tracks CLI command usage and user activity for product analytics.
+ Mirrors the analytics implementation in apps/wevin-extension/src/services/analytics.ts.
+
+ Usage:
+     from .analytics import track_command, identify_user, shutdown_analytics
+
+     # Track a command execution (subcommand and outcome are keyword args)
+     track_command("evaluate", subcommand="kernelbench", outcome="success")
+
+     # Identify user after login
+     identify_user("user-id", "user@example.com")
+ """
+
+ import atexit
+ import platform
+ import sys
+ import uuid
+ from pathlib import Path
+ from typing import Any
+
+ # PostHog configuration - same as wevin-extension
+ POSTHOG_API_KEY = "phc_9eDjkY72ud9o4l1mA1Gr1dnRT1yx71rP3XY9z66teFh"
+ POSTHOG_HOST = "https://us.i.posthog.com"
+
+ # Anonymous ID storage
+ ANONYMOUS_ID_FILE = Path.home() / ".wafer" / ".analytics_id"
+
+ # Global state
+ _posthog_client: Any = None
+ _distinct_id: str | None = None
+ _initialized: bool = False
+
+
+ def _get_anonymous_id() -> str:
+     """Get or create anonymous ID for users who aren't logged in."""
+     if ANONYMOUS_ID_FILE.exists():
+         return ANONYMOUS_ID_FILE.read_text().strip()
+
+     # Generate new anonymous ID
+     anonymous_id = f"anon_{uuid.uuid4().hex}"
+     ANONYMOUS_ID_FILE.parent.mkdir(parents=True, exist_ok=True)
+     ANONYMOUS_ID_FILE.write_text(anonymous_id)
+     return anonymous_id
+
+
+ def _get_user_id_from_credentials() -> tuple[str | None, str | None]:
+     """Get user ID and email from stored credentials.
+
+     Returns:
+         Tuple of (user_id, email), both may be None if not logged in.
+     """
+     # Import here to avoid circular imports
+     from .auth import load_credentials, verify_token
+
+     creds = load_credentials()
+     if not creds:
+         return None, None
+
+     # Try to get user info from token
+     try:
+         user_info = verify_token(creds.access_token)
+         return user_info.user_id, user_info.email or creds.email
+     except Exception:
+         # Token verification failed, use email from credentials if available
+         return None, creds.email
+
+
+ def _is_analytics_enabled() -> bool:
+     """Check if analytics is enabled via preferences.
+
+     Returns True by default, respects user preference in config.
+     """
+     from .global_config import get_preferences
+
+     try:
+         prefs = get_preferences()
+         return getattr(prefs, "analytics_enabled", True)
+     except Exception:
+         # Default to enabled if we can't read preferences
+         return True
+
+
+ def init_analytics() -> bool:
+     """Initialize PostHog client.
+
+     Returns:
+         True if initialization succeeded, False otherwise.
+     """
+     global _posthog_client, _distinct_id, _initialized
+
+     if _initialized:
+         return _posthog_client is not None
+
+     _initialized = True
+
+     # Check if analytics is enabled
+     if not _is_analytics_enabled():
+         return False
+
+     try:
+         from posthog import Posthog
+
+         _posthog_client = Posthog(
+             api_key=POSTHOG_API_KEY,
+             host=POSTHOG_HOST,
+             # Flush immediately for CLI - commands are short-lived
+             flush_at=1,
+             flush_interval=1,
+             # Disable debug logging
+             debug=False,
+         )
+
+         # Set up distinct ID - prefer authenticated user, fall back to anonymous
+         user_id, email = _get_user_id_from_credentials()
+         if user_id:
+             _distinct_id = user_id
+             # Identify the user with their email
+             if email:
+                 _posthog_client.identify(
+                     distinct_id=user_id,
+                     properties={
+                         "email": email,
+                         "auth_provider": "github",
+                     },
+                 )
+         else:
+             _distinct_id = _get_anonymous_id()
+
+         # Register shutdown handler to flush events
+         atexit.register(shutdown_analytics)
+
+         return True
+
+     except ImportError:
+         # PostHog not installed - analytics disabled
+         return False
+     except Exception:
+         # Any other error - fail silently, don't break CLI
+         return False
+
+
+ def shutdown_analytics() -> None:
+     """Shutdown PostHog client and flush pending events."""
+     global _posthog_client
+
+     if _posthog_client is not None:
+         try:
+             _posthog_client.flush()
+             _posthog_client.shutdown()
+         except Exception:
+             pass  # Fail silently on shutdown
+         _posthog_client = None
+
+
+ def identify_user(user_id: str, email: str | None = None) -> None:
+     """Identify a user after login.
+
+     Args:
+         user_id: Supabase user ID
+         email: User's email address
+     """
+     global _distinct_id
+
+     if not init_analytics():
+         return
+
+     if _posthog_client is None:
+         return
+
+     _distinct_id = user_id
+
+     try:
+         properties: dict[str, Any] = {"auth_provider": "github"}
+         if email:
+             properties["email"] = email
+
+         _posthog_client.identify(
+             distinct_id=user_id,
+             properties=properties,
+         )
+         _posthog_client.flush()
+     except Exception:
+         pass  # Fail silently
+
+
+ def reset_user_identity() -> None:
+     """Reset user identity after logout."""
+     global _distinct_id
+
+     _distinct_id = _get_anonymous_id()
+
+
+ def get_distinct_id() -> str:
+     """Get current distinct ID for tracking."""
+     global _distinct_id
+
+     if _distinct_id is None:
+         user_id, _ = _get_user_id_from_credentials()
+         _distinct_id = user_id or _get_anonymous_id()
+
+     return _distinct_id
+
+
+ def _get_cli_version() -> str:
+     """Get CLI version from package metadata."""
+     try:
+         from importlib.metadata import version
+
+         return version("wafer-cli")
+     except Exception:
+         return "unknown"
+
+
+ def _get_base_properties() -> dict[str, Any]:
+     """Get base properties included with all events."""
+     return {
+         "platform": "cli",
+         "tool_id": "cli",
+         "cli_version": _get_cli_version(),
+         "os": platform.system().lower(),
+         "os_version": platform.release(),
+         "python_version": platform.python_version(),
+     }
+
+
+ def track_event(event_name: str, properties: dict[str, Any] | None = None) -> None:
+     """Track a generic event.
+
+     Args:
+         event_name: Name of the event to track
+         properties: Additional properties to include
+     """
+     if not init_analytics():
+         return
+
+     if _posthog_client is None:
+         return
+
+     try:
+         event_properties = _get_base_properties()
+         if properties:
+             event_properties.update(properties)
+
+         _posthog_client.capture(
+             distinct_id=get_distinct_id(),
+             event=event_name,
+             properties=event_properties,
+         )
+     except Exception:
+         pass  # Fail silently
+
+
+ def track_command(
+     command: str,
+     subcommand: str | None = None,
+     outcome: str = "success",
+     duration_ms: int | None = None,
+     properties: dict[str, Any] | None = None,
+ ) -> None:
+     """Track a CLI command execution.
+
+     This event counts towards DAU in the internal dashboard.
+
+     Args:
+         command: The main command name (e.g., "evaluate", "agent")
+         subcommand: Optional subcommand (e.g., "kernelbench")
+         outcome: "success" or "error"
+         duration_ms: Command execution time in milliseconds
+         properties: Additional properties to include
+     """
+     event_properties: dict[str, Any] = {
+         "command": command,
+         "outcome": outcome,
+     }
+
+     if subcommand:
+         event_properties["subcommand"] = subcommand
+
+     if duration_ms is not None:
+         event_properties["duration_ms"] = duration_ms
+
+     if properties:
+         event_properties.update(properties)
+
+     track_event("cli_command_executed", event_properties)
+
+
+ def track_login(user_id: str, email: str | None = None) -> None:
+     """Track user login event.
+
+     Args:
+         user_id: Supabase user ID
+         email: User's email address
+     """
+     # First identify the user
+     identify_user(user_id, email)
+
+     # Then track the login event
+     track_event("cli_user_signed_in", {"user_id": user_id})
+
+
+ def track_logout() -> None:
+     """Track user logout event."""
+     track_event("cli_user_signed_out")
+     reset_user_identity()
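
A usage note (editorial, not in the wheel): since `track_command`'s second positional parameter is `subcommand`, extra fields must be passed as keyword arguments. A minimal call-site sketch under that assumption; `run_evaluate` is a hypothetical stand-in for a real command body:

```python
# Illustrative sketch only -- not shipped in wafer-cli.
import time

from wafer.analytics import track_command


def main() -> None:
    start = time.monotonic()
    outcome = "success"
    try:
        run_evaluate()  # hypothetical command body
    except Exception:
        outcome = "error"
        raise
    finally:
        # flush_at=1 plus the atexit hook means the event is sent promptly.
        track_command(
            "evaluate",
            outcome=outcome,
            duration_ms=int((time.monotonic() - start) * 1000),
        )
```
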
wafer/auth.py CHANGED
@@ -287,7 +287,7 @@ if (accessToken) {
          self.end_headers()
 
 
- def browser_login(timeout: int = 120) -> tuple[str, str | None]:
+ def browser_login(timeout: int = 120, port: int | None = None) -> tuple[str, str | None]:
      """Open browser for GitHub OAuth and return tokens.
 
      Starts a local HTTP server, opens browser to Supabase OAuth,
@@ -295,6 +295,7 @@ def browser_login(timeout: int = 120) -> tuple[str, str | None]:
 
      Args:
          timeout: Seconds to wait for callback (default 120)
+         port: Port for callback server. If None, finds a free port (default None)
 
      Returns:
          Tuple of (access_token, refresh_token). refresh_token may be None.
@@ -303,7 +304,8 @@
          TimeoutError: If no callback received within timeout
          RuntimeError: If OAuth flow failed
      """
-     port = _find_free_port()
+     if port is None:
+         port = _find_free_port()
      redirect_uri = f"http://localhost:{port}/callback"
      supabase_url = get_supabase_url()
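
Editorial note on the change above: the new `port` parameter lets callers pin the OAuth callback server to a known port instead of binding a random free one, which matters when only a specific port is forwarded (containers, remote dev boxes). A minimal sketch, assuming `wafer.auth` is importable and port 8765 is reachable by the browser:

```python
# Illustrative sketch (assumes port 8765 is forwarded to this machine).
from wafer.auth import browser_login

# Pin the callback server rather than letting wafer pick a free port.
access_token, refresh_token = browser_login(timeout=120, port=8765)
print("refresh token present:", refresh_token is not None)
```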