copilot-sdk-supercharged 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. copilot_sdk_supercharged-1.0.0/.gitignore +171 -0
  2. copilot_sdk_supercharged-1.0.0/PKG-INFO +508 -0
  3. copilot_sdk_supercharged-1.0.0/README.md +478 -0
  4. copilot_sdk_supercharged-1.0.0/copilot/__init__.py +73 -0
  5. copilot_sdk_supercharged-1.0.0/copilot/client.py +1547 -0
  6. copilot_sdk_supercharged-1.0.0/copilot/generated/__init__.py +0 -0
  7. copilot_sdk_supercharged-1.0.0/copilot/generated/session_events.py +1005 -0
  8. copilot_sdk_supercharged-1.0.0/copilot/jsonrpc.py +307 -0
  9. copilot_sdk_supercharged-1.0.0/copilot/py.typed +1 -0
  10. copilot_sdk_supercharged-1.0.0/copilot/sdk_protocol_version.py +19 -0
  11. copilot_sdk_supercharged-1.0.0/copilot/session.py +510 -0
  12. copilot_sdk_supercharged-1.0.0/copilot/tools.py +212 -0
  13. copilot_sdk_supercharged-1.0.0/copilot/types.py +1012 -0
  14. copilot_sdk_supercharged-1.0.0/copilot_sdk_supercharged.egg-info/PKG-INFO +508 -0
  15. copilot_sdk_supercharged-1.0.0/copilot_sdk_supercharged.egg-info/SOURCES.txt +41 -0
  16. copilot_sdk_supercharged-1.0.0/copilot_sdk_supercharged.egg-info/dependency_links.txt +1 -0
  17. copilot_sdk_supercharged-1.0.0/copilot_sdk_supercharged.egg-info/requires.txt +11 -0
  18. copilot_sdk_supercharged-1.0.0/copilot_sdk_supercharged.egg-info/top_level.txt +1 -0
  19. copilot_sdk_supercharged-1.0.0/e2e/__init__.py +1 -0
  20. copilot_sdk_supercharged-1.0.0/e2e/conftest.py +46 -0
  21. copilot_sdk_supercharged-1.0.0/e2e/test_ask_user.py +118 -0
  22. copilot_sdk_supercharged-1.0.0/e2e/test_client.py +181 -0
  23. copilot_sdk_supercharged-1.0.0/e2e/test_compaction.py +90 -0
  24. copilot_sdk_supercharged-1.0.0/e2e/test_hooks.py +138 -0
  25. copilot_sdk_supercharged-1.0.0/e2e/test_mcp_and_agents.py +156 -0
  26. copilot_sdk_supercharged-1.0.0/e2e/test_permissions.py +173 -0
  27. copilot_sdk_supercharged-1.0.0/e2e/test_session.py +478 -0
  28. copilot_sdk_supercharged-1.0.0/e2e/test_skills.py +114 -0
  29. copilot_sdk_supercharged-1.0.0/e2e/test_tools.py +126 -0
  30. copilot_sdk_supercharged-1.0.0/e2e/test_tools_unit.py +286 -0
  31. copilot_sdk_supercharged-1.0.0/e2e/testharness/__init__.py +13 -0
  32. copilot_sdk_supercharged-1.0.0/e2e/testharness/context.py +140 -0
  33. copilot_sdk_supercharged-1.0.0/e2e/testharness/helper.py +163 -0
  34. copilot_sdk_supercharged-1.0.0/e2e/testharness/proxy.py +112 -0
  35. copilot_sdk_supercharged-1.0.0/pyproject.toml +88 -0
  36. copilot_sdk_supercharged-1.0.0/scripts/build-wheels.mjs +369 -0
  37. copilot_sdk_supercharged-1.0.0/setup.cfg +4 -0
  38. copilot_sdk_supercharged-1.0.0/setup.py +11 -0
  39. copilot_sdk_supercharged-1.0.0/test-requirements.txt +5 -0
  40. copilot_sdk_supercharged-1.0.0/test_client.py +149 -0
  41. copilot_sdk_supercharged-1.0.0/test_event_forward_compatibility.py +64 -0
  42. copilot_sdk_supercharged-1.0.0/test_jsonrpc.py +267 -0
  43. copilot_sdk_supercharged-1.0.0/uv.lock +579 -0
@@ -0,0 +1,171 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
161
+
162
+ # Ruff and ty cache
163
+ .ruff_cache/
164
+ .ty_cache/
165
+
166
+ # Build script caches
167
+ .cli-cache/
168
+ .build-temp/
169
+
170
+ # Bundled CLI binary (only in platform wheels, not in repo)
171
+ copilot/bin/
@@ -0,0 +1,508 @@
1
+ Metadata-Version: 2.4
2
+ Name: copilot-sdk-supercharged
3
+ Version: 1.0.0
4
+ Summary: GitHub Copilot SDK Supercharged - Python SDK for programmatic control of GitHub Copilot CLI. Supports 21 languages.
5
+ Author: jeremiahjordanisaacson
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/jeremiahjordanisaacson/copilot-sdk-supercharged
8
+ Project-URL: Repository, https://github.com/jeremiahjordanisaacson/copilot-sdk-supercharged
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.9
14
+ Classifier: Programming Language :: Python :: 3.10
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Requires-Python: >=3.9
18
+ Description-Content-Type: text/markdown
19
+ Requires-Dist: python-dateutil>=2.9.0.post0
20
+ Requires-Dist: pydantic>=2.0
21
+ Requires-Dist: typing-extensions>=4.0.0
22
+ Provides-Extra: dev
23
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
24
+ Requires-Dist: ty>=0.0.2; extra == "dev"
25
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
26
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
27
+ Requires-Dist: pytest-timeout>=2.0.0; extra == "dev"
28
+ Requires-Dist: httpx>=0.24.0; extra == "dev"
29
+ Dynamic: requires-python
30
+
31
+ # Copilot Python SDK
32
+
33
+ Python SDK for programmatic control of GitHub Copilot CLI via JSON-RPC.
34
+
35
+ > **Note:** This SDK is in technical preview and may change in breaking ways.
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ pip install -e ".[dev]"
41
+ # or
42
+ uv pip install -e ".[dev]"
43
+ ```
44
+
45
+ ## Quick Start
46
+
47
+ ```python
48
+ import asyncio
49
+ from copilot import CopilotClient
50
+
51
+ async def main():
52
+ # Create and start client
53
+ client = CopilotClient()
54
+ await client.start()
55
+
56
+ # Create a session
57
+ session = await client.create_session({"model": "gpt-5"})
58
+
59
+ # Wait for response using session.idle event
60
+ done = asyncio.Event()
61
+
62
+ def on_event(event):
63
+ if event.type.value == "assistant.message":
64
+ print(event.data.content)
65
+ elif event.type.value == "session.idle":
66
+ done.set()
67
+
68
+ session.on(on_event)
69
+
70
+ # Send a message and wait for completion
71
+ await session.send({"prompt": "What is 2+2?"})
72
+ await done.wait()
73
+
74
+ # Clean up
75
+ await session.destroy()
76
+ await client.stop()
77
+
78
+ asyncio.run(main())
79
+ ```
80
+
81
+ ## Features
82
+
83
+ - ✅ Full JSON-RPC protocol support
84
+ - ✅ stdio and TCP transports
85
+ - ✅ Real-time streaming events
86
+ - ✅ Session history with `get_messages()`
87
+ - ✅ Type hints throughout
88
+ - ✅ Async/await native
89
+
90
+ ## API Reference
91
+
92
+ ### CopilotClient
93
+
94
+ ```python
95
+ client = CopilotClient({
96
+ "cli_path": "copilot", # Optional: path to CLI executable
97
+ "cli_url": None, # Optional: URL of existing server (e.g., "localhost:8080")
98
+ "log_level": "info", # Optional: log level (default: "info")
99
+ "auto_start": True, # Optional: auto-start server (default: True)
100
+ "auto_restart": True, # Optional: auto-restart on crash (default: True)
101
+ })
102
+ await client.start()
103
+
104
+ session = await client.create_session({"model": "gpt-5"})
105
+
106
+ def on_event(event):
107
+ print(f"Event: {event['type']}")
108
+
109
+ session.on(on_event)
110
+ await session.send({"prompt": "Hello!"})
111
+
112
+ # ... wait for events ...
113
+
114
+ await session.destroy()
115
+ await client.stop()
116
+ ```
117
+
118
+ **CopilotClient Options:**
119
+
120
+ - `cli_path` (str): Path to CLI executable (default: "copilot" or `COPILOT_CLI_PATH` env var)
121
+ - `cli_url` (str): URL of existing CLI server (e.g., `"localhost:8080"`, `"http://127.0.0.1:9000"`, or just `"8080"`). When provided, the client will not spawn a CLI process.
122
+ - `cwd` (str): Working directory for CLI process
123
+ - `port` (int): Server port for TCP mode (default: 0 for random)
124
+ - `use_stdio` (bool): Use stdio transport instead of TCP (default: True)
125
+ - `log_level` (str): Log level (default: "info")
126
+ - `auto_start` (bool): Auto-start server on first use (default: True)
127
+ - `auto_restart` (bool): Auto-restart on crash (default: True)
128
+ - `github_token` (str): GitHub token for authentication. When provided, takes priority over other auth methods.
129
+ - `use_logged_in_user` (bool): Whether to use logged-in user for authentication (default: True, but False when `github_token` is provided). Cannot be used with `cli_url`.
130
+
131
+ **SessionConfig Options (for `create_session`):**
132
+
133
+ - `model` (str): Model to use ("gpt-5", "claude-sonnet-4.5", etc.). **Required when using custom provider.**
134
+ - `reasoning_effort` (str): Reasoning effort level for models that support it ("low", "medium", "high", "xhigh"). Use `list_models()` to check which models support this option.
135
+ - `session_id` (str): Custom session ID
136
+ - `tools` (list): Custom tools exposed to the CLI
137
+ - `system_message` (dict): System message configuration
138
+ - `streaming` (bool): Enable streaming delta events
139
+ - `provider` (dict): Custom API provider configuration (BYOK). See [Custom Providers](#custom-providers) section.
140
+ - `infinite_sessions` (dict): Automatic context compaction configuration
141
+ - `on_user_input_request` (callable): Handler for user input requests from the agent (enables ask_user tool). See [User Input Requests](#user-input-requests) section.
142
+ - `hooks` (dict): Hook handlers for session lifecycle events. See [Session Hooks](#session-hooks) section.
143
+
144
+ **Session Lifecycle Methods:**
145
+
146
+ ```python
147
+ # Get the session currently displayed in TUI (TUI+server mode only)
148
+ session_id = await client.get_foreground_session_id()
149
+
150
+ # Request TUI to display a specific session (TUI+server mode only)
151
+ await client.set_foreground_session_id("session-123")
152
+
153
+ # Subscribe to all lifecycle events
154
+ def on_lifecycle(event):
155
+ print(f"{event.type}: {event.sessionId}")
156
+
157
+ unsubscribe = client.on(on_lifecycle)
158
+
159
+ # Subscribe to specific event type
160
+ unsubscribe = client.on("session.foreground", lambda e: print(f"Foreground: {e.sessionId}"))
161
+
162
+ # Later, to stop receiving events:
163
+ unsubscribe()
164
+ ```
165
+
166
+ **Lifecycle Event Types:**
167
+ - `session.created` - A new session was created
168
+ - `session.deleted` - A session was deleted
169
+ - `session.updated` - A session was updated
170
+ - `session.foreground` - A session became the foreground session in TUI
171
+ - `session.background` - A session is no longer the foreground session
172
+
173
+ ### Tools
174
+
175
+ Define tools with automatic JSON schema generation using the `@define_tool` decorator and Pydantic models:
176
+
177
+ ```python
178
+ from pydantic import BaseModel, Field
179
+ from copilot import CopilotClient, define_tool
180
+
181
+ class LookupIssueParams(BaseModel):
182
+ id: str = Field(description="Issue identifier")
183
+
184
+ @define_tool(description="Fetch issue details from our tracker")
185
+ async def lookup_issue(params: LookupIssueParams) -> str:
186
+ issue = await fetch_issue(params.id)
187
+ return issue.summary
188
+
189
+ session = await client.create_session({
190
+ "model": "gpt-5",
191
+ "tools": [lookup_issue],
192
+ })
193
+ ```
194
+
195
+ > **Note:** When using `from __future__ import annotations`, define Pydantic models at module level (not inside functions).
196
+
197
+ **Low-level API (without Pydantic):**
198
+
199
+ For users who prefer manual schema definition:
200
+
201
+ ```python
202
+ from copilot import CopilotClient, Tool
203
+
204
+ async def lookup_issue(invocation):
205
+ issue_id = invocation["arguments"]["id"]
206
+ issue = await fetch_issue(issue_id)
207
+ return {
208
+ "textResultForLlm": issue.summary,
209
+ "resultType": "success",
210
+ "sessionLog": f"Fetched issue {issue_id}",
211
+ }
212
+
213
+ session = await client.create_session({
214
+ "model": "gpt-5",
215
+ "tools": [
216
+ Tool(
217
+ name="lookup_issue",
218
+ description="Fetch issue details from our tracker",
219
+ parameters={
220
+ "type": "object",
221
+ "properties": {
222
+ "id": {"type": "string", "description": "Issue identifier"},
223
+ },
224
+ "required": ["id"],
225
+ },
226
+ handler=lookup_issue,
227
+ )
228
+ ],
229
+ })
230
+ ```
231
+
232
+ The SDK automatically handles `tool.call`, executes your handler (sync or async), and responds with the final result when the tool completes.
233
+
234
+ ## Image Support
235
+
236
+ The SDK supports image attachments via the `attachments` parameter. You can attach images by providing their file path:
237
+
238
+ ```python
239
+ await session.send({
240
+ "prompt": "What's in this image?",
241
+ "attachments": [
242
+ {
243
+ "type": "file",
244
+ "path": "/path/to/image.jpg",
245
+ }
246
+ ]
247
+ })
248
+ ```
249
+
250
+ Supported image formats include JPG, PNG, GIF, and other common image types. The agent's `view` tool can also read images directly from the filesystem, so you can also ask questions like:
251
+
252
+ ```python
253
+ await session.send({"prompt": "What does the most recent jpg in this directory portray?"})
254
+ ```
255
+
256
+ ## Streaming
257
+
258
+ Enable streaming to receive assistant response chunks as they're generated:
259
+
260
+ ```python
261
+ import asyncio
262
+ from copilot import CopilotClient
263
+
264
+ async def main():
265
+ client = CopilotClient()
266
+ await client.start()
267
+
268
+ session = await client.create_session({
269
+ "model": "gpt-5",
270
+ "streaming": True
271
+ })
272
+
273
+ # Use asyncio.Event to wait for completion
274
+ done = asyncio.Event()
275
+
276
+ def on_event(event):
277
+ if event.type.value == "assistant.message_delta":
278
+ # Streaming message chunk - print incrementally
279
+ delta = event.data.delta_content or ""
280
+ print(delta, end="", flush=True)
281
+ elif event.type.value == "assistant.reasoning_delta":
282
+ # Streaming reasoning chunk (if model supports reasoning)
283
+ delta = event.data.delta_content or ""
284
+ print(delta, end="", flush=True)
285
+ elif event.type.value == "assistant.message":
286
+ # Final message - complete content
287
+ print("\n--- Final message ---")
288
+ print(event.data.content)
289
+ elif event.type.value == "assistant.reasoning":
290
+ # Final reasoning content (if model supports reasoning)
291
+ print("--- Reasoning ---")
292
+ print(event.data.content)
293
+ elif event.type.value == "session.idle":
294
+ # Session finished processing
295
+ done.set()
296
+
297
+ session.on(on_event)
298
+ await session.send({"prompt": "Tell me a short story"})
299
+ await done.wait() # Wait for streaming to complete
300
+
301
+ await session.destroy()
302
+ await client.stop()
303
+
304
+ asyncio.run(main())
305
+ ```
306
+
307
+ When `streaming=True`:
308
+
309
+ - `assistant.message_delta` events are sent with `delta_content` containing incremental text
310
+ - `assistant.reasoning_delta` events are sent with `delta_content` for reasoning/chain-of-thought (model-dependent)
311
+ - Accumulate `delta_content` values to build the full response progressively
312
+ - The final `assistant.message` and `assistant.reasoning` events contain the complete content
313
+
314
+ Note: `assistant.message` and `assistant.reasoning` (final events) are always sent regardless of streaming setting.
315
+
316
+ ## Infinite Sessions
317
+
318
+ By default, sessions use **infinite sessions** which automatically manage context window limits through background compaction and persist state to a workspace directory.
319
+
320
+ ```python
321
+ # Default: infinite sessions enabled with default thresholds
322
+ session = await client.create_session({"model": "gpt-5"})
323
+
324
+ # Access the workspace path for checkpoints and files
325
+ print(session.workspace_path)
326
+ # => ~/.copilot/session-state/{session_id}/
327
+
328
+ # Custom thresholds
329
+ session = await client.create_session({
330
+ "model": "gpt-5",
331
+ "infinite_sessions": {
332
+ "enabled": True,
333
+ "background_compaction_threshold": 0.80, # Start compacting at 80% context usage
334
+ "buffer_exhaustion_threshold": 0.95, # Block at 95% until compaction completes
335
+ },
336
+ })
337
+
338
+ # Disable infinite sessions
339
+ session = await client.create_session({
340
+ "model": "gpt-5",
341
+ "infinite_sessions": {"enabled": False},
342
+ })
343
+ ```
344
+
345
+ When enabled, sessions emit compaction events:
346
+
347
+ - `session.compaction_start` - Background compaction started
348
+ - `session.compaction_complete` - Compaction finished (includes token counts)
349
+
350
+ ## Custom Providers
351
+
352
+ The SDK supports custom OpenAI-compatible API providers (BYOK - Bring Your Own Key), including local providers like Ollama. When using a custom provider, you must specify the `model` explicitly.
353
+
354
+ **ProviderConfig fields:**
355
+
356
+ - `type` (str): Provider type - `"openai"`, `"azure"`, or `"anthropic"` (default: `"openai"`)
357
+ - `base_url` (str): API endpoint URL (required)
358
+ - `api_key` (str): API key (optional for local providers like Ollama)
359
+ - `bearer_token` (str): Bearer token for authentication (takes precedence over `api_key`)
360
+ - `wire_api` (str): API format for OpenAI/Azure - `"completions"` or `"responses"` (default: `"completions"`)
361
+ - `azure` (dict): Azure-specific options with `api_version` (default: `"2024-10-21"`)
362
+
363
+ **Example with Ollama:**
364
+
365
+ ```python
366
+ session = await client.create_session({
367
+ "model": "deepseek-coder-v2:16b", # Required when using custom provider
368
+ "provider": {
369
+ "type": "openai",
370
+ "base_url": "http://localhost:11434/v1", # Ollama endpoint
371
+ # api_key not required for Ollama
372
+ },
373
+ })
374
+
375
+ await session.send({"prompt": "Hello!"})
376
+ ```
377
+
378
+ **Example with custom OpenAI-compatible API:**
379
+
380
+ ```python
381
+ import os
382
+
383
+ session = await client.create_session({
384
+ "model": "gpt-4",
385
+ "provider": {
386
+ "type": "openai",
387
+ "base_url": "https://my-api.example.com/v1",
388
+ "api_key": os.environ["MY_API_KEY"],
389
+ },
390
+ })
391
+ ```
392
+
393
+ **Example with Azure OpenAI:**
394
+
395
+ ```python
396
+ import os
397
+
398
+ session = await client.create_session({
399
+ "model": "gpt-4",
400
+ "provider": {
401
+ "type": "azure", # Must be "azure" for Azure endpoints, NOT "openai"
402
+ "base_url": "https://my-resource.openai.azure.com", # Just the host, no path
403
+ "api_key": os.environ["AZURE_OPENAI_KEY"],
404
+ "azure": {
405
+ "api_version": "2024-10-21",
406
+ },
407
+ },
408
+ })
409
+ ```
410
+
411
+ > **Important notes:**
412
+ > - When using a custom provider, the `model` parameter is **required**. The SDK will throw an error if no model is specified.
413
+ > - For Azure OpenAI endpoints (`*.openai.azure.com`), you **must** use `type: "azure"`, not `type: "openai"`.
414
+ > - The `base_url` should be just the host (e.g., `https://my-resource.openai.azure.com`). Do **not** include `/openai/v1` in the URL - the SDK handles path construction automatically.
415
+
416
+ ## User Input Requests
417
+
418
+ Enable the agent to ask questions to the user using the `ask_user` tool by providing an `on_user_input_request` handler:
419
+
420
+ ```python
421
+ async def handle_user_input(request, invocation):
422
+ # request["question"] - The question to ask
423
+ # request.get("choices") - Optional list of choices for multiple choice
424
+ # request.get("allowFreeform", True) - Whether freeform input is allowed
425
+
426
+ print(f"Agent asks: {request['question']}")
427
+ if request.get("choices"):
428
+ print(f"Choices: {', '.join(request['choices'])}")
429
+
430
+ # Return the user's response
431
+ return {
432
+ "answer": "User's answer here",
433
+ "wasFreeform": True, # Whether the answer was freeform (not from choices)
434
+ }
435
+
436
+ session = await client.create_session({
437
+ "model": "gpt-5",
438
+ "on_user_input_request": handle_user_input,
439
+ })
440
+ ```
441
+
442
+ ## Session Hooks
443
+
444
+ Hook into session lifecycle events by providing handlers in the `hooks` configuration:
445
+
446
+ ```python
447
+ async def on_pre_tool_use(input, invocation):
448
+ print(f"About to run tool: {input['toolName']}")
449
+ # Return permission decision and optionally modify args
450
+ return {
451
+ "permissionDecision": "allow", # "allow", "deny", or "ask"
452
+ "modifiedArgs": input.get("toolArgs"), # Optionally modify tool arguments
453
+ "additionalContext": "Extra context for the model",
454
+ }
455
+
456
+ async def on_post_tool_use(input, invocation):
457
+ print(f"Tool {input['toolName']} completed")
458
+ return {
459
+ "additionalContext": "Post-execution notes",
460
+ }
461
+
462
+ async def on_user_prompt_submitted(input, invocation):
463
+ print(f"User prompt: {input['prompt']}")
464
+ return {
465
+ "modifiedPrompt": input["prompt"], # Optionally modify the prompt
466
+ }
467
+
468
+ async def on_session_start(input, invocation):
469
+ print(f"Session started from: {input['source']}") # "startup", "resume", "new"
470
+ return {
471
+ "additionalContext": "Session initialization context",
472
+ }
473
+
474
+ async def on_session_end(input, invocation):
475
+ print(f"Session ended: {input['reason']}")
476
+
477
+ async def on_error_occurred(input, invocation):
478
+ print(f"Error in {input['errorContext']}: {input['error']}")
479
+ return {
480
+ "errorHandling": "retry", # "retry", "skip", or "abort"
481
+ }
482
+
483
+ session = await client.create_session({
484
+ "model": "gpt-5",
485
+ "hooks": {
486
+ "on_pre_tool_use": on_pre_tool_use,
487
+ "on_post_tool_use": on_post_tool_use,
488
+ "on_user_prompt_submitted": on_user_prompt_submitted,
489
+ "on_session_start": on_session_start,
490
+ "on_session_end": on_session_end,
491
+ "on_error_occurred": on_error_occurred,
492
+ },
493
+ })
494
+ ```
495
+
496
+ **Available hooks:**
497
+
498
+ - `on_pre_tool_use` - Intercept tool calls before execution. Can allow/deny or modify arguments.
499
+ - `on_post_tool_use` - Process tool results after execution. Can modify results or add context.
500
+ - `on_user_prompt_submitted` - Intercept user prompts. Can modify the prompt before processing.
501
+ - `on_session_start` - Run logic when a session starts or resumes.
502
+ - `on_session_end` - Cleanup or logging when session ends.
503
+ - `on_error_occurred` - Handle errors with retry/skip/abort strategies.
504
+
505
+ ## Requirements
506
+
507
+ - Python 3.9+
508
+ - GitHub Copilot CLI installed and accessible