codex-sdk-python 0.98.0__tar.gz → 0.101.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/PKG-INFO +6 -5
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/README.md +4 -3
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/pyproject.toml +2 -5
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/__init__.py +1 -11
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/app_server.py +228 -16
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/integrations/pydantic_ai_model.py +122 -81
- codex_sdk_python-0.101.0/src/codex_sdk/tool_envelope.py +524 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/abort.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/codex.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/config_overrides.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/events.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/exceptions.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/exec.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/hooks.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/integrations/__init__.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/integrations/pydantic_ai.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/items.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/options.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/telemetry.py +0 -0
- {codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/thread.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: codex-sdk-python
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.101.0
|
|
4
4
|
Summary: Python SDK for the Codex CLI agent with async threads, streaming events, and structured outputs
|
|
5
5
|
Keywords: codex,sdk,python,api,cli,agent,async,streaming
|
|
6
6
|
Author: Vectorfy Co
|
|
@@ -18,7 +18,7 @@ Classifier: Programming Language :: Python :: 3.11
|
|
|
18
18
|
Classifier: Programming Language :: Python :: 3.12
|
|
19
19
|
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
20
20
|
Classifier: Topic :: Software Development :: Build Tools
|
|
21
|
-
Requires-Dist: logfire
|
|
21
|
+
Requires-Dist: logfire ; extra == 'logfire'
|
|
22
22
|
Requires-Dist: pydantic>=2 ; extra == 'pydantic'
|
|
23
23
|
Requires-Dist: pydantic-ai ; python_full_version >= '3.10' and extra == 'pydantic-ai'
|
|
24
24
|
Maintainer: Vectorfy Co
|
|
@@ -44,7 +44,7 @@ Embed the Codex agent in Python workflows. This SDK wraps the bundled `codex` CL
|
|
|
44
44
|
<td><strong>Lifecycle</strong></td>
|
|
45
45
|
<td>
|
|
46
46
|
<a href="#ci-cd"><img src="https://img.shields.io/badge/CI%2FCD-Active-16a34a?style=flat&logo=githubactions&logoColor=white" alt="CI/CD badge" /></a>
|
|
47
|
-
<img src="https://img.shields.io/badge/Release-0.
|
|
47
|
+
<img src="https://img.shields.io/badge/Release-0.101.0-6b7280?style=flat&logo=pypi&logoColor=white" alt="Release 0.101.0 badge" />
|
|
48
48
|
<a href="#license"><img src="https://img.shields.io/badge/License-Apache--2.0-0f766e?style=flat&logo=apache&logoColor=white" alt="License badge" /></a>
|
|
49
49
|
</td>
|
|
50
50
|
</tr>
|
|
@@ -410,8 +410,9 @@ Supported target triples:
|
|
|
410
410
|
- macOS: `x86_64-apple-darwin`, `aarch64-apple-darwin`
|
|
411
411
|
- Windows: `x86_64-pc-windows-msvc`, `aarch64-pc-windows-msvc`
|
|
412
412
|
|
|
413
|
-
If you are working from source and the vendor directory is missing, run
|
|
414
|
-
|
|
413
|
+
If you are working from source and the vendor directory is missing, run
|
|
414
|
+
`python scripts/setup_binary.py` to fetch and assemble the platform `@openai/codex`
|
|
415
|
+
artifacts into `src/codex_sdk/vendor/`.
|
|
415
416
|
|
|
416
417
|
<a id="auth"></a>
|
|
417
418
|
## 
|
|
@@ -8,7 +8,7 @@ Embed the Codex agent in Python workflows. This SDK wraps the bundled `codex` CL
|
|
|
8
8
|
<td><strong>Lifecycle</strong></td>
|
|
9
9
|
<td>
|
|
10
10
|
<a href="#ci-cd"><img src="https://img.shields.io/badge/CI%2FCD-Active-16a34a?style=flat&logo=githubactions&logoColor=white" alt="CI/CD badge" /></a>
|
|
11
|
-
<img src="https://img.shields.io/badge/Release-0.
|
|
11
|
+
<img src="https://img.shields.io/badge/Release-0.101.0-6b7280?style=flat&logo=pypi&logoColor=white" alt="Release 0.101.0 badge" />
|
|
12
12
|
<a href="#license"><img src="https://img.shields.io/badge/License-Apache--2.0-0f766e?style=flat&logo=apache&logoColor=white" alt="License badge" /></a>
|
|
13
13
|
</td>
|
|
14
14
|
</tr>
|
|
@@ -374,8 +374,9 @@ Supported target triples:
|
|
|
374
374
|
- macOS: `x86_64-apple-darwin`, `aarch64-apple-darwin`
|
|
375
375
|
- Windows: `x86_64-pc-windows-msvc`, `aarch64-pc-windows-msvc`
|
|
376
376
|
|
|
377
|
-
If you are working from source and the vendor directory is missing, run
|
|
378
|
-
|
|
377
|
+
If you are working from source and the vendor directory is missing, run
|
|
378
|
+
`python scripts/setup_binary.py` to fetch and assemble the platform `@openai/codex`
|
|
379
|
+
artifacts into `src/codex_sdk/vendor/`.
|
|
379
380
|
|
|
380
381
|
<a id="auth"></a>
|
|
381
382
|
## 
|
|
@@ -4,7 +4,7 @@ build-backend = "uv_build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "codex-sdk-python"
|
|
7
|
-
version = "0.
|
|
7
|
+
version = "0.101.0"
|
|
8
8
|
description = "Python SDK for the Codex CLI agent with async threads, streaming events, and structured outputs"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = {text = "Apache-2.0"}
|
|
@@ -40,10 +40,7 @@ pydantic-ai = [
|
|
|
40
40
|
"pydantic-ai; python_version >= '3.10'",
|
|
41
41
|
]
|
|
42
42
|
logfire = [
|
|
43
|
-
|
|
44
|
-
# `logfire-api` distribution. Avoid the unrelated legacy `logfire` package
|
|
45
|
-
# on PyPI which can shadow it and break imports.
|
|
46
|
-
"logfire-api>=4",
|
|
43
|
+
"logfire",
|
|
47
44
|
]
|
|
48
45
|
|
|
49
46
|
[dependency-groups]
|
|
@@ -41,11 +41,6 @@ from .exceptions import (
|
|
|
41
41
|
from .hooks import ThreadHooks
|
|
42
42
|
from .items import (
|
|
43
43
|
AgentMessageItem,
|
|
44
|
-
CollabAgentState,
|
|
45
|
-
CollabAgentStatus,
|
|
46
|
-
CollabTool,
|
|
47
|
-
CollabToolCallItem,
|
|
48
|
-
CollabToolCallStatus,
|
|
49
44
|
CommandExecutionItem,
|
|
50
45
|
CommandExecutionStatus,
|
|
51
46
|
ErrorItem,
|
|
@@ -82,7 +77,7 @@ from .thread import (
|
|
|
82
77
|
Turn,
|
|
83
78
|
)
|
|
84
79
|
|
|
85
|
-
__version__ = "0.
|
|
80
|
+
__version__ = "0.101.0"
|
|
86
81
|
|
|
87
82
|
__all__ = [
|
|
88
83
|
"AbortController",
|
|
@@ -122,7 +117,6 @@ __all__ = [
|
|
|
122
117
|
"CommandExecutionItem",
|
|
123
118
|
"FileChangeItem",
|
|
124
119
|
"McpToolCallItem",
|
|
125
|
-
"CollabToolCallItem",
|
|
126
120
|
"McpToolCallItemResult",
|
|
127
121
|
"McpToolCallItemError",
|
|
128
122
|
"WebSearchItem",
|
|
@@ -132,10 +126,6 @@ __all__ = [
|
|
|
132
126
|
"PatchChangeKind",
|
|
133
127
|
"PatchApplyStatus",
|
|
134
128
|
"McpToolCallStatus",
|
|
135
|
-
"CollabToolCallStatus",
|
|
136
|
-
"CollabTool",
|
|
137
|
-
"CollabAgentStatus",
|
|
138
|
-
"CollabAgentState",
|
|
139
129
|
"TodoItem",
|
|
140
130
|
"CodexOptions",
|
|
141
131
|
"ThreadOptions",
|
|
@@ -636,14 +636,41 @@ class AppServerClient:
|
|
|
636
636
|
"""
|
|
637
637
|
return await self._request_dict("thread/unarchive", {"threadId": thread_id})
|
|
638
638
|
|
|
639
|
-
async def thread_compact_start(
|
|
639
|
+
async def thread_compact_start(
|
|
640
|
+
self, thread_id: str, *, instructions: Optional[str] = None
|
|
641
|
+
) -> Dict[str, Any]:
|
|
640
642
|
"""
|
|
641
643
|
Starts a compaction operation for the specified thread on the app-server.
|
|
642
644
|
|
|
645
|
+
Args:
|
|
646
|
+
thread_id: Identifier of the thread to compact.
|
|
647
|
+
instructions: Optional server hint for compaction behavior.
|
|
648
|
+
|
|
643
649
|
Returns:
|
|
644
650
|
dict: The app-server's result payload for the compaction start request.
|
|
645
651
|
"""
|
|
646
|
-
|
|
652
|
+
payload: Dict[str, Any] = {"thread_id": thread_id}
|
|
653
|
+
if instructions is not None:
|
|
654
|
+
payload["instructions"] = instructions
|
|
655
|
+
return await self._request_dict("thread/compact/start", _coerce_keys(payload))
|
|
656
|
+
|
|
657
|
+
async def thread_background_terminals_clean(
|
|
658
|
+
self, thread_id: str, *, terminal_ids: Sequence[str]
|
|
659
|
+
) -> Dict[str, Any]:
|
|
660
|
+
"""
|
|
661
|
+
Clean up background terminal sessions for a thread.
|
|
662
|
+
|
|
663
|
+
Args:
|
|
664
|
+
thread_id: Identifier of the thread.
|
|
665
|
+
terminal_ids: Terminal ids to clean.
|
|
666
|
+
|
|
667
|
+
Returns:
|
|
668
|
+
App-server response payload.
|
|
669
|
+
"""
|
|
670
|
+
payload = {"thread_id": thread_id, "terminal_ids": list(terminal_ids)}
|
|
671
|
+
return await self._request_dict(
|
|
672
|
+
"thread/backgroundTerminals/clean", _coerce_keys(payload)
|
|
673
|
+
)
|
|
647
674
|
|
|
648
675
|
async def thread_rollback(
|
|
649
676
|
self, thread_id: str, *, num_turns: int
|
|
@@ -741,43 +768,74 @@ class AppServerClient:
|
|
|
741
768
|
payload["cwds"] = [str(path) for path in cwds]
|
|
742
769
|
return await self._request_dict("skills/list", _coerce_keys(payload))
|
|
743
770
|
|
|
744
|
-
async def skills_remote_read(
|
|
771
|
+
async def skills_remote_read(
|
|
772
|
+
self, *, params: Optional[Mapping[str, Any]] = None
|
|
773
|
+
) -> Dict[str, Any]:
|
|
745
774
|
"""
|
|
746
775
|
Read remote skills metadata from the app server.
|
|
747
776
|
|
|
777
|
+
Args:
|
|
778
|
+
params: Optional request payload.
|
|
779
|
+
|
|
748
780
|
Returns:
|
|
749
781
|
result (Dict[str, Any]): The app-server response payload for the `skills/remote/read` request.
|
|
750
782
|
"""
|
|
751
|
-
|
|
783
|
+
payload = _coerce_keys(dict(params)) if params is not None else {}
|
|
784
|
+
return await self._request_dict("skills/remote/read", payload)
|
|
752
785
|
|
|
753
786
|
async def skills_remote_write(
|
|
754
|
-
self,
|
|
787
|
+
self,
|
|
788
|
+
*,
|
|
789
|
+
hazelnut_id: Optional[str] = None,
|
|
790
|
+
is_preload: Optional[bool] = None,
|
|
791
|
+
params: Optional[Mapping[str, Any]] = None,
|
|
755
792
|
) -> Dict[str, Any]:
|
|
756
793
|
"""
|
|
757
|
-
Start a remote skill write operation
|
|
794
|
+
Start a remote skill write operation.
|
|
758
795
|
|
|
759
796
|
Args:
|
|
760
|
-
hazelnut_id:
|
|
761
|
-
is_preload:
|
|
797
|
+
hazelnut_id: Optional Hazelnut identifier.
|
|
798
|
+
is_preload: Optional preload flag.
|
|
799
|
+
params: Optional raw request payload.
|
|
762
800
|
|
|
763
801
|
Returns:
|
|
764
802
|
Result returned by the app-server for the "skills/remote/write" request.
|
|
765
803
|
"""
|
|
766
|
-
payload
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
804
|
+
payload: Dict[str, Any] = {}
|
|
805
|
+
if params is not None:
|
|
806
|
+
payload.update(_coerce_keys(dict(params)))
|
|
807
|
+
if hazelnut_id is not None:
|
|
808
|
+
payload["hazelnut_id"] = hazelnut_id
|
|
809
|
+
if is_preload is not None:
|
|
810
|
+
payload["is_preload"] = is_preload
|
|
811
|
+
return await self._request_dict("skills/remote/write", payload)
|
|
812
|
+
|
|
813
|
+
async def skills_config_write(
|
|
814
|
+
self,
|
|
815
|
+
*,
|
|
816
|
+
path: Optional[str] = None,
|
|
817
|
+
enabled: Optional[bool] = None,
|
|
818
|
+
params: Optional["SkillsConfigWriteRequest"] = None,
|
|
819
|
+
) -> Dict[str, Any]:
|
|
770
820
|
"""
|
|
771
|
-
Set
|
|
821
|
+
Set skill configuration state.
|
|
772
822
|
|
|
773
823
|
Args:
|
|
774
|
-
path:
|
|
775
|
-
enabled:
|
|
824
|
+
path: Optional configuration path identifying the skill.
|
|
825
|
+
enabled: Optional enabled state for the skill.
|
|
826
|
+
params: Optional typed request payload for evolving protocol fields.
|
|
776
827
|
|
|
777
828
|
Returns:
|
|
778
829
|
The app-server response as a dictionary.
|
|
779
830
|
"""
|
|
780
|
-
|
|
831
|
+
# TODO(app-server-schema): tighten request shape after protocol stabilizes.
|
|
832
|
+
payload: Dict[str, Any] = {}
|
|
833
|
+
if params is not None:
|
|
834
|
+
payload.update(_coerce_keys(dict(params)))
|
|
835
|
+
if path is not None:
|
|
836
|
+
payload["path"] = path
|
|
837
|
+
if enabled is not None:
|
|
838
|
+
payload["enabled"] = enabled
|
|
781
839
|
return await self._request_dict("skills/config/write", payload)
|
|
782
840
|
|
|
783
841
|
async def turn_start(
|
|
@@ -848,6 +906,23 @@ class AppServerClient:
|
|
|
848
906
|
"turn/interrupt", {"threadId": thread_id, "turnId": turn_id}
|
|
849
907
|
)
|
|
850
908
|
|
|
909
|
+
async def turn_steer(
|
|
910
|
+
self, thread_id: str, turn_id: str, *, prompt: str
|
|
911
|
+
) -> Dict[str, Any]:
|
|
912
|
+
"""
|
|
913
|
+
Send steering guidance to an in-progress turn.
|
|
914
|
+
|
|
915
|
+
Args:
|
|
916
|
+
thread_id: Identifier of the thread.
|
|
917
|
+
turn_id: Identifier of the turn.
|
|
918
|
+
prompt: Steering prompt text.
|
|
919
|
+
|
|
920
|
+
Returns:
|
|
921
|
+
App-server response payload.
|
|
922
|
+
"""
|
|
923
|
+
payload = {"thread_id": thread_id, "turn_id": turn_id, "prompt": prompt}
|
|
924
|
+
return await self._request_dict("turn/steer", _coerce_keys(payload))
|
|
925
|
+
|
|
851
926
|
async def model_list(
|
|
852
927
|
self, *, cursor: Optional[str] = None, limit: Optional[int] = None
|
|
853
928
|
) -> Dict[str, Any]:
|
|
@@ -874,6 +949,26 @@ class AppServerClient:
|
|
|
874
949
|
"""List supported collaboration modes from the app-server."""
|
|
875
950
|
return await self._request_dict("collaborationMode/list", {})
|
|
876
951
|
|
|
952
|
+
async def experimental_feature_list(
|
|
953
|
+
self, *, cursor: Optional[str] = None, limit: Optional[int] = None
|
|
954
|
+
) -> Dict[str, Any]:
|
|
955
|
+
"""
|
|
956
|
+
List experimental features available from the app-server.
|
|
957
|
+
|
|
958
|
+
Args:
|
|
959
|
+
cursor: Optional pagination cursor.
|
|
960
|
+
limit: Optional page size.
|
|
961
|
+
|
|
962
|
+
Returns:
|
|
963
|
+
App-server response payload.
|
|
964
|
+
"""
|
|
965
|
+
params: Dict[str, Any] = {}
|
|
966
|
+
if cursor is not None:
|
|
967
|
+
params["cursor"] = cursor
|
|
968
|
+
if limit is not None:
|
|
969
|
+
params["limit"] = limit
|
|
970
|
+
return await self._request_dict("experimentalFeature/list", params or None)
|
|
971
|
+
|
|
877
972
|
async def command_exec(
|
|
878
973
|
self,
|
|
879
974
|
*,
|
|
@@ -938,6 +1033,103 @@ class AppServerClient:
|
|
|
938
1033
|
"account/read", {"refreshToken": refresh_token} if refresh_token else None
|
|
939
1034
|
)
|
|
940
1035
|
|
|
1036
|
+
async def account_chatgpt_auth_tokens_refresh(
|
|
1037
|
+
self, *, params: Mapping[str, Any]
|
|
1038
|
+
) -> Dict[str, Any]:
|
|
1039
|
+
"""
|
|
1040
|
+
Refresh ChatGPT auth tokens via the app-server.
|
|
1041
|
+
|
|
1042
|
+
Args:
|
|
1043
|
+
params: Refresh payload (snake_case or camelCase keys are accepted).
|
|
1044
|
+
|
|
1045
|
+
Returns:
|
|
1046
|
+
App-server response payload.
|
|
1047
|
+
"""
|
|
1048
|
+
return await self._request_dict(
|
|
1049
|
+
"account/chatgptAuthTokens/refresh", _coerce_keys(dict(params))
|
|
1050
|
+
)
|
|
1051
|
+
|
|
1052
|
+
async def item_tool_call(self, *, params: "ItemToolCallRequest") -> Dict[str, Any]:
|
|
1053
|
+
"""
|
|
1054
|
+
Send an item tool-call payload.
|
|
1055
|
+
|
|
1056
|
+
Args:
|
|
1057
|
+
params: Typed request payload for `item/tool/call`.
|
|
1058
|
+
|
|
1059
|
+
Returns:
|
|
1060
|
+
App-server response payload.
|
|
1061
|
+
"""
|
|
1062
|
+
# TODO(app-server-schema): tighten request shape after protocol stabilizes.
|
|
1063
|
+
return await self._request_dict("item/tool/call", _coerce_keys(dict(params)))
|
|
1064
|
+
|
|
1065
|
+
async def item_tool_request_user_input(
|
|
1066
|
+
self, *, params: Mapping[str, Any]
|
|
1067
|
+
) -> Dict[str, Any]:
|
|
1068
|
+
"""
|
|
1069
|
+
Send an item request-user-input payload.
|
|
1070
|
+
|
|
1071
|
+
Args:
|
|
1072
|
+
params: Request payload for `item/tool/requestUserInput`.
|
|
1073
|
+
|
|
1074
|
+
Returns:
|
|
1075
|
+
App-server response payload.
|
|
1076
|
+
"""
|
|
1077
|
+
return await self._request_dict(
|
|
1078
|
+
"item/tool/requestUserInput", _coerce_keys(dict(params))
|
|
1079
|
+
)
|
|
1080
|
+
|
|
1081
|
+
async def item_command_execution_request_approval(
|
|
1082
|
+
self, *, params: Mapping[str, Any]
|
|
1083
|
+
) -> Dict[str, Any]:
|
|
1084
|
+
"""
|
|
1085
|
+
Send an item command-execution approval payload.
|
|
1086
|
+
|
|
1087
|
+
Args:
|
|
1088
|
+
params: Request payload for `item/commandExecution/requestApproval`.
|
|
1089
|
+
|
|
1090
|
+
Returns:
|
|
1091
|
+
App-server response payload.
|
|
1092
|
+
"""
|
|
1093
|
+
return await self._request_dict(
|
|
1094
|
+
"item/commandExecution/requestApproval", _coerce_keys(dict(params))
|
|
1095
|
+
)
|
|
1096
|
+
|
|
1097
|
+
async def item_file_change_request_approval(
|
|
1098
|
+
self, *, params: Mapping[str, Any]
|
|
1099
|
+
) -> Dict[str, Any]:
|
|
1100
|
+
"""
|
|
1101
|
+
Send an item file-change approval payload.
|
|
1102
|
+
|
|
1103
|
+
Args:
|
|
1104
|
+
params: Request payload for `item/fileChange/requestApproval`.
|
|
1105
|
+
|
|
1106
|
+
Returns:
|
|
1107
|
+
App-server response payload.
|
|
1108
|
+
"""
|
|
1109
|
+
return await self._request_dict(
|
|
1110
|
+
"item/fileChange/requestApproval", _coerce_keys(dict(params))
|
|
1111
|
+
)
|
|
1112
|
+
|
|
1113
|
+
async def mock_experimental_method(
|
|
1114
|
+
self, *, params: Optional[Mapping[str, Any]] = None
|
|
1115
|
+
) -> Dict[str, Any]:
|
|
1116
|
+
"""
|
|
1117
|
+
Call a mock experimental app-server endpoint.
|
|
1118
|
+
|
|
1119
|
+
Args:
|
|
1120
|
+
params: Optional request payload.
|
|
1121
|
+
|
|
1122
|
+
Returns:
|
|
1123
|
+
App-server response payload.
|
|
1124
|
+
"""
|
|
1125
|
+
if not self._options.experimental_api_enabled:
|
|
1126
|
+
raise CodexError(
|
|
1127
|
+
"`mock/experimentalMethod` requires "
|
|
1128
|
+
"AppServerOptions(experimental_api_enabled=True)."
|
|
1129
|
+
)
|
|
1130
|
+
payload = _coerce_keys(dict(params)) if params is not None else {}
|
|
1131
|
+
return await self._request_dict("mock/experimentalMethod", payload)
|
|
1132
|
+
|
|
941
1133
|
async def feedback_upload(
|
|
942
1134
|
self,
|
|
943
1135
|
*,
|
|
@@ -1109,6 +1301,26 @@ class AppServerSkillInput(TypedDict):
|
|
|
1109
1301
|
path: str
|
|
1110
1302
|
|
|
1111
1303
|
|
|
1304
|
+
class SkillsConfigWriteRequest(TypedDict, total=False):
|
|
1305
|
+
"""Typed payload for `skills/config/write` requests."""
|
|
1306
|
+
|
|
1307
|
+
path: str
|
|
1308
|
+
enabled: bool
|
|
1309
|
+
mode: str
|
|
1310
|
+
|
|
1311
|
+
|
|
1312
|
+
class ItemToolCallRequest(TypedDict, total=False):
|
|
1313
|
+
"""Typed payload for `item/tool/call` requests."""
|
|
1314
|
+
|
|
1315
|
+
name: str
|
|
1316
|
+
tool_name: str
|
|
1317
|
+
toolName: str
|
|
1318
|
+
tool_call_id: str
|
|
1319
|
+
toolCallId: str
|
|
1320
|
+
arguments: Mapping[str, Any]
|
|
1321
|
+
args: Mapping[str, Any]
|
|
1322
|
+
|
|
1323
|
+
|
|
1112
1324
|
AppServerUserInput = Union[
|
|
1113
1325
|
AppServerTextInput,
|
|
1114
1326
|
AppServerImageInput,
|
{codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/integrations/pydantic_ai_model.py
RENAMED
|
@@ -13,9 +13,10 @@ validation), while Codex behaves like a "backend model" that emits either:
|
|
|
13
13
|
from __future__ import annotations
|
|
14
14
|
|
|
15
15
|
import json
|
|
16
|
+
import logging
|
|
16
17
|
from base64 import b64encode
|
|
17
18
|
from contextlib import asynccontextmanager
|
|
18
|
-
from dataclasses import
|
|
19
|
+
from dataclasses import asdict, dataclass, is_dataclass
|
|
19
20
|
from datetime import datetime, timezone
|
|
20
21
|
from typing import Any, AsyncIterator, Dict, List, Optional, Sequence
|
|
21
22
|
|
|
@@ -37,7 +38,7 @@ try:
|
|
|
37
38
|
from pydantic_ai.profiles import ModelProfile, ModelProfileSpec
|
|
38
39
|
from pydantic_ai.settings import ModelSettings
|
|
39
40
|
from pydantic_ai.tools import ToolDefinition
|
|
40
|
-
from pydantic_ai.usage import
|
|
41
|
+
from pydantic_ai.usage import RequestUsage
|
|
41
42
|
except ImportError as exc: # pragma: no cover
|
|
42
43
|
raise ImportError(
|
|
43
44
|
"pydantic-ai is required for codex_sdk.integrations.pydantic_ai_model; "
|
|
@@ -45,6 +46,9 @@ except ImportError as exc: # pragma: no cover
|
|
|
45
46
|
) from exc
|
|
46
47
|
|
|
47
48
|
|
|
49
|
+
logger = logging.getLogger(__name__)
|
|
50
|
+
|
|
51
|
+
|
|
48
52
|
@dataclass(frozen=True)
|
|
49
53
|
class _ToolCallEnvelope:
|
|
50
54
|
"""Parsed tool-call envelope returned by Codex `--output-schema` turns."""
|
|
@@ -127,59 +131,49 @@ def _render_tool_definitions(
|
|
|
127
131
|
The rendered, trimmed multi-line string describing the tools.
|
|
128
132
|
"""
|
|
129
133
|
lines: List[str] = []
|
|
130
|
-
|
|
131
|
-
lines.append("Function tools:")
|
|
132
|
-
for tool in function_tools:
|
|
133
|
-
lines.append(f"- {tool.name}")
|
|
134
|
-
if tool.description:
|
|
135
|
-
lines.append(f" description: {tool.description}")
|
|
136
|
-
lines.append(f" kind: {tool.kind}")
|
|
137
|
-
lines.append(
|
|
138
|
-
f" parameters_json_schema: {_json_dumps(tool.parameters_json_schema)}"
|
|
139
|
-
)
|
|
140
|
-
if tool.outer_typed_dict_key:
|
|
141
|
-
lines.append(f" outer_typed_dict_key: {tool.outer_typed_dict_key}")
|
|
142
|
-
if tool.strict is not None:
|
|
143
|
-
lines.append(f" strict: {str(tool.strict).lower()}")
|
|
144
|
-
if getattr(tool, "sequential", False):
|
|
145
|
-
lines.append(" sequential: true")
|
|
146
|
-
metadata = getattr(tool, "metadata", None)
|
|
147
|
-
if metadata is not None:
|
|
148
|
-
lines.append(f" metadata: {_json_dumps(metadata)}")
|
|
149
|
-
timeout = getattr(tool, "timeout", None)
|
|
150
|
-
if timeout is not None:
|
|
151
|
-
lines.append(f" timeout: {timeout}")
|
|
152
|
-
|
|
134
|
+
lines.extend(_render_tool_section("Function tools:", function_tools))
|
|
153
135
|
if output_tools:
|
|
154
136
|
if lines:
|
|
155
137
|
lines.append("")
|
|
156
|
-
lines.
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
lines.append(f"- {tool.name}")
|
|
161
|
-
if tool.description:
|
|
162
|
-
lines.append(f" description: {tool.description}")
|
|
163
|
-
lines.append(f" kind: {tool.kind}")
|
|
164
|
-
lines.append(
|
|
165
|
-
f" parameters_json_schema: {_json_dumps(tool.parameters_json_schema)}"
|
|
138
|
+
lines.extend(
|
|
139
|
+
_render_tool_section(
|
|
140
|
+
"Output tools (use ONE of these to finish when text is not allowed):",
|
|
141
|
+
output_tools,
|
|
166
142
|
)
|
|
167
|
-
|
|
168
|
-
lines.append(f" outer_typed_dict_key: {tool.outer_typed_dict_key}")
|
|
169
|
-
if tool.strict is not None:
|
|
170
|
-
lines.append(f" strict: {str(tool.strict).lower()}")
|
|
171
|
-
if getattr(tool, "sequential", False):
|
|
172
|
-
lines.append(" sequential: true")
|
|
173
|
-
metadata = getattr(tool, "metadata", None)
|
|
174
|
-
if metadata is not None:
|
|
175
|
-
lines.append(f" metadata: {_json_dumps(metadata)}")
|
|
176
|
-
timeout = getattr(tool, "timeout", None)
|
|
177
|
-
if timeout is not None:
|
|
178
|
-
lines.append(f" timeout: {timeout}")
|
|
143
|
+
)
|
|
179
144
|
|
|
180
145
|
return "\n".join(lines).strip()
|
|
181
146
|
|
|
182
147
|
|
|
148
|
+
def _render_tool_section(title: str, tools: Sequence[ToolDefinition]) -> List[str]:
|
|
149
|
+
"""Render one tool section to prompt lines."""
|
|
150
|
+
if not tools:
|
|
151
|
+
return []
|
|
152
|
+
|
|
153
|
+
lines: List[str] = [title]
|
|
154
|
+
for tool in tools:
|
|
155
|
+
lines.append(f"- {tool.name}")
|
|
156
|
+
if tool.description:
|
|
157
|
+
lines.append(f" description: {tool.description}")
|
|
158
|
+
lines.append(f" kind: {tool.kind}")
|
|
159
|
+
lines.append(
|
|
160
|
+
f" parameters_json_schema: {_json_dumps(tool.parameters_json_schema)}"
|
|
161
|
+
)
|
|
162
|
+
if tool.outer_typed_dict_key:
|
|
163
|
+
lines.append(f" outer_typed_dict_key: {tool.outer_typed_dict_key}")
|
|
164
|
+
if tool.strict is not None:
|
|
165
|
+
lines.append(f" strict: {str(tool.strict).lower()}")
|
|
166
|
+
if getattr(tool, "sequential", False):
|
|
167
|
+
lines.append(" sequential: true")
|
|
168
|
+
metadata = getattr(tool, "metadata", None)
|
|
169
|
+
if metadata is not None:
|
|
170
|
+
lines.append(f" metadata: {_json_dumps(metadata)}")
|
|
171
|
+
timeout = getattr(tool, "timeout", None)
|
|
172
|
+
if timeout is not None:
|
|
173
|
+
lines.append(f" timeout: {timeout}")
|
|
174
|
+
return lines
|
|
175
|
+
|
|
176
|
+
|
|
183
177
|
def _tool_calls_from_envelope(output: Any) -> List[_ToolCallEnvelope]:
|
|
184
178
|
"""Extract tool call envelopes from a Codex JSON turn output."""
|
|
185
179
|
if not isinstance(output, dict):
|
|
@@ -290,24 +284,37 @@ def _now_utc() -> datetime:
|
|
|
290
284
|
return datetime.now(timezone.utc)
|
|
291
285
|
|
|
292
286
|
|
|
293
|
-
@dataclass
|
|
294
287
|
class CodexStreamedResponse(StreamedResponse):
|
|
295
288
|
"""Minimal streamed response wrapper for Codex model provider."""
|
|
296
289
|
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
290
|
+
def __init__(
|
|
291
|
+
self,
|
|
292
|
+
*,
|
|
293
|
+
model_request_parameters: ModelRequestParameters,
|
|
294
|
+
model_name: str,
|
|
295
|
+
provider_name: Optional[str],
|
|
296
|
+
parts: Sequence[Any],
|
|
297
|
+
thread_id: str,
|
|
298
|
+
usage: RequestUsage,
|
|
299
|
+
) -> None:
|
|
300
|
+
"""Create a streamed response wrapper around precomputed Codex output.
|
|
306
301
|
|
|
307
302
|
Args:
|
|
308
|
-
|
|
303
|
+
model_request_parameters: The request parameters used for the call.
|
|
304
|
+
model_name: The model identifier reported to PydanticAI.
|
|
305
|
+
provider_name: Optional provider/system identifier (e.g. "openai").
|
|
306
|
+
parts: Collected response parts (text/tool-call parts).
|
|
307
|
+
thread_id: Codex thread identifier for debugging/traceability.
|
|
308
|
+
usage: Request usage totals for the response.
|
|
309
309
|
"""
|
|
310
|
-
|
|
310
|
+
# `StreamedResponse` requires model_request_parameters and owns the parts manager.
|
|
311
|
+
super().__init__(model_request_parameters=model_request_parameters)
|
|
312
|
+
self._model_name = model_name
|
|
313
|
+
self._provider_name = provider_name
|
|
314
|
+
self._parts = parts
|
|
315
|
+
self._thread_id = thread_id
|
|
316
|
+
self._usage = usage
|
|
317
|
+
self._timestamp = _now_utc()
|
|
311
318
|
|
|
312
319
|
async def _get_event_iterator(
|
|
313
320
|
self,
|
|
@@ -334,6 +341,14 @@ class CodexStreamedResponse(StreamedResponse):
|
|
|
334
341
|
tool_call_id=part.tool_call_id,
|
|
335
342
|
)
|
|
336
343
|
else:
|
|
344
|
+
logger.debug(
|
|
345
|
+
"Skipping unsupported streamed part",
|
|
346
|
+
extra={
|
|
347
|
+
"vendor_part_id": index,
|
|
348
|
+
"part_type": type(part).__name__,
|
|
349
|
+
"part_kind": getattr(part, "part_kind", None),
|
|
350
|
+
},
|
|
351
|
+
)
|
|
337
352
|
event = None
|
|
338
353
|
|
|
339
354
|
if event is not None:
|
|
@@ -344,14 +359,16 @@ class CodexStreamedResponse(StreamedResponse):
|
|
|
344
359
|
Construct a complete model response from events received so far.
|
|
345
360
|
|
|
346
361
|
Returns:
|
|
347
|
-
ModelResponse: Contains collected parts, the model name, timestamp, usage,
|
|
362
|
+
ModelResponse: Contains collected parts, the model name, timestamp, usage,
|
|
363
|
+
and `provider_details` with the Codex thread_id.
|
|
348
364
|
"""
|
|
349
365
|
return ModelResponse(
|
|
350
366
|
parts=self._parts_manager.get_parts(),
|
|
351
367
|
model_name=self.model_name,
|
|
368
|
+
provider_name=self.provider_name,
|
|
352
369
|
timestamp=self.timestamp,
|
|
353
370
|
usage=self.usage(),
|
|
354
|
-
|
|
371
|
+
provider_details={"thread_id": self._thread_id},
|
|
355
372
|
)
|
|
356
373
|
|
|
357
374
|
@property
|
|
@@ -364,6 +381,16 @@ class CodexStreamedResponse(StreamedResponse):
|
|
|
364
381
|
"""
|
|
365
382
|
return self._model_name
|
|
366
383
|
|
|
384
|
+
@property
|
|
385
|
+
def provider_name(self) -> Optional[str]:
|
|
386
|
+
"""
|
|
387
|
+
Return the provider/system name for the response, if set.
|
|
388
|
+
|
|
389
|
+
PydanticAI models can optionally report a provider separate from the model name.
|
|
390
|
+
For Codex, this is typically the `system` identifier used for routing/metadata.
|
|
391
|
+
"""
|
|
392
|
+
return self._provider_name
|
|
393
|
+
|
|
367
394
|
@property
|
|
368
395
|
def timestamp(self) -> datetime:
|
|
369
396
|
"""
|
|
@@ -376,7 +403,11 @@ class CodexStreamedResponse(StreamedResponse):
|
|
|
376
403
|
|
|
377
404
|
|
|
378
405
|
class CodexModel(Model):
|
|
379
|
-
"""Use Codex CLI as a PydanticAI model provider via structured output.
|
|
406
|
+
"""Use Codex CLI as a PydanticAI model provider via structured output.
|
|
407
|
+
|
|
408
|
+
Subclasses can override `prepare_request()` to mutate model settings and
|
|
409
|
+
request parameters before a Codex turn is executed.
|
|
410
|
+
"""
|
|
380
411
|
|
|
381
412
|
def __init__(
|
|
382
413
|
self,
|
|
@@ -439,12 +470,20 @@ class CodexModel(Model):
|
|
|
439
470
|
"""Return the provider system identifier (vendor name)."""
|
|
440
471
|
return self._system
|
|
441
472
|
|
|
473
|
+
def prepare_request(
|
|
474
|
+
self,
|
|
475
|
+
model_settings: Optional[ModelSettings],
|
|
476
|
+
model_request_parameters: ModelRequestParameters,
|
|
477
|
+
) -> tuple[Optional[ModelSettings], ModelRequestParameters]:
|
|
478
|
+
"""Hook to customize request settings/parameters before execution."""
|
|
479
|
+
return model_settings, model_request_parameters
|
|
480
|
+
|
|
442
481
|
async def _run_codex_request(
|
|
443
482
|
self,
|
|
444
483
|
messages: list[ModelMessage],
|
|
445
484
|
model_settings: Optional[ModelSettings],
|
|
446
485
|
model_request_parameters: ModelRequestParameters,
|
|
447
|
-
) -> tuple[List[Any],
|
|
486
|
+
) -> tuple[List[Any], RequestUsage, str, ModelRequestParameters]:
|
|
448
487
|
"""
|
|
449
488
|
Run a Codex thread for the given conversation and request parameters, and parse the JSON envelope into response parts.
|
|
450
489
|
|
|
@@ -463,10 +502,10 @@ class CodexModel(Model):
|
|
|
463
502
|
- `model_request_parameters`: The (possibly customized) request parameters
|
|
464
503
|
actually used for the request.
|
|
465
504
|
"""
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
model_request_parameters
|
|
505
|
+
model_settings, model_request_parameters = self.prepare_request(
|
|
506
|
+
model_settings, model_request_parameters
|
|
469
507
|
)
|
|
508
|
+
del model_settings
|
|
470
509
|
|
|
471
510
|
tool_defs = [
|
|
472
511
|
*model_request_parameters.function_tools,
|
|
@@ -520,16 +559,14 @@ class CodexModel(Model):
|
|
|
520
559
|
prompt, output_schema=output_schema, turn_options=TurnOptions()
|
|
521
560
|
)
|
|
522
561
|
|
|
523
|
-
usage =
|
|
562
|
+
usage = RequestUsage()
|
|
524
563
|
if parsed_turn.turn.usage is not None:
|
|
525
564
|
cached = parsed_turn.turn.usage.cached_input_tokens
|
|
526
|
-
details = {"cached_input_tokens": cached} if cached else
|
|
527
|
-
usage =
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
total_tokens=parsed_turn.turn.usage.input_tokens
|
|
532
|
-
+ parsed_turn.turn.usage.output_tokens,
|
|
565
|
+
details = {"cached_input_tokens": cached} if cached else {}
|
|
566
|
+
usage = RequestUsage(
|
|
567
|
+
input_tokens=parsed_turn.turn.usage.input_tokens,
|
|
568
|
+
cache_read_tokens=cached,
|
|
569
|
+
output_tokens=parsed_turn.turn.usage.output_tokens,
|
|
533
570
|
details=details,
|
|
534
571
|
)
|
|
535
572
|
|
|
@@ -568,7 +605,8 @@ class CodexModel(Model):
|
|
|
568
605
|
model_request_parameters: Request-specific parameters that influence Codex execution.
|
|
569
606
|
|
|
570
607
|
Returns:
|
|
571
|
-
ModelResponse containing the generated parts, usage information, the model name,
|
|
608
|
+
ModelResponse containing the generated parts, usage information, the model name,
|
|
609
|
+
and provider_details with the Codex `thread_id`.
|
|
572
610
|
"""
|
|
573
611
|
parts, usage, thread_id, _ = await self._run_codex_request(
|
|
574
612
|
messages,
|
|
@@ -579,7 +617,8 @@ class CodexModel(Model):
|
|
|
579
617
|
parts=parts,
|
|
580
618
|
usage=usage,
|
|
581
619
|
model_name=self.model_name,
|
|
582
|
-
|
|
620
|
+
provider_name=self.system,
|
|
621
|
+
provider_details={"thread_id": thread_id},
|
|
583
622
|
)
|
|
584
623
|
|
|
585
624
|
@asynccontextmanager
|
|
@@ -600,15 +639,17 @@ class CodexModel(Model):
|
|
|
600
639
|
Returns:
|
|
601
640
|
An async iterator that yields a single StreamedResponse (CodexStreamedResponse) containing the model name, response parts, usage information, and the Codex thread identifier.
|
|
602
641
|
"""
|
|
603
|
-
parts, usage, thread_id,
|
|
642
|
+
parts, usage, thread_id, used_params = await self._run_codex_request(
|
|
604
643
|
messages,
|
|
605
644
|
model_settings,
|
|
606
645
|
model_request_parameters,
|
|
607
646
|
)
|
|
608
647
|
streamed = CodexStreamedResponse(
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
648
|
+
model_request_parameters=used_params,
|
|
649
|
+
model_name=self.model_name,
|
|
650
|
+
provider_name=self.system,
|
|
651
|
+
parts=parts,
|
|
652
|
+
thread_id=thread_id,
|
|
653
|
+
usage=usage,
|
|
613
654
|
)
|
|
614
655
|
yield streamed
|
|
@@ -0,0 +1,524 @@
|
|
|
1
|
+
"""Shared helpers for model-driven tool-calling envelopes.
|
|
2
|
+
|
|
3
|
+
This module standardizes how Codex structured output is interpreted for
|
|
4
|
+
host-managed tool loops.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
from base64 import b64encode
|
|
12
|
+
from dataclasses import asdict, dataclass, is_dataclass
|
|
13
|
+
from typing import Any, Dict, Literal, Mapping, Optional, Sequence, Tuple, Union
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass(frozen=True)
class ToolCallEnvelope:
    """One planned tool call emitted by the model envelope.

    Instances are immutable; ``validate_tool_plan`` produces normalized
    copies rather than mutating them in place.
    """

    # Identifier echoed back with the tool result; validation substitutes a
    # deterministic fallback when the model leaves this blank.
    tool_call_id: str
    # Name of the tool to invoke; must match a declared tool schema.
    tool_name: str
    # Raw JSON-encoded arguments string (decoded and re-canonicalized during
    # validation).
    arguments_json: str
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass(frozen=True)
class ToolPlan:
    """Parsed planner output.

    kind:
    - ``tool_calls`` when one or more tool calls are requested.
    - ``final`` when no tool call is requested and final text is provided.
    """

    # Discriminator selecting which of `calls`/`content` is meaningful.
    kind: Literal["tool_calls", "final"]
    # Planned calls; populated only when kind == "tool_calls".
    calls: Tuple[ToolCallEnvelope, ...] = ()
    # Final assistant text; meaningful only when kind == "final".
    content: str = ""
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class ToolPlanValidationError(ValueError):
    """Raised for malformed envelope output or invalid tool-call plans."""

    def __init__(self, code: str, message: str):
        """Record a stable machine-readable *code* next to the human *message*."""
        # Keep both fields available to callers that branch on the error code.
        self.code = code
        self.message = message
        super().__init__(message)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# Caller-supplied tool-choice directive: None, one of the mode strings
# ("auto"/"none"/"required"), or an OpenAI-style
# {"type": "function", "function": {"name": ...}} mapping.
ToolChoice = Optional[Union[str, Mapping[str, Any]]]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def jsonable(value: Any) -> Any:
    """Convert common rich objects into JSON-serializable values.

    Handles dataclass instances, pydantic-style objects exposing a callable
    ``model_dump``, raw ``bytes`` (wrapped as base64), and recurses into
    dicts, lists, and tuples. Anything else passes through unchanged.
    """
    # Dataclass *instances* only — `is_dataclass` is also true for classes.
    if is_dataclass(value) and not isinstance(value, type):
        return asdict(value)

    # Pydantic-style models serialize themselves.
    dump = getattr(value, "model_dump", None)
    if callable(dump):
        return dump(mode="json")

    # Raw bytes become a tagged base64 wrapper so they survive json.dumps.
    if isinstance(value, bytes):
        return {"type": "bytes", "base64": b64encode(value).decode("ascii")}

    # Containers are converted recursively; dict keys are coerced to str.
    if isinstance(value, dict):
        return {str(inner_key): jsonable(inner) for inner_key, inner in value.items()}
    if isinstance(value, (list, tuple)):
        return [jsonable(entry) for entry in value]

    return value
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def json_dumps(value: Any) -> str:
    """Serialize *value* to canonical JSON (sorted keys, compact separators).

    Used for prompts and normalized tool arguments so equal payloads always
    produce identical strings.
    """
    try:
        payload = jsonable(value)
        return json.dumps(
            payload,
            ensure_ascii=False,
            separators=(",", ":"),
            sort_keys=True,
        )
    except TypeError as exc:
        # Surface the offending type before propagating so callers get context.
        logger.error(
            "Failed to serialize value to JSON",
            extra={"value_type": type(value).__name__, "error": str(exc)},
        )
        raise
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def build_envelope_schema(tool_names: Sequence[str]) -> Dict[str, Any]:
    """Build the constrained JSON schema for planner (envelope) output.

    The planner must emit an object of the shape::

        {"tool_calls": [...], "final": "..."}
    """
    # Constrain tool names to the declared set when one is provided.
    if tool_names:
        name_schema: Dict[str, Any] = {"type": "string", "enum": list(tool_names)}
    else:
        name_schema = {"type": "string"}

    call_schema: Dict[str, Any] = {
        "type": "object",
        "properties": {
            "id": {"type": "string"},
            "name": name_schema,
            "arguments": {"type": "string"},
        },
        "required": ["id", "name", "arguments"],
        "additionalProperties": False,
    }

    return {
        "type": "object",
        "properties": {
            "tool_calls": {"type": "array", "items": call_schema},
            "final": {"type": "string"},
        },
        "required": ["tool_calls", "final"],
        "additionalProperties": False,
    }
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def parse_tool_plan(output: Any) -> ToolPlan:
    """Parse raw envelope output into a normalized ``ToolPlan``.

    Args:
        output: Decoded planner output. Must be a dict containing both
            ``tool_calls`` (a list of call objects) and ``final`` (a string).

    Returns:
        ``ToolPlan(kind="tool_calls", ...)`` when any call is present,
        otherwise ``ToolPlan(kind="final", ...)`` carrying the final text.

    Raises:
        ToolPlanValidationError: Always with code ``invalid_envelope`` for
            structural violations, including the case where a non-empty
            ``final`` is combined with tool calls.
    """
    # Shape checks: the envelope must be an object with both required keys.
    if not isinstance(output, dict):
        raise ToolPlanValidationError(
            "invalid_envelope", "Planner output must be a JSON object."
        )

    if "tool_calls" not in output or "final" not in output:
        raise ToolPlanValidationError(
            "invalid_envelope",
            "Planner output must contain both `tool_calls` and `final`.",
        )

    raw_calls = output.get("tool_calls")
    final = output.get("final")
    if not isinstance(raw_calls, list):
        raise ToolPlanValidationError(
            "invalid_envelope", "`tool_calls` must be an array."
        )
    if not isinstance(final, str):
        raise ToolPlanValidationError("invalid_envelope", "`final` must be a string.")

    # Validate each call entry field-by-field; errors cite the array index.
    calls = []
    for index, call in enumerate(raw_calls):
        if not isinstance(call, dict):
            raise ToolPlanValidationError(
                "invalid_envelope", f"tool_calls[{index}] must be an object."
            )

        tool_call_id = call.get("id")
        tool_name = call.get("name")
        arguments = call.get("arguments")
        if not isinstance(tool_call_id, str):
            raise ToolPlanValidationError(
                "invalid_envelope", f"tool_calls[{index}].id must be a string."
            )
        if not isinstance(tool_name, str):
            raise ToolPlanValidationError(
                "invalid_envelope", f"tool_calls[{index}].name must be a string."
            )
        if not isinstance(arguments, str):
            raise ToolPlanValidationError(
                "invalid_envelope",
                f"tool_calls[{index}].arguments must be a JSON string.",
            )

        calls.append(
            ToolCallEnvelope(
                tool_call_id=tool_call_id,
                tool_name=tool_name,
                arguments_json=arguments,
            )
        )

    # The two outcomes are mutually exclusive: a whitespace-only `final` is
    # tolerated alongside tool calls, but real final text is not.
    if calls and final.strip():
        raise ToolPlanValidationError(
            "invalid_envelope",
            "Planner output cannot contain both non-empty `final` and `tool_calls`.",
        )

    if calls:
        return ToolPlan(kind="tool_calls", calls=tuple(calls), content="")

    return ToolPlan(kind="final", calls=(), content=final)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def validate_tool_plan(
    plan: ToolPlan,
    *,
    tool_schemas: Mapping[str, Any],
    tool_choice: ToolChoice = None,
    parallel_tool_calls: Optional[bool] = None,
    max_tool_calls: Optional[int] = None,
    strict_schema_validation: bool = True,
    fallback_id_prefix: str = "call",
) -> ToolPlan:
    """Validate and normalize a parsed tool plan.

    Normalizations:
    - Rewrites argument JSON into canonical form.
    - Replaces missing/blank tool-call IDs with deterministic fallback IDs.

    Args:
        plan: Output of ``parse_tool_plan``.
        tool_schemas: Mapping of declared tool name -> JSON schema for its
            arguments.
        tool_choice: Caller constraint; see ``_normalize_tool_choice``.
        parallel_tool_calls: When ``False``, at most one call is allowed.
        max_tool_calls: Optional hard cap on the number of calls.
        strict_schema_validation: When ``True``, argument objects are checked
            against the tool's schema.
        fallback_id_prefix: Prefix for IDs synthesized for blank call IDs.

    Returns:
        The original plan (for ``final``) or a new ``tool_calls`` plan with
        normalized calls.

    Raises:
        ToolPlanValidationError: With codes such as ``tool_choice_mismatch``,
            ``too_many_tool_calls``, ``parallel_tool_calls_disabled``,
            ``unknown_tool``, ``invalid_tool_arguments``, or
            ``invalid_envelope``.
    """
    choice_mode, choice_name = _normalize_tool_choice(tool_choice)

    # A final-text plan is valid unless the caller demanded a tool call.
    if plan.kind == "final":
        if choice_mode in {"required", "function"}:
            raise ToolPlanValidationError(
                "tool_choice_mismatch",
                "Model returned final text while tool_choice requires a tool call.",
            )
        return plan

    calls = list(plan.calls)
    if not calls:
        raise ToolPlanValidationError(
            "invalid_envelope", "`tool_calls` plan must contain at least one call."
        )

    # Plan-level constraints checked before any per-call work.
    if choice_mode == "none":
        raise ToolPlanValidationError(
            "tool_choice_mismatch",
            "Model emitted tool calls while tool_choice is `none`.",
        )

    if max_tool_calls is not None and len(calls) > max_tool_calls:
        raise ToolPlanValidationError(
            "too_many_tool_calls",
            f"Model emitted {len(calls)} tool calls (max {max_tool_calls}).",
        )

    if parallel_tool_calls is False and len(calls) > 1:
        raise ToolPlanValidationError(
            "parallel_tool_calls_disabled",
            "Model emitted multiple tool calls while parallel_tool_calls is false.",
        )

    normalized_calls = []
    for index, call in enumerate(calls):
        # Blank or missing IDs get a deterministic, index-based fallback.
        call_id = (
            call.tool_call_id.strip() if isinstance(call.tool_call_id, str) else ""
        )
        if not call_id:
            call_id = f"{fallback_id_prefix}_{index}"

        tool_name = call.tool_name.strip()
        if not tool_name:
            raise ToolPlanValidationError(
                "unknown_tool", f"tool_calls[{index}] has empty tool name."
            )

        # A forced function choice pins the only tool the model may call.
        if choice_mode == "function" and choice_name and tool_name != choice_name:
            raise ToolPlanValidationError(
                "tool_choice_mismatch",
                (
                    "Model emitted tool "
                    f"`{tool_name}` while tool_choice requires `{choice_name}`."
                ),
            )

        schema = tool_schemas.get(tool_name)
        if schema is None:
            raise ToolPlanValidationError(
                "unknown_tool", f"Model requested undeclared tool `{tool_name}`."
            )

        # Arguments must be a JSON string decoding to an object.
        try:
            parsed_args = json.loads(call.arguments_json)
        except json.JSONDecodeError as exc:
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"Tool `{tool_name}` arguments are not valid JSON: {exc.msg}",
            ) from exc

        if not isinstance(parsed_args, dict):
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"Tool `{tool_name}` arguments must decode to a JSON object.",
            )

        if strict_schema_validation:
            _validate_json_schema(parsed_args, schema, path=f"tool:{tool_name}")

        # Re-dump through the canonical serializer so equal arguments always
        # compare equal as strings.
        normalized_calls.append(
            ToolCallEnvelope(
                tool_call_id=call_id,
                tool_name=tool_name,
                arguments_json=json_dumps(parsed_args),
            )
        )

    return ToolPlan(kind="tool_calls", calls=tuple(normalized_calls), content="")
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _normalize_tool_choice(tool_choice: ToolChoice) -> Tuple[str, Optional[str]]:
    """Reduce a caller-supplied tool choice to a `(mode, function_name)` pair.

    The mode is one of "auto", "none", "required", or "function"; the
    function name is populated only for the "function" mode.
    """
    # Absent choice defaults to automatic tool selection.
    if tool_choice is None:
        return "auto", None

    # Plain-string form: one of the three well-known modes.
    if isinstance(tool_choice, str):
        mode = tool_choice.strip().lower()
        if mode not in {"auto", "none", "required"}:
            raise ToolPlanValidationError(
                "invalid_tool_choice",
                f"Unsupported tool_choice string `{tool_choice}`.",
            )
        return mode, None

    # Object form must follow the OpenAI-style function-choice shape.
    if not isinstance(tool_choice, Mapping):
        raise ToolPlanValidationError(
            "invalid_tool_choice", "tool_choice must be a string or object."
        )
    if tool_choice.get("type") != "function":
        raise ToolPlanValidationError(
            "invalid_tool_choice",
            "tool_choice object must use type=`function`.",
        )

    function = tool_choice.get("function")
    if not isinstance(function, Mapping):
        raise ToolPlanValidationError(
            "invalid_tool_choice", "tool_choice.function must be an object."
        )

    name = function.get("name")
    if isinstance(name, str) and name.strip():
        return "function", name.strip()
    raise ToolPlanValidationError(
        "invalid_tool_choice",
        "tool_choice.function.name must be a non-empty string.",
    )
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def _validate_json_schema(value: Any, schema: Any, *, path: str) -> None:
    """Validate a value against a subset of JSON Schema used for tool arguments.

    Supported keywords: ``enum``, ``const``, ``type`` (string or list form),
    ``anyOf``/``oneOf``/``allOf``, plus object and array constraints via the
    dedicated helpers. Non-mapping schemas are treated as "accept anything".

    Args:
        value: The decoded JSON value to check.
        schema: The (sub)schema to check against.
        path: Dotted/bracketed location string used in error messages.

    Raises:
        ToolPlanValidationError: With code ``invalid_tool_arguments`` on the
            first violation encountered.
    """
    if not isinstance(schema, Mapping):
        return

    # Generic enum support.
    enum_values = schema.get("enum")
    if isinstance(enum_values, list) and value not in enum_values:
        raise ToolPlanValidationError(
            "invalid_tool_arguments",
            f"{path}: value {value!r} is not in enum {enum_values!r}.",
        )

    # `const` pins the value exactly.
    if "const" in schema and value != schema["const"]:
        raise ToolPlanValidationError(
            "invalid_tool_arguments",
            f"{path}: value {value!r} does not match const {schema['const']!r}.",
        )

    # Resolve the effective type so object/array constraints can run below.
    schema_type = schema.get("type")
    resolved_type: Optional[str] = None
    if isinstance(schema_type, list):
        # Support basic union form, e.g. ["string", "null"].
        matched_types = [t for t in schema_type if _matches_type(value, t)]
        if matched_types:
            # Prefer the container interpretation so nested constraints apply.
            if "object" in matched_types and isinstance(value, dict):
                resolved_type = "object"
            elif "array" in matched_types and isinstance(value, list):
                resolved_type = "array"
            else:
                first = matched_types[0]
                resolved_type = first if isinstance(first, str) else None
        else:
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"{path}: expected one of types {schema_type!r}.",
            )
    elif isinstance(schema_type, str):
        _require_type(value, schema_type, path=path)
        resolved_type = schema_type

    # Composite schemas.
    any_of = schema.get("anyOf")
    if isinstance(any_of, list) and any_of:
        if not _validate_any_of(value, any_of, path=path):
            raise ToolPlanValidationError(
                "invalid_tool_arguments", f"{path}: no anyOf branch matched."
            )

    # oneOf requires exactly one branch to validate.
    one_of = schema.get("oneOf")
    if isinstance(one_of, list) and one_of:
        matches = 0
        for branch in one_of:
            try:
                _validate_json_schema(value, branch, path=path)
            except ToolPlanValidationError:
                continue
            matches += 1
        if matches != 1:
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"{path}: expected exactly one oneOf branch match, got {matches}.",
            )

    # allOf: every branch must validate.
    all_of = schema.get("allOf")
    if isinstance(all_of, list):
        for branch in all_of:
            _validate_json_schema(value, branch, path=path)

    # Structural constraints for containers.
    if resolved_type == "object":
        _validate_object_schema(value, schema, path=path)
    elif resolved_type == "array":
        _validate_array_schema(value, schema, path=path)
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
def _validate_any_of(value: Any, branches: Sequence[Any], *, path: str) -> bool:
    """Return `True` when at least one branch validates without errors."""
    for candidate in branches:
        try:
            _validate_json_schema(value, candidate, path=path)
        except ToolPlanValidationError:
            # This branch failed; keep trying the remaining ones.
            continue
        else:
            return True
    return False
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
def _validate_object_schema(
    value: Any, schema: Mapping[str, Any], *, path: str
) -> None:
    """Check object constraints: `required`, `properties`, `additionalProperties`."""
    if not isinstance(value, dict):
        raise ToolPlanValidationError(
            "invalid_tool_arguments", f"{path}: expected object."
        )

    # Required fields are reported in declaration order, first miss wins.
    required = schema.get("required")
    if isinstance(required, list):
        missing = [
            field
            for field in required
            if isinstance(field, str) and field not in value
        ]
        if missing:
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"{path}: missing required field `{missing[0]}`.",
            )

    raw_properties = schema.get("properties")
    declared: Mapping[str, Any] = (
        raw_properties if isinstance(raw_properties, Mapping) else {}
    )

    # additionalProperties=False forbids any key not declared in `properties`.
    extra_schema = schema.get("additionalProperties", True)
    if extra_schema is False:
        unknown = [key for key in value.keys() if key not in declared]
        if unknown:
            raise ToolPlanValidationError(
                "invalid_tool_arguments",
                f"{path}: unexpected fields {unknown!r}.",
            )

    # Recurse into each entry: declared properties use their own schema;
    # undeclared ones use additionalProperties when it is itself a schema.
    for key, item in value.items():
        child_schema = declared.get(key)
        if child_schema is not None:
            _validate_json_schema(item, child_schema, path=f"{path}.{key}")
        elif isinstance(extra_schema, Mapping):
            _validate_json_schema(item, extra_schema, path=f"{path}.{key}")
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
def _validate_array_schema(value: Any, schema: Mapping[str, Any], *, path: str) -> None:
    """Check array constraints: per-item `items` schema plus min/max length."""
    if not isinstance(value, list):
        raise ToolPlanValidationError(
            "invalid_tool_arguments", f"{path}: expected array."
        )

    # Recurse into each element when an item schema is declared.
    item_schema = schema.get("items")
    if item_schema is not None:
        for position, element in enumerate(value):
            _validate_json_schema(element, item_schema, path=f"{path}[{position}]")

    # Length bounds are enforced only when given as integers.
    lower = schema.get("minItems")
    if isinstance(lower, int) and len(value) < lower:
        raise ToolPlanValidationError(
            "invalid_tool_arguments",
            f"{path}: expected at least {lower} items.",
        )

    upper = schema.get("maxItems")
    if isinstance(upper, int) and len(value) > upper:
        raise ToolPlanValidationError(
            "invalid_tool_arguments",
            f"{path}: expected at most {upper} items.",
        )
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
def _matches_type(value: Any, schema_type: Any) -> bool:
|
|
497
|
+
"""Return whether a value satisfies a JSON-schema primitive type label."""
|
|
498
|
+
if not isinstance(schema_type, str):
|
|
499
|
+
return False
|
|
500
|
+
if schema_type == "null":
|
|
501
|
+
return value is None
|
|
502
|
+
if schema_type == "boolean":
|
|
503
|
+
return isinstance(value, bool)
|
|
504
|
+
if schema_type == "integer":
|
|
505
|
+
return isinstance(value, int) and not isinstance(value, bool)
|
|
506
|
+
if schema_type == "number":
|
|
507
|
+
return (isinstance(value, int) and not isinstance(value, bool)) or isinstance(
|
|
508
|
+
value, float
|
|
509
|
+
)
|
|
510
|
+
if schema_type == "string":
|
|
511
|
+
return isinstance(value, str)
|
|
512
|
+
if schema_type == "array":
|
|
513
|
+
return isinstance(value, list)
|
|
514
|
+
if schema_type == "object":
|
|
515
|
+
return isinstance(value, dict)
|
|
516
|
+
return True
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def _require_type(value: Any, schema_type: str, *, path: str) -> None:
    """Raise when a value does not satisfy a required JSON-schema type."""
    if _matches_type(value, schema_type):
        return
    raise ToolPlanValidationError(
        "invalid_tool_arguments", f"{path}: expected type `{schema_type}`."
    )
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{codex_sdk_python-0.98.0 → codex_sdk_python-0.101.0}/src/codex_sdk/integrations/pydantic_ai.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|