agentex-sdk 0.4.17__py3-none-any.whl → 0.4.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentex/_version.py +1 -1
- agentex/lib/core/services/adk/providers/openai.py +35 -55
- agentex/lib/sdk/fastacp/fastacp.py +0 -1
- {agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/METADATA +1 -1
- {agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/RECORD +8 -8
- {agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/WHEEL +0 -0
- {agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/entry_points.txt +0 -0
- {agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/licenses/LICENSE +0 -0
agentex/_version.py
CHANGED
agentex/lib/core/services/adk/providers/openai.py
CHANGED
@@ -358,6 +358,7 @@ class OpenAIService:
                 },
             ) as span:
                 heartbeat_if_in_workflow("run agent auto send")
+
                 async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                     tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
                     handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
@@ -395,9 +396,12 @@ class OpenAIService:
                     result = await Runner.run(
                         starting_agent=agent, input=input_list, previous_response_id=previous_response_id
                     )
-
-
-
+                else:
+                    result = await Runner.run(starting_agent=agent, input=input_list)
+
+                if span:
+                    span.output = {
+                        "new_items": [
                             item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
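The hunk above makes the non-streamed run branch on whether a previous_response_id was supplied instead of always passing it through. A minimal, hypothetical sketch of the resulting control flow; only the lines marked `+` above are confirmed by the diff, and the `if previous_response_id:` guard plus the wrapper function are illustrative assumptions:

# Hypothetical sketch; names mirror the diff context, the wrapper function is illustrative.
from pydantic import BaseModel
from agents import Runner  # openai-agents SDK


async def _run_agent(agent, input_list, previous_response_id=None, span=None):
    if previous_response_id:  # assumed guard matching the added `else:` branch
        # Chain onto an earlier response so the provider can reuse prior turns.
        result = await Runner.run(
            starting_agent=agent, input=input_list, previous_response_id=previous_response_id
        )
    else:
        result = await Runner.run(starting_agent=agent, input=input_list)

    if span:
        # Record only serializable raw items on the tracing span.
        span.output = {
            "new_items": [
                item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                for item in result.new_items
            ],
        }
    return result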
@@ -427,6 +431,7 @@ class OpenAIService:
 
                     elif item.type == "tool_call_item":
                         tool_call_item = item.raw_item
+
                         # Extract tool call information using the helper method
                         call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                         tool_call_map[call_id] = tool_call_item
@@ -552,15 +557,9 @@ class OpenAIService:
             ) as span:
                 heartbeat_if_in_workflow("run agent streamed")
 
-                async with mcp_server_context(
-                    mcp_server_params, mcp_timeout_seconds
-                ) as servers:
+                async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                     tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
-                    handoffs = (
-                        [Agent(**handoff.model_dump()) for handoff in handoffs]
-                        if handoffs
-                        else []
-                    )
+                    handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
                     agent_kwargs = {
                         "name": agent_name,
                         "instructions": agent_instructions,
@@ -573,9 +572,7 @@ class OpenAIService:
                         "tool_use_behavior": tool_use_behavior,
                     }
                     if model_settings is not None:
-                        agent_kwargs["model_settings"] = (
-                            model_settings.to_oai_model_settings()
-                        )
+                        agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
                     if input_guardrails is not None:
                         agent_kwargs["input_guardrails"] = input_guardrails
                     if output_guardrails is not None:
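Taken together, the two hunks above tidy how the streamed path assembles the Agent: MCP servers and handoffs are built inline, and optional settings are attached to a kwargs dict only when present. A condensed, hypothetical sketch of that assembly; the helper function, the output_guardrails key (cut off in the hunk), and the final Agent construction are assumptions, not lines shown in the diff:

# Illustrative helper only; the SDK builds this dict inline rather than in a function.
def build_agent_kwargs(agent_name, agent_instructions, tool_use_behavior,
                       model_settings=None, input_guardrails=None, output_guardrails=None):
    agent_kwargs = {
        "name": agent_name,
        "instructions": agent_instructions,
        "tool_use_behavior": tool_use_behavior,
    }
    if model_settings is not None:
        # Translate SDK-level settings into the OpenAI Agents ModelSettings type.
        agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
    if input_guardrails is not None:
        agent_kwargs["input_guardrails"] = input_guardrails
    if output_guardrails is not None:
        agent_kwargs["output_guardrails"] = output_guardrails  # assumed key, truncated in the hunk
    return agent_kwargs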
@@ -603,9 +600,7 @@ class OpenAIService:
                     if span:
                         span.output = {
                             "new_items": [
-                                item.raw_item.model_dump()
-                                if isinstance(item.raw_item, BaseModel)
-                                else item.raw_item
+                                item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                                 for item in result.new_items
                             ],
                             "final_output": result.final_output,
@@ -738,6 +733,7 @@ class OpenAIService:
                     if event.type == "run_item_stream_event":
                         if event.item.type == "tool_call_item":
                             tool_call_item = event.item.raw_item
+
                             # Extract tool call information using the helper method
                             call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                             tool_call_map[call_id] = tool_call_item
@@ -750,12 +746,10 @@ class OpenAIService:
                             )
 
                             # Create tool request using streaming context (immediate completion)
-                            async with (
-                                self.streaming_service.streaming_task_message_context(
-                                    task_id=task_id,
-                                    initial_content=tool_request_content,
-                                ) as streaming_context
-                            ):
+                            async with self.streaming_service.streaming_task_message_context(
+                                task_id=task_id,
+                                initial_content=tool_request_content,
+                            ) as streaming_context:
                                 # The message has already been persisted, but we still need to send an update
                                 await streaming_context.stream_update(
                                     update=StreamTaskMessageFull(
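The hunk above only reflows the `async with`; behavior is unchanged. As a rough usage sketch, the streaming context persists a task message on entry and then lets the caller push updates. The StreamTaskMessageFull payload is abbreviated here because its full field list is not visible in this hunk:

# Hypothetical usage sketch of the pattern shown above, inside the provider method.
async with self.streaming_service.streaming_task_message_context(
    task_id=task_id,
    initial_content=tool_request_content,
) as streaming_context:
    # The message is already persisted; this pushes the full content to subscribers.
    await streaming_context.stream_update(
        update=StreamTaskMessageFull(
            type="full",
            # remaining fields as in the surrounding code
        ),
    )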
@@ -781,12 +775,9 @@ class OpenAIService:
                             )
 
                             # Create tool response using streaming context (immediate completion)
-                            async with (
-                                self.streaming_service.streaming_task_message_context(
-                                    task_id=task_id,
-                                    initial_content=tool_response_content
-                                ) as streaming_context
-                            ):
+                            async with self.streaming_service.streaming_task_message_context(
+                                task_id=task_id, initial_content=tool_response_content
+                            ) as streaming_context:
                                 # The message has already been persisted, but we still need to send an update
                                 await streaming_context.stream_update(
                                     update=StreamTaskMessageFull(
@@ -812,14 +803,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]
 
                             # Stream the delta through the streaming service
                             await streaming_context.stream_update(
@@ -849,14 +836,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]
 
                             # Stream the summary delta through the streaming service
                             await streaming_context.stream_update(
@@ -890,14 +873,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]
 
                             # Stream the content delta through the streaming service
                             await streaming_context.stream_update(
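The three hunks above are the same refactor applied to the tool-call, reasoning-summary, and reasoning-content branches: a registry maps each item_id to its streaming context, opening it on the first delta and reusing it afterwards. A simplified, hypothetical sketch of that bookkeeping; only the dictionary, the set, and the open/stream_update/close calls are taken from the diff, the handler functions are illustrative:

# Illustrative bookkeeping; not the SDK's actual method signatures.
item_id_to_streaming_context = {}
unclosed_item_ids = set()


async def handle_delta(item_id, new_streaming_context, update):
    if item_id not in item_id_to_streaming_context:
        # First delta for this item: open the context and remember it.
        item_id_to_streaming_context[item_id] = await new_streaming_context.open()
        unclosed_item_ids.add(item_id)
    streaming_context = item_id_to_streaming_context[item_id]
    # Stream the delta through the streaming service.
    await streaming_context.stream_update(update=update)


async def handle_done(item_id):
    # Finish the streaming context (sends DONE event and updates the message).
    if item_id in item_id_to_streaming_context:
        await item_id_to_streaming_context[item_id].close()
        unclosed_item_ids.discard(item_id)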
@@ -925,6 +904,7 @@ class OpenAIService:
                             # to close the streaming context, but they do!!!
                             # They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent
                             # I have no idea why they do this.
+
                         elif isinstance(event.data, ResponseReasoningTextDoneEvent):
                             # Handle reasoning content text completion
                             item_id = event.data.item_id
@@ -940,9 +920,7 @@ class OpenAIService:
 
                             # Finish the streaming context (sends DONE event and updates message)
                             if item_id in item_id_to_streaming_context:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]
                                 await streaming_context.close()
                                 if item_id in unclosed_item_ids:
                                     unclosed_item_ids.remove(item_id)
@@ -952,17 +930,17 @@ class OpenAIService:
                     # Create a copy to avoid modifying set during iteration
                     remaining_items = list(unclosed_item_ids)
                     for item_id in remaining_items:
-                        if (
-
-
-
-                        ]
+                        if (
+                            item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
+                        ):  # Check if still unclosed
+                            streaming_context = item_id_to_streaming_context[item_id]
                             await streaming_context.close()
                             unclosed_item_ids.discard(item_id)
 
                 except InputGuardrailTripwireTriggered as e:
                     # Handle guardrail trigger by sending a rejection message
                     rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
+
                     # Try to extract rejection message from the guardrail result
                     if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
                         output_info = getattr(e.guardrail_result.output, "output_info", {})
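The tail of this hunk shows the guardrail fallback: on InputGuardrailTripwireTriggered the service starts from a generic rejection message and then probes the guardrail result for a more specific one. A hedged sketch of that probing; the `rejection_message` key on output_info is an assumption, since the diff only shows the hasattr/getattr access:

# Sketch of the extraction pattern; `e` is the caught InputGuardrailTripwireTriggered.
rejection_message = (
    "I'm sorry, but I cannot process this request due to a guardrail. "
    "Please try a different question."
)
if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
    output_info = getattr(e.guardrail_result.output, "output_info", {})
    if isinstance(output_info, dict) and output_info.get("rejection_message"):
        # Assumed field name; the hunk does not show which key is read.
        rejection_message = output_info["rejection_message"]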
@@ -993,6 +971,7 @@ class OpenAIService:
                                 type="full",
                             ),
                         )
+
                     # Re-raise to let the activity handle it
                     raise
 
@@ -1030,6 +1009,7 @@ class OpenAIService:
                                 type="full",
                             ),
                         )
+
                     # Re-raise to let the activity handle it
                     raise
 
{agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: agentex-sdk
-Version: 0.4.17
+Version: 0.4.18
 Summary: The official Python library for the agentex API
 Project-URL: Homepage, https://github.com/scaleapi/agentex-python
 Project-URL: Repository, https://github.com/scaleapi/agentex-python
{agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/RECORD
CHANGED
@@ -11,7 +11,7 @@ agentex/_resource.py,sha256=S1t7wmR5WUvoDIhZjo_x-E7uoTJBynJ3d8tPJMQYdjw,1106
 agentex/_response.py,sha256=Tb9zazsnemO2rTxWtBjAD5WBqlhli5ZaXGbiKgdu5DE,28794
 agentex/_streaming.py,sha256=FNGJExRCF-vTRUZHFKUfoAWFhDGOB3XbioVCF37Jr7E,10104
 agentex/_types.py,sha256=lO491FSd7vM_uBp7-TvItbauEAH8SsEPYcyNO_5lKGM,7297
-agentex/_version.py,sha256=
+agentex/_version.py,sha256=8azpxMAVbxSJfOSmVjFkPPS49oSMFsAqfcYO7jXRKZg,159
 agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 agentex/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -134,7 +134,7 @@ agentex/lib/core/services/adk/acp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
 agentex/lib/core/services/adk/acp/acp.py,sha256=ONbGc8HWNCxv5IJaItY-fvDZIwlh3d4jSmH9GGQo7q8,10971
 agentex/lib/core/services/adk/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/lib/core/services/adk/providers/litellm.py,sha256=EKzus_xohNW-85V5hwvd1WqUd3ebv2wc9vDIWO2t1Mw,10044
-agentex/lib/core/services/adk/providers/openai.py,sha256=
+agentex/lib/core/services/adk/providers/openai.py,sha256=pXbqSBPxvnOV_h3c-iaUXFgkmNRyu97C1xGIFMOw8zY,51832
 agentex/lib/core/services/adk/providers/sgp.py,sha256=9gm-sPNQ_OSTaBzL0onerKhokPk_ZHndaKNO-z16wyQ,3676
 agentex/lib/core/services/adk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/lib/core/services/adk/utils/templating.py,sha256=eaXSFq31Y9p5pRD6J6SL4QdTFtxy81dilbF2XXc2JYQ,1889
@@ -183,7 +183,7 @@ agentex/lib/sdk/config/local_development_config.py,sha256=b1AZsOVo1RoHKbk8Nm5nC8
 agentex/lib/sdk/config/project_config.py,sha256=CGH_r9KbnSFMj2CnBkZnfg41L2o0TeVNz6MwBDKPT_U,3642
 agentex/lib/sdk/config/validation.py,sha256=QGAlAzlVJiWRlIksqxNS-JSwkk8Z4gXMSFUJc4qPrIQ,8989
 agentex/lib/sdk/fastacp/__init__.py,sha256=UvAdexdnfb4z0F4a2sfXROFyh9EjH89kf3AxHPybzCM,75
-agentex/lib/sdk/fastacp/fastacp.py,sha256=
+agentex/lib/sdk/fastacp/fastacp.py,sha256=RM89_4_G2ZtIybPeMg641cw4ixFn4rZHy260FGSAa6o,3770
 agentex/lib/sdk/fastacp/base/base_acp_server.py,sha256=mVi8uSOZtf70rogS7xDJwHU1hF_5Fv7S8383rUN0kK8,15604
 agentex/lib/sdk/fastacp/base/constants.py,sha256=W4vpJ-5NML7239JyqzUWdu2IypIl8Cey8CS41KR2Vk0,519
 agentex/lib/sdk/fastacp/impl/agentic_base_acp.py,sha256=LWLAlHrs-2Lc2UICBAEFL8c3JwTA6oxPnzUzW0qQWSA,2694
@@ -304,8 +304,8 @@ agentex/types/messages/batch_update_params.py,sha256=Ug5CThbD49a8j4qucg04OdmVrp_
 agentex/types/messages/batch_update_response.py,sha256=TbSBe6SuPzjXXWSj-nRjT1JHGBooTshHQQDa1AixQA8,278
 agentex/types/shared/__init__.py,sha256=IKs-Qn5Yja0kFh1G1kDqYZo43qrOu1hSoxlPdN-85dI,149
 agentex/types/shared/delete_response.py,sha256=8qH3zvQXaOHYQSHyXi7UQxdR4miTzR7V9K4zXVsiUyk,215
-agentex_sdk-0.4.
-agentex_sdk-0.4.
-agentex_sdk-0.4.
-agentex_sdk-0.4.
-agentex_sdk-0.4.
+agentex_sdk-0.4.18.dist-info/METADATA,sha256=yYmjMxPlZKhAlm1spqPQnhGhUycSwqSgHazP7ou4KPM,15095
+agentex_sdk-0.4.18.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+agentex_sdk-0.4.18.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
+agentex_sdk-0.4.18.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
+agentex_sdk-0.4.18.dist-info/RECORD,,
{agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/WHEEL
File without changes
{agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/entry_points.txt
File without changes
{agentex_sdk-0.4.17.dist-info → agentex_sdk-0.4.18.dist-info}/licenses/LICENSE
File without changes