agentex-sdk 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "agentex"
-__version__ = "0.6.2" # x-release-please-version
+__version__ = "0.6.4" # x-release-please-version
agentex/lib/adk/providers/_modules/openai.py CHANGED
@@ -392,7 +392,7 @@ class OpenAIModule:
 
     @deprecated(
         "Use the OpenAI Agents SDK integration with Temporal instead. "
-        "See examples in tutorials/10_agentic/10_temporal/ for migration guidance."
+        "See examples in tutorials/10_async/10_temporal/ for migration guidance."
     )
     async def run_agent_streamed_auto_send(
         self,
@@ -426,7 +426,7 @@ class OpenAIModule:
 
         .. deprecated::
             Use the OpenAI Agents SDK integration with Temporal instead.
-            See examples in tutorials/10_agentic/10_temporal/ for migration guidance.
+            See examples in tutorials/10_async/10_temporal/ for migration guidance.
 
         Args:
             task_id: The ID of the task to run the agent for.
agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py CHANGED
@@ -55,9 +55,8 @@ class TemporalStreamingHooks(RunHooks):
     Power users can ignore this class and subclass agents.RunHooks directly for full control.
 
     Note:
-        Tool arguments are not available in hooks due to OpenAI SDK architecture.
-        The SDK's hook signature doesn't include tool arguments - they're only passed
-        to the actual tool function. This is why arguments={} in ToolRequestContent.
+        Tool arguments are extracted from the ToolContext's tool_arguments field,
+        which contains a JSON string of the arguments passed to the tool.
 
     Attributes:
         task_id: The AgentEx task ID for routing streamed events
@@ -108,21 +107,30 @@ class TemporalStreamingHooks(RunHooks):
     async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None:  # noqa: ARG002
         """Stream tool request when a tool starts execution.
 
-        Extracts the tool_call_id from the context and streams a ToolRequestContent
-        message to the UI showing that the tool is about to execute.
-
-        Note: Tool arguments are not available in the hook context due to OpenAI SDK
-        design. The hook signature doesn't include tool arguments - they're passed
-        directly to the tool function instead. We send an empty dict as a placeholder.
+        Extracts the tool_call_id and tool_arguments from the context and streams a
+        ToolRequestContent message to the UI showing that the tool is about to execute.
 
         Args:
-            context: The run context wrapper (will be a ToolContext with tool_call_id)
+            context: The run context wrapper (will be a ToolContext with tool_call_id and tool_arguments)
             agent: The agent executing the tool
             tool: The tool being executed
         """
+        import json
+
         tool_context = context if isinstance(context, ToolContext) else None
         tool_call_id = tool_context.tool_call_id if tool_context else f"call_{id(tool)}"
 
+        # Extract tool arguments from context
+        tool_arguments = {}
+        if tool_context and hasattr(tool_context, 'tool_arguments'):
+            try:
+                # tool_arguments is a JSON string, parse it
+                tool_arguments = json.loads(tool_context.tool_arguments)
+            except (json.JSONDecodeError, TypeError):
+                # If parsing fails, log and use empty dict
+                logger.warning(f"Failed to parse tool arguments: {tool_context.tool_arguments}")
+                tool_arguments = {}
+
         await workflow.execute_activity_method(
             stream_lifecycle_content,
             args=[
@@ -131,7 +139,7 @@ class TemporalStreamingHooks(RunHooks):
                     author="agent",
                     tool_call_id=tool_call_id,
                     name=tool.name,
-                    arguments={},  # Not available in hook context - SDK limitation
+                    arguments=tool_arguments,  # Now properly extracted from context
                 ),
             ],
             start_to_close_timeout=self.timeout,
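
For readers tracking the behavior change: tool arguments now flow into the streamed ToolRequestContent instead of the old arguments={} placeholder. A minimal standalone sketch of the same defensive parsing, using a stand-in context class (FakeToolContext and extract_arguments are illustrative names, not SDK API):

```python
import json
from dataclasses import dataclass


@dataclass
class FakeToolContext:
    """Stand-in for the SDK's ToolContext; only the fields the hook reads."""
    tool_call_id: str
    tool_arguments: str  # JSON string, as provided by the OpenAI Agents SDK


def extract_arguments(ctx: FakeToolContext) -> dict:
    """Mirror the hook's fallback: malformed JSON degrades to {} instead of raising."""
    try:
        return json.loads(ctx.tool_arguments)
    except (json.JSONDecodeError, TypeError):
        return {}


print(extract_arguments(FakeToolContext("call_1", '{"city": "Paris"}')))  # {'city': 'Paris'}
print(extract_arguments(FakeToolContext("call_2", "not json")))           # {}
```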
agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py CHANGED
@@ -102,10 +102,22 @@ def _serialize_item(item: Any) -> dict[str, Any]:
 class TemporalStreamingModel(Model):
     """Custom model implementation with streaming support."""
 
-    def __init__(self, model_name: str = "gpt-4o", _use_responses_api: bool = True):
-        """Initialize the streaming model with OpenAI client and model name."""
-        # Match the default behavior with no retries (Temporal handles retries)
-        self.client = AsyncOpenAI(max_retries=0)
+    def __init__(
+        self,
+        model_name: str = "gpt-4o",
+        _use_responses_api: bool = True,
+        openai_client: Optional[AsyncOpenAI] = None,
+    ):
+        """Initialize the streaming model with OpenAI client and model name.
+
+        Args:
+            model_name: The name of the OpenAI model to use (default: "gpt-4o")
+            _use_responses_api: Internal flag for responses API (deprecated, always True)
+            openai_client: Optional custom AsyncOpenAI client. If not provided, a default
+                client with max_retries=0 will be created (since Temporal handles retries)
+        """
+        # Use provided client or create default (Temporal handles retries)
+        self.client = openai_client if openai_client is not None else AsyncOpenAI(max_retries=0)
         self.model_name = model_name
         # Always use Responses API for all models
         self.use_responses_api = True
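
A practical consequence of the new parameter: the streaming model can be pointed at any OpenAI-compatible endpoint. A hedged usage sketch; the import path is taken from this wheel's RECORD, and the base URL and key are placeholders:

```python
from openai import AsyncOpenAI

from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
    TemporalStreamingModel,
)

# Placeholder endpoint and key; max_retries=0 mirrors the default so Temporal owns retries.
client = AsyncOpenAI(
    base_url="https://llm-gateway.example.com/v1",
    api_key="sk-placeholder",
    max_retries=0,
)

model = TemporalStreamingModel(model_name="gpt-4o", openai_client=client)

# Omitting openai_client keeps the previous behavior: AsyncOpenAI(max_retries=0).
default_model = TemporalStreamingModel()
```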
@@ -114,7 +126,7 @@ class TemporalStreamingModel(Model):
         agentex_client = create_async_agentex_client()
         self.tracer = AsyncTracer(agentex_client)
 
-        logger.info(f"[TemporalStreamingModel] Initialized model={self.model_name}, use_responses_api={self.use_responses_api}, tracer=initialized")
+        logger.info(f"[TemporalStreamingModel] Initialized model={self.model_name}, use_responses_api={self.use_responses_api}, custom_client={openai_client is not None}, tracer=initialized")
 
     def _non_null_or_not_given(self, value: Any) -> Any:
         """Convert None to NOT_GIVEN sentinel, matching OpenAI SDK pattern."""
@@ -345,9 +357,19 @@ class TemporalStreamingModel(Model):
             reasoning_param = {
                 "effort": model_settings.reasoning.effort,
             }
-            # Add generate_summary if specified and not None
-            if hasattr(model_settings.reasoning, 'generate_summary') and model_settings.reasoning.generate_summary is not None:
-                reasoning_param["summary"] = model_settings.reasoning.generate_summary
+            # Add summary if specified (check both 'summary' and 'generate_summary' for compatibility)
+            summary_value = None
+            if hasattr(model_settings.reasoning, 'summary') and model_settings.reasoning.summary is not None:
+                summary_value = model_settings.reasoning.summary
+            elif (
+                hasattr(model_settings.reasoning, 'generate_summary')
+                and model_settings.reasoning.generate_summary is not None
+            ):
+                summary_value = model_settings.reasoning.generate_summary
+
+            if summary_value is not None:
+                reasoning_param["summary"] = summary_value
+
             logger.debug(f"[TemporalStreamingModel] Using reasoning param: {reasoning_param}")
             return reasoning_param
 
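
The shim routes both spellings of the reasoning-summary setting into the same reasoning_param["summary"] key, with the newer field taking precedence. A sketch of the two equivalent call sites, assuming the openai-agents ModelSettings and the OpenAI SDK's Reasoning type:

```python
from agents import ModelSettings
from openai.types.shared import Reasoning

# Newer field: Reasoning.summary wins when both are set.
new_style = ModelSettings(reasoning=Reasoning(effort="medium", summary="auto"))

# Deprecated field: Reasoning.generate_summary still works via the fallback branch.
old_style = ModelSettings(reasoning=Reasoning(effort="medium", generate_summary="concise"))
```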
@@ -667,9 +689,34 @@ class TemporalStreamingModel(Model):
             output_index = getattr(event, 'output_index', 0)
 
             if item and getattr(item, 'type', None) == 'reasoning':
-                logger.debug(f"[TemporalStreamingModel] Reasoning item completed")
-                # Don't close the context here - let it stay open for more reasoning events
-                # It will be closed when we send the final update or at the end
+                if reasoning_context and reasoning_summaries:
+                    logger.debug(f"[TemporalStreamingModel] Reasoning item completed, sending final update")
+                    try:
+                        # Send a full message update with the complete reasoning content
+                        complete_reasoning_content = ReasoningContent(
+                            author="agent",
+                            summary=reasoning_summaries,  # Use accumulated summaries
+                            content=reasoning_contents if reasoning_contents else [],
+                            type="reasoning",
+                            style="static",
+                        )
+
+                        await reasoning_context.stream_update(
+                            update=StreamTaskMessageFull(
+                                parent_task_message=reasoning_context.task_message,
+                                content=complete_reasoning_content,
+                                type="full",
+                            ),
+                        )
+
+                        # Close the reasoning context after sending the final update
+                        # This matches the reference implementation pattern
+                        await reasoning_context.close()
+                        reasoning_context = None
+                        logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
+                    except Exception as e:
+                        logger.warning(f"Failed to send reasoning part done update: {e}")
+
             elif item and getattr(item, 'type', None) == 'function_call':
                 # Function call completed - add to output
                 if output_index in function_calls_in_progress:
@@ -696,34 +743,8 @@ class TemporalStreamingModel(Model):
                 current_reasoning_summary = ""
 
             elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
-                # Reasoning part completed - send final update and close if this is the last part
-                if reasoning_context and reasoning_summaries:
-                    logger.debug(f"[TemporalStreamingModel] Reasoning part completed, sending final update")
-                    try:
-                        # Send a full message update with the complete reasoning content
-                        complete_reasoning_content = ReasoningContent(
-                            author="agent",
-                            summary=reasoning_summaries,  # Use accumulated summaries
-                            content=reasoning_contents if reasoning_contents else [],
-                            type="reasoning",
-                            style="static",
-                        )
-
-                        await reasoning_context.stream_update(
-                            update=StreamTaskMessageFull(
-                                parent_task_message=reasoning_context.task_message,
-                                content=complete_reasoning_content,
-                                type="full",
-                            ),
-                        )
-
-                        # Close the reasoning context after sending the final update
-                        # This matches the reference implementation pattern
-                        await reasoning_context.close()
-                        reasoning_context = None
-                        logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
-                    except Exception as e:
-                        logger.warning(f"Failed to send reasoning part done update: {e}")
+                # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update
+                logger.debug(f"[TemporalStreamingModel] Reasoning part completed")
 
             elif isinstance(event, ResponseCompletedEvent):
                 # Response completed
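
Net effect of the two hunks above: the full ReasoningContent update and the context close now fire when the reasoning output item completes rather than when each summary part finishes, so multi-part summaries are accumulated before the context closes. A schematic of the resulting handler ownership; the event classes are the real openai.types.responses types, while describe() is purely illustrative:

```python
from openai.types.responses import (
    ResponseCompletedEvent,
    ResponseOutputItemDoneEvent,
    ResponseReasoningSummaryPartDoneEvent,
)


def describe(event: object) -> str:
    """Which handler owns what after this change (schematic, not SDK code)."""
    if isinstance(event, ResponseOutputItemDoneEvent):
        return "item done: send full ReasoningContent, then close the reasoning context"
    if isinstance(event, ResponseReasoningSummaryPartDoneEvent):
        return "part done: log only; the item-done handler sends the final update"
    if isinstance(event, ResponseCompletedEvent):
        return "response completed"
    return "other event"
```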
@@ -830,10 +851,16 @@ class TemporalStreamingModel(Model):
 class TemporalStreamingModelProvider(ModelProvider):
     """Custom model provider that returns a streaming-capable model."""
 
-    def __init__(self):
-        """Initialize the provider."""
+    def __init__(self, openai_client: Optional[AsyncOpenAI] = None):
+        """Initialize the provider.
+
+        Args:
+            openai_client: Optional custom AsyncOpenAI client to use for all models.
+                If not provided, each model will create its own default client.
+        """
         super().__init__()
-        logger.info("[TemporalStreamingModelProvider] Initialized")
+        self.openai_client = openai_client
+        logger.info(f"[TemporalStreamingModelProvider] Initialized, custom_client={openai_client is not None}")
 
     @override
     def get_model(self, model_name: Union[str, None]) -> Model:
@@ -848,5 +875,5 @@ class TemporalStreamingModelProvider(ModelProvider):
         # Use the provided model_name or default to gpt-4o
         actual_model = model_name if model_name else "gpt-4o"
         logger.info(f"[TemporalStreamingModelProvider] Creating TemporalStreamingModel for model_name: {actual_model}")
-        model = TemporalStreamingModel(model_name=actual_model)
+        model = TemporalStreamingModel(model_name=actual_model, openai_client=self.openai_client)
         return model
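
Putting the provider change together: one AsyncOpenAI instance can now back every model the provider hands out. A hedged wiring sketch; the import path follows this wheel's RECORD and the key is a placeholder:

```python
from openai import AsyncOpenAI

from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
    TemporalStreamingModelProvider,
)

shared_client = AsyncOpenAI(api_key="sk-placeholder", max_retries=0)

provider = TemporalStreamingModelProvider(openai_client=shared_client)
model = provider.get_model("gpt-4o-mini")  # reuses shared_client via openai_client=

# TemporalStreamingModelProvider() without arguments keeps the old per-model default.
```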
agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py CHANGED
@@ -23,6 +23,7 @@ from agents import (
     TResponseInputItem,
     AgentOutputSchemaBase,
 )
+from openai import AsyncOpenAI
 from openai.types.responses import ResponsePromptParam
 from agents.models.openai_responses import OpenAIResponsesModel
 from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
@@ -86,17 +87,25 @@ class TemporalTracingModelProvider(OpenAIProvider):
     the context interceptor enabled.
     """
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, openai_client: Optional[AsyncOpenAI] = None, **kwargs):
         """Initialize the tracing model provider.
 
-        Accepts all the same arguments as OpenAIProvider.
+        Args:
+            openai_client: Optional custom AsyncOpenAI client. If provided, this client
+                will be used for all model calls. If not provided, OpenAIProvider
+                will create a default client.
+            **kwargs: All other arguments are passed to OpenAIProvider.
         """
-        super().__init__(*args, **kwargs)
+        # Pass openai_client to parent if provided
+        if openai_client is not None:
+            super().__init__(openai_client=openai_client, **kwargs)
+        else:
+            super().__init__(**kwargs)
 
         # Initialize tracer for all models
         agentex_client = create_async_agentex_client()
         self._tracer = AsyncTracer(agentex_client)
-        logger.info("[TemporalTracingModelProvider] Initialized with AgentEx tracer")
+        logger.info(f"[TemporalTracingModelProvider] Initialized with AgentEx tracer, custom_client={openai_client is not None}")
 
     @override
     def get_model(self, model_name: Optional[str]) -> Model:
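
TemporalTracingModelProvider gains the same knob, forwarding it to OpenAIProvider (whose __init__ accepts openai_client, as the super() call above shows). A usage sketch under the same placeholder assumptions:

```python
from openai import AsyncOpenAI

from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_tracing_model import (
    TemporalTracingModelProvider,
)

client = AsyncOpenAI(api_key="sk-placeholder")

# Forwarded as OpenAIProvider(openai_client=client); omit it and OpenAIProvider
# constructs its default client instead.
provider = TemporalTracingModelProvider(openai_client=client)
```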
{agentex_sdk-0.6.2.dist-info → agentex_sdk-0.6.4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: agentex-sdk
-Version: 0.6.2
+Version: 0.6.4
 Summary: The official Python library for the agentex API
 Project-URL: Homepage, https://github.com/scaleapi/scale-agentex-python
 Project-URL: Repository, https://github.com/scaleapi/scale-agentex-python
{agentex_sdk-0.6.2.dist-info → agentex_sdk-0.6.4.dist-info}/RECORD RENAMED
@@ -11,7 +11,7 @@ agentex/_resource.py,sha256=S1t7wmR5WUvoDIhZjo_x-E7uoTJBynJ3d8tPJMQYdjw,1106
 agentex/_response.py,sha256=Tb9zazsnemO2rTxWtBjAD5WBqlhli5ZaXGbiKgdu5DE,28794
 agentex/_streaming.py,sha256=p-m2didLkbw_VBZsP4QqeIPc2haAdGZmB0BOU3gUM2A,10153
 agentex/_types.py,sha256=F6X63N7bOstytAtVqJ9Yl7T_JbR9Od2MJfZ_iK5DqOY,7237
-agentex/_version.py,sha256=e3De3d0qz0jkC87-IeW5SrqByX5rqRyZ0CQ6lTUa5EM,159
+agentex/_version.py,sha256=aqKdT0Jt6xBNgHPcCkDvPAaRPHGW9fWqLx7ayeP03X0,159
 agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 agentex/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -43,7 +43,7 @@ agentex/lib/adk/_modules/tracing.py,sha256=LZvItZg2ALZZMvJasrsGEBgss4wnXGMnL_lw0
 agentex/lib/adk/providers/__init__.py,sha256=bOS-D_lXV3QXRtGKrUvsYb2ZAcZG51ZAtHdfiHgYN-M,306
 agentex/lib/adk/providers/_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/lib/adk/providers/_modules/litellm.py,sha256=6eOdEpd1g9ZljQHXtIokjGymlFpnaw2g8ow11hldovo,9470
-agentex/lib/adk/providers/_modules/openai.py,sha256=aJqt6jfX80xcviXryrppd4D83Vi-C8gPspdUQujOWdU,23683
+agentex/lib/adk/providers/_modules/openai.py,sha256=rguTE4uGLodqr8zx5GRRtSkcWC9llNA1kUFKF0yhdK0,23679
 agentex/lib/adk/providers/_modules/sgp.py,sha256=x64axb0oVmVh5W8hwpnMMPqxad2HySf2DYHPxRNwjck,3208
 agentex/lib/adk/providers/_modules/sync_provider.py,sha256=9RQ_gMICiUN97bvnbVnVypiKoalCeiUiqonn3nKAjwU,26324
 agentex/lib/adk/utils/__init__.py,sha256=7f6ayV0_fqyw5cwzVANNcZWGJZ-vrrYtZ0qi7KKBRFs,130
@@ -167,12 +167,12 @@ agentex/lib/core/temporal/plugins/openai_agents/README.md,sha256=FzvW3xM7dCvdxq2
 agentex/lib/core/temporal/plugins/openai_agents/__init__.py,sha256=ZLHVA3LAkPDvRQlSLnJlPs0sDzqy6sUtwlapoG02Hyw,3174
 agentex/lib/core/temporal/plugins/openai_agents/hooks/__init__.py,sha256=soOuozGd7H9uWSwwjA60psUGtRR4xQ6e4wfoQbr73Fo,483
 agentex/lib/core/temporal/plugins/openai_agents/hooks/activities.py,sha256=APpAOZQ90kbhaNpJ6pAeZ2mxxcPx37AchCuLJzeGzB0,3088
-agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py,sha256=iATUCspNLweCGuG8WGd29wK8_SiZPlor5P3G9p5nvH0,7853
+agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py,sha256=qbB6RLPlveEIIfziXZBRMkaPJQMQBPsovIX_yM4QuWU,8100
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py,sha256=hrj6lRPi9nb_HAohRK4oPnaji69QQ6brj-Wu2q0mU0s,521
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py,sha256=sBLJonJJ5Ke1BJIlzbqtGeO5p8NIbvftbEYQbjgeZCE,7256
 agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py,sha256=FeTt91JkSfYLlCTdrVFpjcQ0asbQyCd6Rl5efqZkslo,791
-agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=GpEYqXdf_fYEQVti-hxGtdsQ2XnG5AOg8aFvGH00z3c,41755
-agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py,sha256=BrBX4rClOq3g_V2_UlKJVRtX8E_hu8dYiQiX-js-pto,16913
+agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=_lHjIlHsqAbFqsj8PXgqyUbdsr6gy7Yvot-YnLxj4fc,42962
+agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py,sha256=BiuIhSvyNfocwMYQtxOoqgMpyJsMHLkyXzYPYnw4ChA,17458
 agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py,sha256=suEVJuonfBoVZ3IqdO0UMn0hkFFzDqRoso0VEOit-KQ,80
 agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py,sha256=oMI_3dVn6DoiLgCjRVUeQE_Z2Gz3tGTwPxTQ1krjKSE,7692
 agentex/lib/core/temporal/plugins/openai_agents/tests/test_streaming_model.py,sha256=w8rkQbn3j_f9GZSXHl5j8FMDPaBQ3wED-BFzxQaeBIc,32909
@@ -330,8 +330,8 @@ agentex/types/messages/batch_update_params.py,sha256=Ug5CThbD49a8j4qucg04OdmVrp_
 agentex/types/messages/batch_update_response.py,sha256=TbSBe6SuPzjXXWSj-nRjT1JHGBooTshHQQDa1AixQA8,278
 agentex/types/shared/__init__.py,sha256=IKs-Qn5Yja0kFh1G1kDqYZo43qrOu1hSoxlPdN-85dI,149
 agentex/types/shared/delete_response.py,sha256=8qH3zvQXaOHYQSHyXi7UQxdR4miTzR7V9K4zXVsiUyk,215
-agentex_sdk-0.6.2.dist-info/METADATA,sha256=BmFDFE0X8Rhv-8mMdZBIDPOR0Bt3vrvA9mHndpVFUuY,15375
-agentex_sdk-0.6.2.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-agentex_sdk-0.6.2.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
-agentex_sdk-0.6.2.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
-agentex_sdk-0.6.2.dist-info/RECORD,,
+agentex_sdk-0.6.4.dist-info/METADATA,sha256=jsfcoPc8yA4e9Ve522MCmyeYbI6JjE3K-igsLjtRDso,15375
+agentex_sdk-0.6.4.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+agentex_sdk-0.6.4.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
+agentex_sdk-0.6.4.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
+agentex_sdk-0.6.4.dist-info/RECORD,,