agentex-sdk 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "agentex"
-__version__ = "0.6.3" # x-release-please-version
+__version__ = "0.6.4" # x-release-please-version
agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py CHANGED
@@ -357,9 +357,19 @@ class TemporalStreamingModel(Model):
             reasoning_param = {
                 "effort": model_settings.reasoning.effort,
             }
-            # Add generate_summary if specified and not None
-            if hasattr(model_settings.reasoning, 'generate_summary') and model_settings.reasoning.generate_summary is not None:
-                reasoning_param["summary"] = model_settings.reasoning.generate_summary
+            # Add summary if specified (check both 'summary' and 'generate_summary' for compatibility)
+            summary_value = None
+            if hasattr(model_settings.reasoning, 'summary') and model_settings.reasoning.summary is not None:
+                summary_value = model_settings.reasoning.summary
+            elif (
+                hasattr(model_settings.reasoning, 'generate_summary')
+                and model_settings.reasoning.generate_summary is not None
+            ):
+                summary_value = model_settings.reasoning.generate_summary
+
+            if summary_value is not None:
+                reasoning_param["summary"] = summary_value
+
             logger.debug(f"[TemporalStreamingModel] Using reasoning param: {reasoning_param}")
             return reasoning_param
 
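
Note on the hunk above: the new lookup prefers the newer `summary` field and only falls back to the older `generate_summary` name. A minimal standalone sketch of the same resolution order, using a hypothetical stand-in dataclass for `model_settings.reasoning`:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Reasoning:  # hypothetical stand-in for model_settings.reasoning
        effort: str = "medium"
        summary: Optional[str] = None
        generate_summary: Optional[str] = None  # deprecated spelling

    def build_reasoning_param(reasoning: Reasoning) -> dict:
        param = {"effort": reasoning.effort}
        # Prefer 'summary'; fall back to 'generate_summary' for compatibility
        value = reasoning.summary if reasoning.summary is not None else reasoning.generate_summary
        if value is not None:
            param["summary"] = value
        return param

    # 'summary' wins when both are set; 'generate_summary' still works alone
    assert build_reasoning_param(Reasoning(summary="detailed", generate_summary="auto"))["summary"] == "detailed"
    assert build_reasoning_param(Reasoning(generate_summary="auto"))["summary"] == "auto"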
@@ -679,9 +689,34 @@ class TemporalStreamingModel(Model):
                 output_index = getattr(event, 'output_index', 0)
 
                 if item and getattr(item, 'type', None) == 'reasoning':
-                    logger.debug(f"[TemporalStreamingModel] Reasoning item completed")
-                    # Don't close the context here - let it stay open for more reasoning events
-                    # It will be closed when we send the final update or at the end
+                    if reasoning_context and reasoning_summaries:
+                        logger.debug(f"[TemporalStreamingModel] Reasoning item completed, sending final update")
+                        try:
+                            # Send a full message update with the complete reasoning content
+                            complete_reasoning_content = ReasoningContent(
+                                author="agent",
+                                summary=reasoning_summaries,  # Use accumulated summaries
+                                content=reasoning_contents if reasoning_contents else [],
+                                type="reasoning",
+                                style="static",
+                            )
+
+                            await reasoning_context.stream_update(
+                                update=StreamTaskMessageFull(
+                                    parent_task_message=reasoning_context.task_message,
+                                    content=complete_reasoning_content,
+                                    type="full",
+                                ),
+                            )
+
+                            # Close the reasoning context after sending the final update
+                            # This matches the reference implementation pattern
+                            await reasoning_context.close()
+                            reasoning_context = None
+                            logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
+                        except Exception as e:
+                            logger.warning(f"Failed to send reasoning part done update: {e}")
+
                 elif item and getattr(item, 'type', None) == 'function_call':
                     # Function call completed - add to output
                     if output_index in function_calls_in_progress:
@@ -708,34 +743,8 @@ class TemporalStreamingModel(Model):
                     current_reasoning_summary = ""
 
                 elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
-                    # Reasoning part completed - send final update and close if this is the last part
-                    if reasoning_context and reasoning_summaries:
-                        logger.debug(f"[TemporalStreamingModel] Reasoning part completed, sending final update")
-                        try:
-                            # Send a full message update with the complete reasoning content
-                            complete_reasoning_content = ReasoningContent(
-                                author="agent",
-                                summary=reasoning_summaries,  # Use accumulated summaries
-                                content=reasoning_contents if reasoning_contents else [],
-                                type="reasoning",
-                                style="static",
-                            )
-
-                            await reasoning_context.stream_update(
-                                update=StreamTaskMessageFull(
-                                    parent_task_message=reasoning_context.task_message,
-                                    content=complete_reasoning_content,
-                                    type="full",
-                                ),
-                            )
-
-                            # Close the reasoning context after sending the final update
-                            # This matches the reference implementation pattern
-                            await reasoning_context.close()
-                            reasoning_context = None
-                            logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
-                        except Exception as e:
-                            logger.warning(f"Failed to send reasoning part done update: {e}")
+                    # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update
+                    logger.debug(f"[TemporalStreamingModel] Reasoning part completed")
 
                 elif isinstance(event, ResponseCompletedEvent):
                     # Response completed
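
The net effect of the two hunks above: the final reasoning update and the context close now happen exactly once, on the `ResponseOutputItemDoneEvent` for the reasoning item, instead of on each `ResponseReasoningSummaryPartDoneEvent`. A reasoning item can carry several summary parts, so closing on the first part-done event would have dropped later parts. Roughly (an illustrative event order, not a captured trace):

    # ResponseReasoningSummaryPartDoneEvent (part 1)  -> log only, context stays open
    # ResponseReasoningSummaryPartDoneEvent (part 2)  -> log only, context stays open
    # ResponseOutputItemDoneEvent (type='reasoning')  -> send full update, close context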
@@ -842,10 +851,16 @@ class TemporalStreamingModel(Model):
 class TemporalStreamingModelProvider(ModelProvider):
     """Custom model provider that returns a streaming-capable model."""
 
-    def __init__(self):
-        """Initialize the provider."""
+    def __init__(self, openai_client: Optional[AsyncOpenAI] = None):
+        """Initialize the provider.
+
+        Args:
+            openai_client: Optional custom AsyncOpenAI client to use for all models.
+                If not provided, each model will create its own default client.
+        """
         super().__init__()
-        logger.info("[TemporalStreamingModelProvider] Initialized")
+        self.openai_client = openai_client
+        logger.info(f"[TemporalStreamingModelProvider] Initialized, custom_client={openai_client is not None}")
 
     @override
     def get_model(self, model_name: Union[str, None]) -> Model:
@@ -860,5 +875,5 @@ class TemporalStreamingModelProvider(ModelProvider):
         # Use the provided model_name or default to gpt-4o
         actual_model = model_name if model_name else "gpt-4o"
         logger.info(f"[TemporalStreamingModelProvider] Creating TemporalStreamingModel for model_name: {actual_model}")
-        model = TemporalStreamingModel(model_name=actual_model)
+        model = TemporalStreamingModel(model_name=actual_model, openai_client=self.openai_client)
         return model
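
With the two hunks above, a caller can route every model the provider creates through one preconfigured client. A usage sketch (the base_url and key are placeholders; the import path follows the module listed in the RECORD below):

    from openai import AsyncOpenAI
    from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
        TemporalStreamingModelProvider,
    )

    # e.g. point all models at a proxy or use a non-default API key
    client = AsyncOpenAI(base_url="https://llm-proxy.example.com/v1", api_key="sk-...")
    provider = TemporalStreamingModelProvider(openai_client=client)
    model = provider.get_model("gpt-4o")  # reuses the injected client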
agentex_sdk-{0.6.3 → 0.6.4}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: agentex-sdk
-Version: 0.6.3
+Version: 0.6.4
 Summary: The official Python library for the agentex API
 Project-URL: Homepage, https://github.com/scaleapi/scale-agentex-python
 Project-URL: Repository, https://github.com/scaleapi/scale-agentex-python
agentex_sdk-{0.6.3 → 0.6.4}.dist-info/RECORD RENAMED
@@ -11,7 +11,7 @@ agentex/_resource.py,sha256=S1t7wmR5WUvoDIhZjo_x-E7uoTJBynJ3d8tPJMQYdjw,1106
 agentex/_response.py,sha256=Tb9zazsnemO2rTxWtBjAD5WBqlhli5ZaXGbiKgdu5DE,28794
 agentex/_streaming.py,sha256=p-m2didLkbw_VBZsP4QqeIPc2haAdGZmB0BOU3gUM2A,10153
 agentex/_types.py,sha256=F6X63N7bOstytAtVqJ9Yl7T_JbR9Od2MJfZ_iK5DqOY,7237
-agentex/_version.py,sha256=c2fQYF_Uula9lejrW5sHf4Mfcdn6IMrnCDZlp3EDgno,159
+agentex/_version.py,sha256=aqKdT0Jt6xBNgHPcCkDvPAaRPHGW9fWqLx7ayeP03X0,159
 agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 agentex/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -171,7 +171,7 @@ agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py,sha256=qbB6RLPlve
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py,sha256=hrj6lRPi9nb_HAohRK4oPnaji69QQ6brj-Wu2q0mU0s,521
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py,sha256=sBLJonJJ5Ke1BJIlzbqtGeO5p8NIbvftbEYQbjgeZCE,7256
 agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py,sha256=FeTt91JkSfYLlCTdrVFpjcQ0asbQyCd6Rl5efqZkslo,791
-agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=ZODPIUuvSZO5eYhByMv2n87SOKYjAh9JunkJm8ECRPQ,42310
+agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=_lHjIlHsqAbFqsj8PXgqyUbdsr6gy7Yvot-YnLxj4fc,42962
 agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py,sha256=BiuIhSvyNfocwMYQtxOoqgMpyJsMHLkyXzYPYnw4ChA,17458
 agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py,sha256=suEVJuonfBoVZ3IqdO0UMn0hkFFzDqRoso0VEOit-KQ,80
 agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py,sha256=oMI_3dVn6DoiLgCjRVUeQE_Z2Gz3tGTwPxTQ1krjKSE,7692
@@ -330,8 +330,8 @@ agentex/types/messages/batch_update_params.py,sha256=Ug5CThbD49a8j4qucg04OdmVrp_
 agentex/types/messages/batch_update_response.py,sha256=TbSBe6SuPzjXXWSj-nRjT1JHGBooTshHQQDa1AixQA8,278
 agentex/types/shared/__init__.py,sha256=IKs-Qn5Yja0kFh1G1kDqYZo43qrOu1hSoxlPdN-85dI,149
 agentex/types/shared/delete_response.py,sha256=8qH3zvQXaOHYQSHyXi7UQxdR4miTzR7V9K4zXVsiUyk,215
-agentex_sdk-0.6.3.dist-info/METADATA,sha256=FPfO3b3DUDmtZvVDGhsammLJgD_PlhwLG-zQqo7DzKI,15375
-agentex_sdk-0.6.3.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-agentex_sdk-0.6.3.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
-agentex_sdk-0.6.3.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
-agentex_sdk-0.6.3.dist-info/RECORD,,
+agentex_sdk-0.6.4.dist-info/METADATA,sha256=jsfcoPc8yA4e9Ve522MCmyeYbI6JjE3K-igsLjtRDso,15375
+agentex_sdk-0.6.4.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+agentex_sdk-0.6.4.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
+agentex_sdk-0.6.4.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
+agentex_sdk-0.6.4.dist-info/RECORD,,
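
For reference, the RECORD rows changed above follow the wheel spec format "path,sha256=<urlsafe-base64 digest without padding>,size". A minimal sketch of how one such row is produced; running it reproduces the agentex/py.typed row listed above:

    import base64
    import hashlib

    def record_entry(path: str, data: bytes) -> str:
        # digest is urlsafe base64 of the raw sha256 bytes, with '=' padding stripped
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
        return f"{path},sha256={digest},{len(data)}"

    print(record_entry("agentex/py.typed", b""))
    # -> agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0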