agentex-sdk 0.6.3__py3-none-any.whl → 0.6.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "agentex"
-__version__ = "0.6.3" # x-release-please-version
+__version__ = "0.6.5" # x-release-please-version

agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py CHANGED
@@ -357,9 +357,19 @@ class TemporalStreamingModel(Model):
         reasoning_param = {
             "effort": model_settings.reasoning.effort,
         }
-        # Add generate_summary if specified and not None
-        if hasattr(model_settings.reasoning, 'generate_summary') and model_settings.reasoning.generate_summary is not None:
-            reasoning_param["summary"] = model_settings.reasoning.generate_summary
+        # Add summary if specified (check both 'summary' and 'generate_summary' for compatibility)
+        summary_value = None
+        if hasattr(model_settings.reasoning, 'summary') and model_settings.reasoning.summary is not None:
+            summary_value = model_settings.reasoning.summary
+        elif (
+            hasattr(model_settings.reasoning, 'generate_summary')
+            and model_settings.reasoning.generate_summary is not None
+        ):
+            summary_value = model_settings.reasoning.generate_summary
+
+        if summary_value is not None:
+            reasoning_param["summary"] = summary_value
+
         logger.debug(f"[TemporalStreamingModel] Using reasoning param: {reasoning_param}")
         return reasoning_param
 
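The new lookup prefers the `summary` field and only falls back to the older `generate_summary`. A minimal standalone sketch of the same resolution order (the `Reasoning` dataclass below is a hypothetical stand-in, not the SDK's settings type):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Reasoning:
    # Hypothetical stand-in for model_settings.reasoning, not the SDK's type.
    effort: str = "medium"
    summary: Optional[str] = None           # newer field name
    generate_summary: Optional[str] = None  # legacy field name

def build_reasoning_param(reasoning: Reasoning) -> dict:
    param = {"effort": reasoning.effort}
    # Prefer 'summary'; fall back to the legacy 'generate_summary'.
    summary_value = reasoning.summary
    if summary_value is None:
        summary_value = getattr(reasoning, "generate_summary", None)
    if summary_value is not None:
        param["summary"] = summary_value
    return param

assert build_reasoning_param(Reasoning(generate_summary="auto")) == {"effort": "medium", "summary": "auto"}
assert build_reasoning_param(Reasoning(summary="detailed", generate_summary="auto"))["summary"] == "detailed"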
@@ -487,12 +497,32 @@ class TemporalStreamingModel(Model):
             include_list.append("message.output_text.logprobs")
         # Build response format for verbosity and structured output
         response_format = NOT_GIVEN
+
         if output_schema is not None:
-            # Handle structured output schema
-            # This would need conversion logic similar to Converter.get_response_format
-            pass # TODO: Implement output_schema conversion
-        elif model_settings.verbosity is not None:
-            response_format = {"verbosity": model_settings.verbosity}
+            # Handle structured output schema for Responses API
+            # The Responses API expects the schema in the 'text' parameter with a 'format' key
+            logger.debug(f"[TemporalStreamingModel] Converting output_schema to Responses API format")
+            try:
+                # Get the JSON schema from the output schema
+                schema_dict = output_schema.json_schema()
+                response_format = {
+                    "format": {
+                        "type": "json_schema",
+                        "name": "final_output",
+                        "schema": schema_dict,
+                        "strict": output_schema.is_strict_json_schema() if hasattr(output_schema, 'is_strict_json_schema') else True,
+                    }
+                }
+                logger.debug(f"[TemporalStreamingModel] Built response_format with json_schema: {response_format}")
+            except Exception as e:
+                logger.warning(f"Failed to convert output_schema: {e}")
+                response_format = NOT_GIVEN
+
+        if model_settings.verbosity is not None:
+            if response_format is not NOT_GIVEN and isinstance(response_format, dict):
+                response_format["verbosity"] = model_settings.verbosity
+            else:
+                response_format = {"verbosity": model_settings.verbosity}
 
         # Build extra_args dict for additional parameters
         extra_args = dict(model_settings.extra_args or {})
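The resulting `text` parameter passed to the Responses API has the shape below; the schema itself is a hypothetical example, and `verbosity` is merged into the same dict when both are set:

import json

# Hypothetical output schema for a structured final answer.
schema_dict = {
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
    "additionalProperties": False,
}

response_format = {
    "format": {
        "type": "json_schema",
        "name": "final_output",
        "schema": schema_dict,
        "strict": True,
    },
    # Added when model_settings.verbosity is also set.
    "verbosity": "low",
}

print(json.dumps(response_format, indent=2))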
@@ -519,7 +549,7 @@ class TemporalStreamingModel(Model):
             parallel_tool_calls=self._non_null_or_not_given(model_settings.parallel_tool_calls),
             # Context and truncation
             truncation=self._non_null_or_not_given(model_settings.truncation),
-            # Response configuration
+            # Response configuration (includes structured output schema)
             text=response_format,
             include=include_list if include_list else NOT_GIVEN,
             # Metadata and storage
@@ -536,181 +566,154 @@ class TemporalStreamingModel(Model):
         # Process the stream of events from Responses API
         output_items = []
         current_text = ""
+        streaming_context = None
         reasoning_context = None
         reasoning_summaries = []
         reasoning_contents = []
-        current_reasoning_summary = ""
         event_count = 0
 
         # We expect task_id to always be provided for streaming
         if not task_id:
             raise ValueError("[TemporalStreamingModel] task_id is required for streaming model")
 
-        # Use proper async with context manager for streaming to Redis
-        async with adk.streaming.streaming_task_message_context(
-            task_id=task_id,
-            initial_content=TextContent(
-                author="agent",
-                content="",
-                format="markdown",
-            ),
-        ) as streaming_context:
-            # Process events from the Responses API stream
-            function_calls_in_progress = {} # Track function calls being streamed
-
-            async for event in stream:
-                event_count += 1
-
-                # Log event type
-                logger.debug(f"[TemporalStreamingModel] Event {event_count}: {type(event).__name__}")
-
-                # Handle different event types using isinstance for type safety
-                if isinstance(event, ResponseOutputItemAddedEvent):
-                    # New output item (reasoning, function call, or message)
-                    item = getattr(event, 'item', None)
-                    output_index = getattr(event, 'output_index', 0)
-
-                    if item and getattr(item, 'type', None) == 'reasoning':
-                        logger.debug(f"[TemporalStreamingModel] Starting reasoning item")
-                        if not reasoning_context:
-                            # Start a reasoning context for streaming reasoning to UI
-                            reasoning_context = await adk.streaming.streaming_task_message_context(
-                                task_id=task_id,
-                                initial_content=ReasoningContent(
-                                    author="agent",
-                                    summary=[],
-                                    content=[],
-                                    type="reasoning",
-                                    style="active",
-                                ),
-                            ).__aenter__()
-                    elif item and getattr(item, 'type', None) == 'function_call':
-                        # Track the function call being streamed
-                        function_calls_in_progress[output_index] = {
-                            'id': getattr(item, 'id', ''),
-                            'call_id': getattr(item, 'call_id', ''),
-                            'name': getattr(item, 'name', ''),
-                            'arguments': getattr(item, 'arguments', ''),
-                        }
-                        logger.debug(f"[TemporalStreamingModel] Starting function call: {item.name}")
-
-                elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent):
-                    # Stream function call arguments
-                    output_index = getattr(event, 'output_index', 0)
-                    delta = getattr(event, 'delta', '')
+        # Process events from the Responses API stream
+        function_calls_in_progress = {} # Track function calls being streamed
 
-                    if output_index in function_calls_in_progress:
-                        function_calls_in_progress[output_index]['arguments'] += delta
-                        logger.debug(f"[TemporalStreamingModel] Function call args delta: {delta[:50]}...")
+        async for event in stream:
+            event_count += 1
 
-                elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent):
-                    # Function call arguments complete
-                    output_index = getattr(event, 'output_index', 0)
-                    arguments = getattr(event, 'arguments', '')
+            # Log event type
+            logger.debug(f"[TemporalStreamingModel] Event {event_count}: {type(event).__name__}")
 
-                    if output_index in function_calls_in_progress:
-                        function_calls_in_progress[output_index]['arguments'] = arguments
-                        logger.debug(f"[TemporalStreamingModel] Function call args done")
+            # Handle different event types using isinstance for type safety
+            if isinstance(event, ResponseOutputItemAddedEvent):
+                # New output item (reasoning, function call, or message)
+                item = getattr(event, 'item', None)
+                output_index = getattr(event, 'output_index', 0)
 
-                elif isinstance(event, (ResponseReasoningTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent)):
-                    # Handle text streaming
-                    delta = getattr(event, 'delta', '')
-
-                    if isinstance(event, ResponseReasoningSummaryTextDeltaEvent) and reasoning_context:
-                        # Stream reasoning summary deltas - these are the actual reasoning tokens!
-                        try:
-                            # Use ReasoningSummaryDelta for reasoning summaries
-                            summary_index = getattr(event, 'summary_index', 0)
-                            delta_obj = ReasoningSummaryDelta(
-                                summary_index=summary_index,
-                                summary_delta=delta,
-                                type="reasoning_summary",
-                            )
-                            update = StreamTaskMessageDelta(
-                                parent_task_message=reasoning_context.task_message,
-                                delta=delta_obj,
-                                type="delta",
-                            )
-                            await reasoning_context.stream_update(update)
-                            # Accumulate the reasoning summary
-                            if len(reasoning_summaries) <= summary_index:
-                                reasoning_summaries.extend([""] * (summary_index + 1 - len(reasoning_summaries)))
-                            reasoning_summaries[summary_index] += delta
-                            logger.debug(f"[TemporalStreamingModel] Streamed reasoning summary: {delta[:30]}..." if len(delta) > 30 else f"[TemporalStreamingModel] Streamed reasoning summary: {delta}")
-                        except Exception as e:
-                            logger.warning(f"Failed to send reasoning delta: {e}")
-                    elif isinstance(event, ResponseReasoningTextDeltaEvent) and reasoning_context:
-                        # Regular reasoning delta (if these ever appear)
-                        try:
-                            delta_obj = ReasoningContentDelta(
-                                content_index=0,
-                                content_delta=delta,
-                                type="reasoning_content",
-                            )
-                            update = StreamTaskMessageDelta(
-                                parent_task_message=reasoning_context.task_message,
-                                delta=delta_obj,
-                                type="delta",
-                            )
-                            await reasoning_context.stream_update(update)
-                            reasoning_contents.append(delta)
-                        except Exception as e:
-                            logger.warning(f"Failed to send reasoning delta: {e}")
-                    elif isinstance(event, ResponseTextDeltaEvent):
-                        # Stream regular text output
-                        current_text += delta
-                        try:
-                            delta_obj = TextDelta(
-                                type="text",
-                                text_delta=delta,
-                            )
-                            update = StreamTaskMessageDelta(
-                                parent_task_message=streaming_context.task_message,
-                                delta=delta_obj,
-                                type="delta",
-                            )
-                            await streaming_context.stream_update(update)
-                        except Exception as e:
-                            logger.warning(f"Failed to send text delta: {e}")
-
-                elif isinstance(event, ResponseOutputItemDoneEvent):
-                    # Output item completed
-                    item = getattr(event, 'item', None)
-                    output_index = getattr(event, 'output_index', 0)
-
-                    if item and getattr(item, 'type', None) == 'reasoning':
-                        logger.debug(f"[TemporalStreamingModel] Reasoning item completed")
-                        # Don't close the context here - let it stay open for more reasoning events
-                        # It will be closed when we send the final update or at the end
-                    elif item and getattr(item, 'type', None) == 'function_call':
-                        # Function call completed - add to output
-                        if output_index in function_calls_in_progress:
-                            call_data = function_calls_in_progress[output_index]
-                            logger.debug(f"[TemporalStreamingModel] Function call completed: {call_data['name']}")
-
-                            # Create proper function call object
-                            tool_call = ResponseFunctionToolCall(
-                                id=call_data['id'],
-                                call_id=call_data['call_id'],
-                                type="function_call",
-                                name=call_data['name'],
-                                arguments=call_data['arguments'],
-                            )
-                            output_items.append(tool_call)
-
-                elif isinstance(event, ResponseReasoningSummaryPartAddedEvent):
-                    # New reasoning part/summary started - reset accumulator
-                    part = getattr(event, 'part', None)
-                    if part:
-                        part_type = getattr(part, 'type', 'unknown')
-                        logger.debug(f"[TemporalStreamingModel] New reasoning part: type={part_type}")
-                        # Reset the current reasoning summary for this new part
-                        current_reasoning_summary = ""
-
-                elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
-                    # Reasoning part completed - send final update and close if this is the last part
+                if item and getattr(item, 'type', None) == 'reasoning':
+                    logger.debug(f"[TemporalStreamingModel] Starting reasoning item")
+                    if not reasoning_context:
+                        # Start a reasoning context for streaming reasoning to UI
+                        reasoning_context = await adk.streaming.streaming_task_message_context(
+                            task_id=task_id,
+                            initial_content=ReasoningContent(
+                                author="agent",
+                                summary=[],
+                                content=[],
+                                type="reasoning",
+                                style="active",
+                            ),
+                        ).__aenter__()
+                elif item and getattr(item, 'type', None) == 'function_call':
+                    # Track the function call being streamed
+                    function_calls_in_progress[output_index] = {
+                        'id': getattr(item, 'id', ''),
+                        'call_id': getattr(item, 'call_id', ''),
+                        'name': getattr(item, 'name', ''),
+                        'arguments': getattr(item, 'arguments', ''),
+                    }
+                    logger.debug(f"[TemporalStreamingModel] Starting function call: {item.name}")
+
+                elif item and getattr(item, 'type', None) == 'message':
+                    # Track the message being streamed
+                    streaming_context = await adk.streaming.streaming_task_message_context(
+                        task_id=task_id,
+                        initial_content=TextContent(
+                            author="agent",
+                            content="",
+                            format="markdown",
+                        ),
+                    ).__aenter__()
+
+            elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent):
+                # Stream function call arguments
+                output_index = getattr(event, 'output_index', 0)
+                delta = getattr(event, 'delta', '')
+
+                if output_index in function_calls_in_progress:
+                    function_calls_in_progress[output_index]['arguments'] += delta
+                    logger.debug(f"[TemporalStreamingModel] Function call args delta: {delta[:50]}...")
+
+            elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent):
+                # Function call arguments complete
+                output_index = getattr(event, 'output_index', 0)
+                arguments = getattr(event, 'arguments', '')
+
+                if output_index in function_calls_in_progress:
+                    function_calls_in_progress[output_index]['arguments'] = arguments
+                    logger.debug(f"[TemporalStreamingModel] Function call args done")
+
+            elif isinstance(event, (ResponseReasoningTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent)):
+                # Handle text streaming
+                delta = getattr(event, 'delta', '')
+
+                if isinstance(event, ResponseReasoningSummaryTextDeltaEvent) and reasoning_context:
+                    # Stream reasoning summary deltas - these are the actual reasoning tokens!
+                    try:
+                        # Use ReasoningSummaryDelta for reasoning summaries
+                        summary_index = getattr(event, 'summary_index', 0)
+                        delta_obj = ReasoningSummaryDelta(
+                            summary_index=summary_index,
+                            summary_delta=delta,
+                            type="reasoning_summary",
+                        )
+                        update = StreamTaskMessageDelta(
+                            parent_task_message=reasoning_context.task_message,
+                            delta=delta_obj,
+                            type="delta",
+                        )
+                        await reasoning_context.stream_update(update)
+                        # Accumulate the reasoning summary
+                        if len(reasoning_summaries) <= summary_index:
+                            logger.debug(f"[TemporalStreamingModel] Extending reasoning summaries: {summary_index}")
+                            reasoning_summaries.extend([""] * (summary_index + 1 - len(reasoning_summaries)))
+                        reasoning_summaries[summary_index] += delta
+                        logger.debug(f"[TemporalStreamingModel] Streamed reasoning summary: {delta[:30]}..." if len(delta) > 30 else f"[TemporalStreamingModel] Streamed reasoning summary: {delta}")
+                    except Exception as e:
+                        logger.warning(f"Failed to send reasoning delta: {e}")
+                elif isinstance(event, ResponseReasoningTextDeltaEvent) and reasoning_context:
+                    # Regular reasoning delta (if these ever appear)
+                    try:
+                        delta_obj = ReasoningContentDelta(
+                            content_index=0,
+                            content_delta=delta,
+                            type="reasoning_content",
+                        )
+                        update = StreamTaskMessageDelta(
+                            parent_task_message=reasoning_context.task_message,
+                            delta=delta_obj,
+                            type="delta",
+                        )
+                        await reasoning_context.stream_update(update)
+                        reasoning_contents.append(delta)
+                    except Exception as e:
+                        logger.warning(f"Failed to send reasoning delta: {e}")
+                elif isinstance(event, ResponseTextDeltaEvent):
+                    # Stream regular text output
+                    current_text += delta
+                    try:
+                        delta_obj = TextDelta(
+                            type="text",
+                            text_delta=delta,
+                        )
+                        update = StreamTaskMessageDelta(
+                            parent_task_message=streaming_context.task_message if streaming_context else None,
+                            delta=delta_obj,
+                            type="delta",
+                        )
+                        await streaming_context.stream_update(update) if streaming_context else None
+                    except Exception as e:
+                        logger.warning(f"Failed to send text delta: {e}")
+
+            elif isinstance(event, ResponseOutputItemDoneEvent):
+                # Output item completed
+                item = getattr(event, 'item', None)
+                output_index = getattr(event, 'output_index', 0)
+
+                if item and getattr(item, 'type', None) == 'reasoning':
                     if reasoning_context and reasoning_summaries:
-                        logger.debug(f"[TemporalStreamingModel] Reasoning part completed, sending final update")
+                        logger.debug(f"[TemporalStreamingModel] Reasoning item completed, sending final update")
                         try:
                             # Send a full message update with the complete reasoning content
                             complete_reasoning_content = ReasoningContent(
@@ -737,19 +740,51 @@ class TemporalStreamingModel(Model):
                         except Exception as e:
                             logger.warning(f"Failed to send reasoning part done update: {e}")
 
-                elif isinstance(event, ResponseCompletedEvent):
-                    # Response completed
-                    logger.debug(f"[TemporalStreamingModel] Response completed")
-                    response = getattr(event, 'response', None)
-                    if response and hasattr(response, 'output'):
-                        # Use the final output from the response
-                        output_items = response.output
-                        logger.debug(f"[TemporalStreamingModel] Found {len(output_items)} output items in final response")
-
-            # End of event processing loop - close any open contexts
-            if reasoning_context:
-                await reasoning_context.close()
-                reasoning_context = None
+                elif item and getattr(item, 'type', None) == 'function_call':
+                    # Function call completed - add to output
+                    if output_index in function_calls_in_progress:
+                        call_data = function_calls_in_progress[output_index]
+                        logger.debug(f"[TemporalStreamingModel] Function call completed: {call_data['name']}")
+
+                        # Create proper function call object
+                        tool_call = ResponseFunctionToolCall(
+                            id=call_data['id'],
+                            call_id=call_data['call_id'],
+                            type="function_call",
+                            name=call_data['name'],
+                            arguments=call_data['arguments'],
+                        )
+                        output_items.append(tool_call)
+
+            elif isinstance(event, ResponseReasoningSummaryPartAddedEvent):
+                # New reasoning part/summary started - reset accumulator
+                part = getattr(event, 'part', None)
+                if part:
+                    part_type = getattr(part, 'type', 'unknown')
+                    logger.debug(f"[TemporalStreamingModel] New reasoning part: type={part_type}")
+                    # Reset the current reasoning summary for this new part
+
+            elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
+                # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update
+                logger.debug(f"[TemporalStreamingModel] Reasoning part completed")
+
+            elif isinstance(event, ResponseCompletedEvent):
+                # Response completed
+                logger.debug(f"[TemporalStreamingModel] Response completed")
+                response = getattr(event, 'response', None)
+                if response and hasattr(response, 'output'):
+                    # Use the final output from the response
+                    output_items = response.output
+                    logger.debug(f"[TemporalStreamingModel] Found {len(output_items)} output items in final response")
+
+        # End of event processing loop - close any open contexts
+        if reasoning_context:
+            await reasoning_context.close()
+            reasoning_context = None
+
+        if streaming_context:
+            await streaming_context.close()
+            streaming_context = None
 
         # Build the response from output items collected during streaming
         # Create output from the items we collected
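The net effect of these two hunks: the single `async with` text context that wrapped the whole event loop is replaced by contexts opened lazily when the first item of each kind arrives, then closed explicitly after the loop. A condensed sketch of that pattern, using a stand-in context manager rather than `adk.streaming.streaming_task_message_context`:

import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def message_context(kind):
    # Stand-in for adk.streaming.streaming_task_message_context(...); illustrative only.
    print(f"open {kind} context")
    try:
        yield f"{kind}-handle"
    finally:
        print(f"close {kind} context")

async def consume(events):
    open_contexts = {}  # kind -> entered context manager
    try:
        for kind in events:
            # Open each context lazily, on the first output item of that kind.
            if kind not in open_contexts:
                cm = message_context(kind)
                await cm.__aenter__()
                open_contexts[kind] = cm
            # ...stream deltas for this item into open_contexts[kind]...
    finally:
        # End-of-loop cleanup, mirroring the close() calls above.
        for cm in open_contexts.values():
            await cm.__aexit__(None, None, None)

asyncio.run(consume(["reasoning", "reasoning", "message"]))

This avoids emitting an empty text message when a response contains only reasoning or tool calls, since the text context is never opened unless a message item actually arrives.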
@@ -842,10 +877,16 @@
 class TemporalStreamingModelProvider(ModelProvider):
     """Custom model provider that returns a streaming-capable model."""
 
-    def __init__(self):
-        """Initialize the provider."""
+    def __init__(self, openai_client: Optional[AsyncOpenAI] = None):
+        """Initialize the provider.
+
+        Args:
+            openai_client: Optional custom AsyncOpenAI client to use for all models.
+                If not provided, each model will create its own default client.
+        """
         super().__init__()
-        logger.info("[TemporalStreamingModelProvider] Initialized")
+        self.openai_client = openai_client
+        logger.info(f"[TemporalStreamingModelProvider] Initialized, custom_client={openai_client is not None}")
 
     @override
     def get_model(self, model_name: Union[str, None]) -> Model:
@@ -860,5 +901,5 @@ class TemporalStreamingModelProvider(ModelProvider):
         # Use the provided model_name or default to gpt-4o
         actual_model = model_name if model_name else "gpt-4o"
         logger.info(f"[TemporalStreamingModelProvider] Creating TemporalStreamingModel for model_name: {actual_model}")
-        model = TemporalStreamingModel(model_name=actual_model)
+        model = TemporalStreamingModel(model_name=actual_model, openai_client=self.openai_client)
        return model
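Together these hunks make the provider injectable with a preconfigured client. A hypothetical usage sketch, assuming the module path listed in the RECORD section below and a placeholder proxy URL:

from openai import AsyncOpenAI

# Module path as listed in the RECORD section of this diff.
from agentex.lib.core.temporal.plugins.openai_agents.models.temporal_streaming_model import (
    TemporalStreamingModelProvider,
)

# Placeholder proxy URL; reads OPENAI_API_KEY from the environment.
client = AsyncOpenAI(base_url="https://llm-proxy.example.com/v1", max_retries=5)

provider = TemporalStreamingModelProvider(openai_client=client)
model = provider.get_model("gpt-4o")  # shares the injected client

default_provider = TemporalStreamingModelProvider()  # each model creates its own default client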

agentex/lib/sdk/fastacp/base/base_acp_server.py CHANGED
@@ -78,6 +78,9 @@ class BaseACPServer(FastAPI):
         self.add_middleware(RequestIDMiddleware)
         self._handlers: dict[RPCMethod, Callable] = {}
 
+        # Agent info to return in healthz
+        self.agent_id: str | None = None
+
     @classmethod
     def create(cls):
         """Create and initialize BaseACPServer instance"""
@@ -96,6 +99,7 @@ class BaseACPServer(FastAPI):
         env_vars = EnvironmentVariables.refresh()
         if env_vars.AGENTEX_BASE_URL:
             await register_agent(env_vars)
+            self.agent_id = env_vars.AGENT_ID
         else:
             logger.warning("AGENTEX_BASE_URL not set, skipping agent registration")
 
@@ -105,7 +109,10 @@ class BaseACPServer(FastAPI):
 
     async def _healthz(self):
         """Health check endpoint"""
-        return {"status": "healthy"}
+        result = {"status": "healthy"}
+        if self.agent_id:
+            result["agent_id"] = self.agent_id
+        return result
 
     def _wrap_handler(self, fn: Callable[..., Awaitable[Any]]):
         """Wraps handler functions to provide JSON-RPC 2.0 response format"""

{agentex_sdk-0.6.3.dist-info → agentex_sdk-0.6.5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: agentex-sdk
-Version: 0.6.3
+Version: 0.6.5
 Summary: The official Python library for the agentex API
 Project-URL: Homepage, https://github.com/scaleapi/scale-agentex-python
 Project-URL: Repository, https://github.com/scaleapi/scale-agentex-python

{agentex_sdk-0.6.3.dist-info → agentex_sdk-0.6.5.dist-info}/RECORD RENAMED
@@ -11,7 +11,7 @@ agentex/_resource.py,sha256=S1t7wmR5WUvoDIhZjo_x-E7uoTJBynJ3d8tPJMQYdjw,1106
 agentex/_response.py,sha256=Tb9zazsnemO2rTxWtBjAD5WBqlhli5ZaXGbiKgdu5DE,28794
 agentex/_streaming.py,sha256=p-m2didLkbw_VBZsP4QqeIPc2haAdGZmB0BOU3gUM2A,10153
 agentex/_types.py,sha256=F6X63N7bOstytAtVqJ9Yl7T_JbR9Od2MJfZ_iK5DqOY,7237
-agentex/_version.py,sha256=c2fQYF_Uula9lejrW5sHf4Mfcdn6IMrnCDZlp3EDgno,159
+agentex/_version.py,sha256=r7u5V4-ao2MtjmYwTsNUkpyaHQ7CJR7ZAG4nuT0xZxc,159
 agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agentex/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 agentex/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -171,7 +171,7 @@ agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py,sha256=qbB6RLPlve
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py,sha256=hrj6lRPi9nb_HAohRK4oPnaji69QQ6brj-Wu2q0mU0s,521
 agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py,sha256=sBLJonJJ5Ke1BJIlzbqtGeO5p8NIbvftbEYQbjgeZCE,7256
 agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py,sha256=FeTt91JkSfYLlCTdrVFpjcQ0asbQyCd6Rl5efqZkslo,791
-agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=ZODPIUuvSZO5eYhByMv2n87SOKYjAh9JunkJm8ECRPQ,42310
+agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=ca1EeVXu7jehXyWbV_QvJBo60iMKoCNqgjeFtQ-1frs,43906
 agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py,sha256=BiuIhSvyNfocwMYQtxOoqgMpyJsMHLkyXzYPYnw4ChA,17458
 agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py,sha256=suEVJuonfBoVZ3IqdO0UMn0hkFFzDqRoso0VEOit-KQ,80
 agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py,sha256=oMI_3dVn6DoiLgCjRVUeQE_Z2Gz3tGTwPxTQ1krjKSE,7692
@@ -202,7 +202,7 @@ agentex/lib/sdk/config/project_config.py,sha256=uMrg9BqEQFcnqdlqqSLYsaQkP1mMedhE
 agentex/lib/sdk/config/validation.py,sha256=ox8g2vwjYsmfNcz4G-sbPw0ccWjylJRG5bufTEPQMCk,9024
 agentex/lib/sdk/fastacp/__init__.py,sha256=UvAdexdnfb4z0F4a2sfXROFyh9EjH89kf3AxHPybzCM,75
 agentex/lib/sdk/fastacp/fastacp.py,sha256=3aT74pFwF76VoTbQnGZsF6As42aLa2o_JrO6EP_XHQM,4591
-agentex/lib/sdk/fastacp/base/base_acp_server.py,sha256=1ltdXNidNE1SeVYBmwf4WxGRRn6eb2u9hn6SOOgp2g0,16932
+agentex/lib/sdk/fastacp/base/base_acp_server.py,sha256=W2rMZUC-5GLvLJsLFKZHtmyG9Uhrsgffqo9qcomThsQ,17163
 agentex/lib/sdk/fastacp/base/constants.py,sha256=FxhXqdaqazQIxFTfAMzl4wx50TMCzBvoNtRI7hUdL2o,837
 agentex/lib/sdk/fastacp/impl/async_base_acp.py,sha256=xT95pQ-jQpDtBpB8-Z_ZWNuwG9eXgJEluGzQ7vfo8UE,2675
 agentex/lib/sdk/fastacp/impl/sync_acp.py,sha256=0yNaWr9k28U3jKucKRoV8a53LsPyfyqwlJupe6e5pv0,3933
@@ -330,8 +330,8 @@ agentex/types/messages/batch_update_params.py,sha256=Ug5CThbD49a8j4qucg04OdmVrp_
 agentex/types/messages/batch_update_response.py,sha256=TbSBe6SuPzjXXWSj-nRjT1JHGBooTshHQQDa1AixQA8,278
 agentex/types/shared/__init__.py,sha256=IKs-Qn5Yja0kFh1G1kDqYZo43qrOu1hSoxlPdN-85dI,149
 agentex/types/shared/delete_response.py,sha256=8qH3zvQXaOHYQSHyXi7UQxdR4miTzR7V9K4zXVsiUyk,215
-agentex_sdk-0.6.3.dist-info/METADATA,sha256=FPfO3b3DUDmtZvVDGhsammLJgD_PlhwLG-zQqo7DzKI,15375
-agentex_sdk-0.6.3.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-agentex_sdk-0.6.3.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
-agentex_sdk-0.6.3.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
-agentex_sdk-0.6.3.dist-info/RECORD,,
+agentex_sdk-0.6.5.dist-info/METADATA,sha256=RyqOx-PvKF-CECR8mWmH3fbywmVXzoWNOmNwMW3OsDY,15375
+agentex_sdk-0.6.5.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+agentex_sdk-0.6.5.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
+agentex_sdk-0.6.5.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
+agentex_sdk-0.6.5.dist-info/RECORD,,