agentex-sdk 0.6.4__py3-none-any.whl → 0.6.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
agentex/_models.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
  import os
  import inspect
+ import weakref
  from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
  from datetime import date, datetime
  from typing_extensions import (
@@ -256,15 +257,16 @@ class BaseModel(pydantic.BaseModel):
  mode: Literal["json", "python"] | str = "python",
  include: IncEx | None = None,
  exclude: IncEx | None = None,
+ context: Any | None = None,
  by_alias: bool | None = None,
  exclude_unset: bool = False,
  exclude_defaults: bool = False,
  exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
  round_trip: bool = False,
  warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
- serialize_as_any: bool = False,
  fallback: Callable[[Any], Any] | None = None,
+ serialize_as_any: bool = False,
  ) -> dict[str, Any]:
  """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
 
@@ -272,16 +274,24 @@ class BaseModel(pydantic.BaseModel):
 
  Args:
  mode: The mode in which `to_python` should run.
- If mode is 'json', the dictionary will only contain JSON serializable types.
- If mode is 'python', the dictionary may contain any Python objects.
- include: A list of fields to include in the output.
- exclude: A list of fields to exclude from the output.
+ If mode is 'json', the output will only contain JSON serializable types.
+ If mode is 'python', the output may contain non-JSON-serializable Python objects.
+ include: A set of fields to include in the output.
+ exclude: A set of fields to exclude from the output.
+ context: Additional context to pass to the serializer.
  by_alias: Whether to use the field's alias in the dictionary key if defined.
- exclude_unset: Whether to exclude fields that are unset or None from the output.
- exclude_defaults: Whether to exclude fields that are set to their default value from the output.
- exclude_none: Whether to exclude fields that have a value of `None` from the output.
- round_trip: Whether to enable serialization and deserialization round-trip support.
- warnings: Whether to log warnings when invalid fields are encountered.
+ exclude_unset: Whether to exclude fields that have not been explicitly set.
+ exclude_defaults: Whether to exclude fields that are set to their default value.
+ exclude_none: Whether to exclude fields that have a value of `None`.
+ exclude_computed_fields: Whether to exclude computed fields.
+ While this can be useful for round-tripping, it is usually recommended to use the dedicated
+ `round_trip` parameter instead.
+ round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
+ warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
+ "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+ fallback: A function to call when an unknown value is encountered. If not provided,
+ a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+ serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
 
  Returns:
  A dictionary representation of the model.
@@ -298,6 +308,8 @@ class BaseModel(pydantic.BaseModel):
  raise ValueError("serialize_as_any is only supported in Pydantic v2")
  if fallback is not None:
  raise ValueError("fallback is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
  dumped = super().dict( # pyright: ignore[reportDeprecated]
  include=include,
  exclude=exclude,
@@ -314,15 +326,17 @@ class BaseModel(pydantic.BaseModel):
  self,
  *,
  indent: int | None = None,
+ ensure_ascii: bool = False,
  include: IncEx | None = None,
  exclude: IncEx | None = None,
+ context: Any | None = None,
  by_alias: bool | None = None,
  exclude_unset: bool = False,
  exclude_defaults: bool = False,
  exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
  round_trip: bool = False,
  warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
  fallback: Callable[[Any], Any] | None = None,
  serialize_as_any: bool = False,
  ) -> str:
@@ -354,6 +368,10 @@ class BaseModel(pydantic.BaseModel):
  raise ValueError("serialize_as_any is only supported in Pydantic v2")
  if fallback is not None:
  raise ValueError("fallback is only supported in Pydantic v2")
+ if ensure_ascii != False:
+ raise ValueError("ensure_ascii is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
  return super().json( # type: ignore[reportDeprecated]
  indent=indent,
  include=include,
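
Note: the guards above only validate the new arguments under Pydantic v1; with Pydantic v2 installed they are forwarded to Pydantic's serializer. A minimal sketch of how a caller might use the new parameters (the `Example` model is hypothetical, and this assumes Pydantic v2):

    from agentex._models import BaseModel

    class Example(BaseModel):
        name: str
        note: str | None = None

    ex = Example(name="demo")

    # `context` is passed through to the serializer; `exclude_computed_fields` and
    # `ensure_ascii` raise ValueError under Pydantic v1, per the guards in this diff.
    as_dict = ex.model_dump(mode="json", exclude_none=True, context={"reason": "example"})
    as_json = ex.model_dump_json(indent=2, ensure_ascii=False, exclude_computed_fields=False)
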
@@ -573,6 +591,9 @@ class CachedDiscriminatorType(Protocol):
  __discriminator__: DiscriminatorDetails
 
 
+ DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary()
+
+
  class DiscriminatorDetails:
  field_name: str
  """The name of the discriminator field in the variant class, e.g.
@@ -615,8 +636,9 @@ class DiscriminatorDetails:
 
 
  def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
- if isinstance(union, CachedDiscriminatorType):
- return union.__discriminator__
+ cached = DISCRIMINATOR_CACHE.get(union)
+ if cached is not None:
+ return cached
 
  discriminator_field_name: str | None = None
 
@@ -669,7 +691,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
  discriminator_field=discriminator_field_name,
  discriminator_alias=discriminator_alias,
  )
- cast(CachedDiscriminatorType, union).__discriminator__ = details
+ DISCRIMINATOR_CACHE.setdefault(union, details)
  return details
 
 
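The change above swaps an attribute written onto the union type (`__discriminator__`) for a module-level `weakref.WeakKeyDictionary`, so cached discriminator details no longer require mutating the type and are dropped automatically when the type is garbage collected. A standalone sketch of that caching pattern, with illustrative names rather than the SDK's internals:

    import weakref

    class Details:
        def __init__(self, field_name: str) -> None:
            self.field_name = field_name

    _CACHE: "weakref.WeakKeyDictionary[type, Details]" = weakref.WeakKeyDictionary()

    def details_for(tp: type) -> Details:
        cached = _CACHE.get(tp)
        if cached is not None:
            return cached
        details = Details(field_name="type")  # stand-in for the real discriminator lookup
        # setdefault keeps the first stored value if two callers race to populate the cache
        return _CACHE.setdefault(tp, details)

    class Foo:  # when Foo becomes unreachable, its cache entry disappears with it
        pass

    print(details_for(Foo).field_name)
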
agentex/_utils/_sync.py CHANGED
@@ -1,10 +1,8 @@
  from __future__ import annotations
 
- import sys
  import asyncio
  import functools
- import contextvars
- from typing import Any, TypeVar, Callable, Awaitable
+ from typing import TypeVar, Callable, Awaitable
  from typing_extensions import ParamSpec
 
  import anyio
@@ -15,34 +13,11 @@ T_Retval = TypeVar("T_Retval")
  T_ParamSpec = ParamSpec("T_ParamSpec")
 
 
- if sys.version_info >= (3, 9):
- _asyncio_to_thread = asyncio.to_thread
- else:
- # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
- # for Python 3.8 support
- async def _asyncio_to_thread(
- func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
- ) -> Any:
- """Asynchronously run function *func* in a separate thread.
-
- Any *args and **kwargs supplied for this function are directly passed
- to *func*. Also, the current :class:`contextvars.Context` is propagated,
- allowing context variables from the main thread to be accessed in the
- separate thread.
-
- Returns a coroutine that can be awaited to get the eventual result of *func*.
- """
- loop = asyncio.events.get_running_loop()
- ctx = contextvars.copy_context()
- func_call = functools.partial(ctx.run, func, *args, **kwargs)
- return await loop.run_in_executor(None, func_call)
-
-
  async def to_thread(
  func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
  ) -> T_Retval:
  if sniffio.current_async_library() == "asyncio":
- return await _asyncio_to_thread(func, *args, **kwargs)
+ return await asyncio.to_thread(func, *args, **kwargs)
 
  return await anyio.to_thread.run_sync(
  functools.partial(func, *args, **kwargs),
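
With the Python 3.8 backport removed, the helper now calls the standard-library `asyncio.to_thread` directly (available since Python 3.9), which also propagates the current `contextvars.Context` into the worker thread. A minimal, self-contained sketch of that stdlib call:

    import asyncio
    import time

    def blocking_work(n: int) -> int:
        time.sleep(0.1)  # stands in for blocking I/O
        return n * 2

    async def main() -> None:
        # Runs blocking_work in a worker thread without blocking the event loop.
        result = await asyncio.to_thread(blocking_work, 21)
        print(result)  # 42

    asyncio.run(main())
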
@@ -53,10 +28,7 @@ async def to_thread(
  def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
  """
  Take a blocking function and create an async one that receives the same
- positional and keyword arguments. For python version 3.9 and above, it uses
- asyncio.to_thread to run the function in a separate thread. For python version
- 3.8, it uses locally defined copy of the asyncio.to_thread function which was
- introduced in python 3.9.
+ positional and keyword arguments.
 
  Usage:
 
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
  __title__ = "agentex"
- __version__ = "0.6.4" # x-release-please-version
+ __version__ = "0.6.7" # x-release-please-version
@@ -86,7 +86,7 @@ agent:
 
  # Optional: Set Environment variables for running your agent locally as well
  # as for deployment later on
- # env:
+ env: {}
  # OPENAI_API_KEY: "<YOUR_OPENAI_API_KEY_HERE>"
  # OPENAI_BASE_URL: "<YOUR_OPENAI_BASE_URL_HERE>"
  # OPENAI_ORG_ID: "<YOUR_OPENAI_ORG_ID_HERE>"
@@ -100,13 +100,12 @@ deployment:
  repository: "" # Update with your container registry
  tag: "latest" # Default tag, should be versioned in production
 
+ imagePullSecrets: [] # Update with your image pull secret names
+ # - name: my-registry-secret
+
  # Global deployment settings that apply to all clusters
- # These can be overridden in cluster-specific files (deploy/*.yaml)
+ # These can be overridden in cluster-specific environments (environments.yaml)
  global:
- agent:
- name: "{{ agent_name }}"
- description: "{{ description }}"
-
  # Default replica count
  replicaCount: 1
 
@@ -1,29 +1,56 @@
  from agentex.lib.sdk.fastacp.fastacp import FastACP
  from agentex.lib.types.fastacp import AsyncACPConfig
  from agentex.lib.types.acp import SendEventParams, CancelTaskParams, CreateTaskParams
+ from agentex.lib.utils.logging import make_logger
+ from agentex.types.text_content import TextContent
+ from agentex.lib import adk
+
+
+ logger = make_logger(__name__)
 
 
  # Create an ACP server
+ # This sets up the core server that will handle task creation, events, and cancellation
+ # The `type="base"` configuration is the default configuration for the ACP server
  acp = FastACP.create(
  acp_type="async",
- config=AsyncACPConfig(type="base")
+ config=AsyncACPConfig(
+ type="base",
+ ),
  )
 
 
+ # This handler is called first whenever a new task is created.
+ # It's a good place to initialize any state or resources needed for the task.
  @acp.on_task_event_send
  async def handle_task_event_send(params: SendEventParams):
- # For this tutorial, we print the parameters sent to the handler
- # so you can see where and how messages within a task are handled
- print(f"Hello world! I just received this message: {params}")
+ # For this tutorial, we log the parameters sent to the handler
+ # so you can see where and how messages within a long running task are handled
+ logger.info(f"Received task event send rpc: {params}")
+
+ # 1. Echo back the client's message to show it in the UI. This is not done by default so the agent developer has full control over what is shown to the user.
+ await adk.messages.create(task_id=params.task.id, content=params.event.content)
+
+ # 2. Send a simple response message.
+ # In future tutorials, this is where we'll add more sophisticated response logic.
+ await adk.messages.create(
+ task_id=params.task.id,
+ content=TextContent(
+ author="agent",
+ content=f"Hello! I've received your message. I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.",
+ ),
+ )
 
  @acp.on_task_cancel
  async def handle_task_canceled(params: CancelTaskParams):
  # For this tutorial, we print the parameters sent to the handler
  # so you can see where and how task cancellation is handled
- print(f"Hello world! Task canceled: {params.task.id}")
+ logger.info(f"Received task cancel rpc: {params}")
 
  @acp.on_task_create
  async def handle_task_create(params: CreateTaskParams):
- # For this tutorial, we print the parameters sent to the handler
+ # For this tutorial, we log the parameters sent to the handler
  # so you can see where and how task creation is handled
- print(f"Hello world! Task created: {params.task.id}")
+
+ # Here is where you can initialize any state or resources needed for the task.
+ logger.info(f"Received task create rpc: {params}")
@@ -74,14 +74,14 @@ agent:
  # Optional: Credentials mapping
  # Maps Kubernetes secrets to environment variables
  # Common credentials include:
- # credentials:
+ credentials: [] # Update with your credentials
  # - env_var_name: OPENAI_API_KEY
  # secret_name: openai-api-key
  # secret_key: api-key
 
  # Optional: Set Environment variables for running your agent locally as well
  # as for deployment later on
- # env:
+ env: {} # Update with your environment variables
  # OPENAI_API_KEY: "<YOUR_OPENAI_API_KEY_HERE>"
  # OPENAI_BASE_URL: "<YOUR_OPENAI_BASE_URL_HERE>"
  # OPENAI_ORG_ID: "<YOUR_OPENAI_ORG_ID_HERE>"
@@ -95,14 +95,13 @@ deployment:
  image:
  repository: "" # Update with your container registry
  tag: "latest" # Default tag, should be versioned in production
+
+ imagePullSecrets: [] # Update with your image pull secret names
+ # - name: my-registry-secret
 
  # Global deployment settings that apply to all clusters
- # These can be overridden in cluster-specific files (deploy/*.yaml)
+ # These can be overridden in cluster-specific environments (environments.yaml)
  global:
- agent:
- name: "{{ agent_name }}"
- description: "{{ description }}"
-
  # Default replica count
  replicaCount: 1
 
@@ -106,7 +106,7 @@ agent:
 
  # Optional: Set Environment variables for running your agent locally as well
  # as for deployment later on
- # env:
+ env: {}
  # OPENAI_API_KEY: "<YOUR_OPENAI_API_KEY_HERE>"
  # OPENAI_BASE_URL: "<YOUR_OPENAI_BASE_URL_HERE>"
  # OPENAI_ORG_ID: "<YOUR_OPENAI_ORG_ID_HERE>"
@@ -121,16 +121,12 @@ deployment:
  repository: "" # Update with your container registry
  tag: "latest" # Default tag, should be versioned in production
 
- imagePullSecrets:
- - name: my-registry-secret # Update with your image pull secret name
+ imagePullSecrets: [] # Update with your image pull secret name
+ # - name: my-registry-secret
 
  # Global deployment settings that apply to all clusters
- # These can be overridden using --override-file with custom configuration files
+ # These can be overridden in cluster-specific environments (environments.yaml)
  global:
- agent:
- name: "{{ agent_name }}"
- description: "{{ description }}"
-
  # Default replica count
  replicaCount: 1
 
@@ -497,12 +497,32 @@ class TemporalStreamingModel(Model):
  include_list.append("message.output_text.logprobs")
  # Build response format for verbosity and structured output
  response_format = NOT_GIVEN
+
  if output_schema is not None:
- # Handle structured output schema
- # This would need conversion logic similar to Converter.get_response_format
- pass # TODO: Implement output_schema conversion
- elif model_settings.verbosity is not None:
- response_format = {"verbosity": model_settings.verbosity}
+ # Handle structured output schema for Responses API
+ # The Responses API expects the schema in the 'text' parameter with a 'format' key
+ logger.debug(f"[TemporalStreamingModel] Converting output_schema to Responses API format")
+ try:
+ # Get the JSON schema from the output schema
+ schema_dict = output_schema.json_schema()
+ response_format = {
+ "format": {
+ "type": "json_schema",
+ "name": "final_output",
+ "schema": schema_dict,
+ "strict": output_schema.is_strict_json_schema() if hasattr(output_schema, 'is_strict_json_schema') else True,
+ }
+ }
+ logger.debug(f"[TemporalStreamingModel] Built response_format with json_schema: {response_format}")
+ except Exception as e:
+ logger.warning(f"Failed to convert output_schema: {e}")
+ response_format = NOT_GIVEN
+
+ if model_settings.verbosity is not None:
+ if response_format is not NOT_GIVEN and isinstance(response_format, dict):
+ response_format["verbosity"] = model_settings.verbosity
+ else:
+ response_format = {"verbosity": model_settings.verbosity}
 
  # Build extra_args dict for additional parameters
  extra_args = dict(model_settings.extra_args or {})
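
For illustration, a hedged sketch of the payload shape this hunk builds for the Responses API `text` parameter, using a plain Pydantic model's JSON schema as a stand-in for `output_schema.json_schema()`; the model name and values below are illustrative, only the `"format"` / `"json_schema"` / `"final_output"` structure mirrors the diff:

    from pydantic import BaseModel

    class FinalAnswer(BaseModel):
        answer: str
        confidence: float

    response_format = {
        "format": {
            "type": "json_schema",
            "name": "final_output",
            "schema": FinalAnswer.model_json_schema(),  # stand-in for output_schema.json_schema()
            "strict": True,
        }
    }
    # A verbosity setting, when present, is merged into the same dict:
    response_format["verbosity"] = "low"
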
@@ -529,7 +549,7 @@ class TemporalStreamingModel(Model):
  parallel_tool_calls=self._non_null_or_not_given(model_settings.parallel_tool_calls),
  # Context and truncation
  truncation=self._non_null_or_not_given(model_settings.truncation),
- # Response configuration
+ # Response configuration (includes structured output schema)
  text=response_format,
  include=include_list if include_list else NOT_GIVEN,
  # Metadata and storage
@@ -546,219 +566,225 @@ class TemporalStreamingModel(Model):
  # Process the stream of events from Responses API
  output_items = []
  current_text = ""
+ streaming_context = None
  reasoning_context = None
  reasoning_summaries = []
  reasoning_contents = []
- current_reasoning_summary = ""
  event_count = 0
 
  # We expect task_id to always be provided for streaming
  if not task_id:
  raise ValueError("[TemporalStreamingModel] task_id is required for streaming model")
 
- # Use proper async with context manager for streaming to Redis
- async with adk.streaming.streaming_task_message_context(
- task_id=task_id,
- initial_content=TextContent(
- author="agent",
- content="",
- format="markdown",
- ),
- ) as streaming_context:
- # Process events from the Responses API stream
- function_calls_in_progress = {} # Track function calls being streamed
-
- async for event in stream:
- event_count += 1
-
- # Log event type
- logger.debug(f"[TemporalStreamingModel] Event {event_count}: {type(event).__name__}")
-
- # Handle different event types using isinstance for type safety
- if isinstance(event, ResponseOutputItemAddedEvent):
- # New output item (reasoning, function call, or message)
- item = getattr(event, 'item', None)
- output_index = getattr(event, 'output_index', 0)
-
- if item and getattr(item, 'type', None) == 'reasoning':
- logger.debug(f"[TemporalStreamingModel] Starting reasoning item")
- if not reasoning_context:
- # Start a reasoning context for streaming reasoning to UI
- reasoning_context = await adk.streaming.streaming_task_message_context(
- task_id=task_id,
- initial_content=ReasoningContent(
- author="agent",
- summary=[],
- content=[],
- type="reasoning",
- style="active",
- ),
- ).__aenter__()
- elif item and getattr(item, 'type', None) == 'function_call':
- # Track the function call being streamed
- function_calls_in_progress[output_index] = {
- 'id': getattr(item, 'id', ''),
- 'call_id': getattr(item, 'call_id', ''),
- 'name': getattr(item, 'name', ''),
- 'arguments': getattr(item, 'arguments', ''),
- }
- logger.debug(f"[TemporalStreamingModel] Starting function call: {item.name}")
-
- elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent):
- # Stream function call arguments
- output_index = getattr(event, 'output_index', 0)
- delta = getattr(event, 'delta', '')
-
- if output_index in function_calls_in_progress:
- function_calls_in_progress[output_index]['arguments'] += delta
- logger.debug(f"[TemporalStreamingModel] Function call args delta: {delta[:50]}...")
-
- elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent):
- # Function call arguments complete
- output_index = getattr(event, 'output_index', 0)
- arguments = getattr(event, 'arguments', '')
-
- if output_index in function_calls_in_progress:
- function_calls_in_progress[output_index]['arguments'] = arguments
- logger.debug(f"[TemporalStreamingModel] Function call args done")
-
- elif isinstance(event, (ResponseReasoningTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent)):
- # Handle text streaming
- delta = getattr(event, 'delta', '')
-
- if isinstance(event, ResponseReasoningSummaryTextDeltaEvent) and reasoning_context:
- # Stream reasoning summary deltas - these are the actual reasoning tokens!
- try:
- # Use ReasoningSummaryDelta for reasoning summaries
- summary_index = getattr(event, 'summary_index', 0)
- delta_obj = ReasoningSummaryDelta(
- summary_index=summary_index,
- summary_delta=delta,
- type="reasoning_summary",
- )
- update = StreamTaskMessageDelta(
- parent_task_message=reasoning_context.task_message,
- delta=delta_obj,
- type="delta",
- )
- await reasoning_context.stream_update(update)
- # Accumulate the reasoning summary
- if len(reasoning_summaries) <= summary_index:
- reasoning_summaries.extend([""] * (summary_index + 1 - len(reasoning_summaries)))
- reasoning_summaries[summary_index] += delta
- logger.debug(f"[TemporalStreamingModel] Streamed reasoning summary: {delta[:30]}..." if len(delta) > 30 else f"[TemporalStreamingModel] Streamed reasoning summary: {delta}")
- except Exception as e:
- logger.warning(f"Failed to send reasoning delta: {e}")
- elif isinstance(event, ResponseReasoningTextDeltaEvent) and reasoning_context:
- # Regular reasoning delta (if these ever appear)
+ # Process events from the Responses API stream
+ function_calls_in_progress = {} # Track function calls being streamed
+
+ async for event in stream:
+ event_count += 1
+
+ # Log event type
+ logger.debug(f"[TemporalStreamingModel] Event {event_count}: {type(event).__name__}")
+
+ # Handle different event types using isinstance for type safety
+ if isinstance(event, ResponseOutputItemAddedEvent):
+ # New output item (reasoning, function call, or message)
+ item = getattr(event, 'item', None)
+ output_index = getattr(event, 'output_index', 0)
+
+ if item and getattr(item, 'type', None) == 'reasoning':
+ logger.debug(f"[TemporalStreamingModel] Starting reasoning item")
+ if not reasoning_context:
+ # Start a reasoning context for streaming reasoning to UI
+ reasoning_context = await adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=ReasoningContent(
+ author="agent",
+ summary=[],
+ content=[],
+ type="reasoning",
+ style="active",
+ ),
+ ).__aenter__()
+ elif item and getattr(item, 'type', None) == 'function_call':
+ # Track the function call being streamed
+ function_calls_in_progress[output_index] = {
+ 'id': getattr(item, 'id', ''),
+ 'call_id': getattr(item, 'call_id', ''),
+ 'name': getattr(item, 'name', ''),
+ 'arguments': getattr(item, 'arguments', ''),
+ }
+ logger.debug(f"[TemporalStreamingModel] Starting function call: {item.name}")
+
+ elif item and getattr(item, 'type', None) == 'message':
+ # Track the message being streamed
+ streaming_context = await adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=TextContent(
+ author="agent",
+ content="",
+ format="markdown",
+ ),
+ ).__aenter__()
+
+ elif isinstance(event, ResponseFunctionCallArgumentsDeltaEvent):
+ # Stream function call arguments
+ output_index = getattr(event, 'output_index', 0)
+ delta = getattr(event, 'delta', '')
+
+ if output_index in function_calls_in_progress:
+ function_calls_in_progress[output_index]['arguments'] += delta
+ logger.debug(f"[TemporalStreamingModel] Function call args delta: {delta[:50]}...")
+
+ elif isinstance(event, ResponseFunctionCallArgumentsDoneEvent):
+ # Function call arguments complete
+ output_index = getattr(event, 'output_index', 0)
+ arguments = getattr(event, 'arguments', '')
+
+ if output_index in function_calls_in_progress:
+ function_calls_in_progress[output_index]['arguments'] = arguments
+ logger.debug(f"[TemporalStreamingModel] Function call args done")
+
+ elif isinstance(event, (ResponseReasoningTextDeltaEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseTextDeltaEvent)):
+ # Handle text streaming
+ delta = getattr(event, 'delta', '')
+
+ if isinstance(event, ResponseReasoningSummaryTextDeltaEvent) and reasoning_context:
+ # Stream reasoning summary deltas - these are the actual reasoning tokens!
+ try:
+ # Use ReasoningSummaryDelta for reasoning summaries
+ summary_index = getattr(event, 'summary_index', 0)
+ delta_obj = ReasoningSummaryDelta(
+ summary_index=summary_index,
+ summary_delta=delta,
+ type="reasoning_summary",
+ )
+ update = StreamTaskMessageDelta(
+ parent_task_message=reasoning_context.task_message,
+ delta=delta_obj,
+ type="delta",
+ )
+ await reasoning_context.stream_update(update)
+ # Accumulate the reasoning summary
+ if len(reasoning_summaries) <= summary_index:
+ logger.debug(f"[TemporalStreamingModel] Extending reasoning summaries: {summary_index}")
+ reasoning_summaries.extend([""] * (summary_index + 1 - len(reasoning_summaries)))
+ reasoning_summaries[summary_index] += delta
+ logger.debug(f"[TemporalStreamingModel] Streamed reasoning summary: {delta[:30]}..." if len(delta) > 30 else f"[TemporalStreamingModel] Streamed reasoning summary: {delta}")
+ except Exception as e:
+ logger.warning(f"Failed to send reasoning delta: {e}")
+ elif isinstance(event, ResponseReasoningTextDeltaEvent) and reasoning_context:
+ # Regular reasoning delta (if these ever appear)
+ try:
+ delta_obj = ReasoningContentDelta(
+ content_index=0,
+ content_delta=delta,
+ type="reasoning_content",
+ )
+ update = StreamTaskMessageDelta(
+ parent_task_message=reasoning_context.task_message,
+ delta=delta_obj,
+ type="delta",
+ )
+ await reasoning_context.stream_update(update)
+ reasoning_contents.append(delta)
+ except Exception as e:
+ logger.warning(f"Failed to send reasoning delta: {e}")
+ elif isinstance(event, ResponseTextDeltaEvent):
+ # Stream regular text output
+ current_text += delta
+ try:
+ delta_obj = TextDelta(
+ type="text",
+ text_delta=delta,
+ )
+ update = StreamTaskMessageDelta(
+ parent_task_message=streaming_context.task_message if streaming_context else None,
+ delta=delta_obj,
+ type="delta",
+ )
+ await streaming_context.stream_update(update) if streaming_context else None
+ except Exception as e:
+ logger.warning(f"Failed to send text delta: {e}")
+
+ elif isinstance(event, ResponseOutputItemDoneEvent):
+ # Output item completed
+ item = getattr(event, 'item', None)
+ output_index = getattr(event, 'output_index', 0)
+
+ if item and getattr(item, 'type', None) == 'reasoning':
+ if reasoning_context and reasoning_summaries:
+ logger.debug(f"[TemporalStreamingModel] Reasoning itme completed, sending final update")
  try:
- delta_obj = ReasoningContentDelta(
- content_index=0,
- content_delta=delta,
- type="reasoning_content",
+ # Send a full message update with the complete reasoning content
+ complete_reasoning_content = ReasoningContent(
+ author="agent",
+ summary=reasoning_summaries, # Use accumulated summaries
+ content=reasoning_contents if reasoning_contents else [],
+ type="reasoning",
+ style="static",
  )
- update = StreamTaskMessageDelta(
- parent_task_message=reasoning_context.task_message,
- delta=delta_obj,
- type="delta",
- )
- await reasoning_context.stream_update(update)
- reasoning_contents.append(delta)
- except Exception as e:
- logger.warning(f"Failed to send reasoning delta: {e}")
- elif isinstance(event, ResponseTextDeltaEvent):
- # Stream regular text output
- current_text += delta
- try:
- delta_obj = TextDelta(
- type="text",
- text_delta=delta,
- )
- update = StreamTaskMessageDelta(
- parent_task_message=streaming_context.task_message,
- delta=delta_obj,
- type="delta",
+
+ await reasoning_context.stream_update(
+ update=StreamTaskMessageFull(
+ parent_task_message=reasoning_context.task_message,
+ content=complete_reasoning_content,
+ type="full",
+ ),
  )
- await streaming_context.stream_update(update)
+
+ # Close the reasoning context after sending the final update
+ # This matches the reference implementation pattern
+ await reasoning_context.close()
+ reasoning_context = None
+ logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
  except Exception as e:
- logger.warning(f"Failed to send text delta: {e}")
-
- elif isinstance(event, ResponseOutputItemDoneEvent):
- # Output item completed
- item = getattr(event, 'item', None)
- output_index = getattr(event, 'output_index', 0)
-
- if item and getattr(item, 'type', None) == 'reasoning':
- if reasoning_context and reasoning_summaries:
- logger.debug(f"[TemporalStreamingModel] Reasoning itme completed, sending final update")
- try:
- # Send a full message update with the complete reasoning content
- complete_reasoning_content = ReasoningContent(
- author="agent",
- summary=reasoning_summaries, # Use accumulated summaries
- content=reasoning_contents if reasoning_contents else [],
- type="reasoning",
- style="static",
- )
-
- await reasoning_context.stream_update(
- update=StreamTaskMessageFull(
- parent_task_message=reasoning_context.task_message,
- content=complete_reasoning_content,
- type="full",
- ),
- )
-
- # Close the reasoning context after sending the final update
- # This matches the reference implementation pattern
- await reasoning_context.close()
- reasoning_context = None
- logger.debug(f"[TemporalStreamingModel] Closed reasoning context after final update")
- except Exception as e:
- logger.warning(f"Failed to send reasoning part done update: {e}")
-
- elif item and getattr(item, 'type', None) == 'function_call':
- # Function call completed - add to output
- if output_index in function_calls_in_progress:
- call_data = function_calls_in_progress[output_index]
- logger.debug(f"[TemporalStreamingModel] Function call completed: {call_data['name']}")
-
- # Create proper function call object
- tool_call = ResponseFunctionToolCall(
- id=call_data['id'],
- call_id=call_data['call_id'],
- type="function_call",
- name=call_data['name'],
- arguments=call_data['arguments'],
- )
- output_items.append(tool_call)
-
- elif isinstance(event, ResponseReasoningSummaryPartAddedEvent):
- # New reasoning part/summary started - reset accumulator
- part = getattr(event, 'part', None)
- if part:
- part_type = getattr(part, 'type', 'unknown')
- logger.debug(f"[TemporalStreamingModel] New reasoning part: type={part_type}")
- # Reset the current reasoning summary for this new part
- current_reasoning_summary = ""
-
- elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
- # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update
- logger.debug(f"[TemporalStreamingModel] Reasoning part completed")
-
- elif isinstance(event, ResponseCompletedEvent):
- # Response completed
- logger.debug(f"[TemporalStreamingModel] Response completed")
- response = getattr(event, 'response', None)
- if response and hasattr(response, 'output'):
- # Use the final output from the response
- output_items = response.output
- logger.debug(f"[TemporalStreamingModel] Found {len(output_items)} output items in final response")
-
- # End of event processing loop - close any open contexts
- if reasoning_context:
- await reasoning_context.close()
- reasoning_context = None
+ logger.warning(f"Failed to send reasoning part done update: {e}")
+
+ elif item and getattr(item, 'type', None) == 'function_call':
+ # Function call completed - add to output
+ if output_index in function_calls_in_progress:
+ call_data = function_calls_in_progress[output_index]
+ logger.debug(f"[TemporalStreamingModel] Function call completed: {call_data['name']}")
+
+ # Create proper function call object
+ tool_call = ResponseFunctionToolCall(
+ id=call_data['id'],
+ call_id=call_data['call_id'],
+ type="function_call",
+ name=call_data['name'],
+ arguments=call_data['arguments'],
+ )
+ output_items.append(tool_call)
+
+ elif isinstance(event, ResponseReasoningSummaryPartAddedEvent):
+ # New reasoning part/summary started - reset accumulator
+ part = getattr(event, 'part', None)
+ if part:
+ part_type = getattr(part, 'type', 'unknown')
+ logger.debug(f"[TemporalStreamingModel] New reasoning part: type={part_type}")
+ # Reset the current reasoning summary for this new part
+
+ elif isinstance(event, ResponseReasoningSummaryPartDoneEvent):
+ # Reasoning part completed - ResponseOutputItemDoneEvent will handle the final update
+ logger.debug(f"[TemporalStreamingModel] Reasoning part completed")
+
+ elif isinstance(event, ResponseCompletedEvent):
+ # Response completed
+ logger.debug(f"[TemporalStreamingModel] Response completed")
+ response = getattr(event, 'response', None)
+ if response and hasattr(response, 'output'):
+ # Use the final output from the response
+ output_items = response.output
+ logger.debug(f"[TemporalStreamingModel] Found {len(output_items)} output items in final response")
+
+ # End of event processing loop - close any open contexts
+ if reasoning_context:
+ await reasoning_context.close()
+ reasoning_context = None
+
+ if streaming_context:
+ await streaming_context.close()
+ streaming_context = None
 
  # Build the response from output items collected during streaming
  # Create output from the items we collected
@@ -800,6 +826,8 @@ class TemporalStreamingModel(Model):
  # Serialize response output items for span tracing
  new_items = []
  final_output = None
+ tool_calls = []
+ tool_outputs = []
 
  for item in response_output:
  try:
@@ -819,12 +847,38 @@ class TemporalStreamingModel(Model):
  logger.warning(f"Failed to serialize item in temporal_streaming_model: {e}")
  continue
 
+ # Extract tool calls and outputs from input
+ try:
+ if isinstance(input, list):
+ for item in input:
+ try:
+ item_dict = _serialize_item(item) if not isinstance(item, dict) else item
+ if item_dict:
+ # Capture function calls
+ if item_dict.get('type') == 'function_call':
+ tool_calls.append(item_dict)
+ # Capture function outputs
+ elif item_dict.get('type') == 'function_call_output':
+ tool_outputs.append(item_dict)
+ except Exception:
+ pass
+ except Exception as e:
+ logger.warning(f"Failed to extract tool calls and outputs: {e}")
+
  # Set span output with structured data
  if span:
- span.output = {
+ output_data = {
  "new_items": new_items,
  "final_output": final_output,
  }
+ # Include tool calls if any were in the input
+ if tool_calls:
+ output_data["tool_calls"] = tool_calls
+ # Include tool outputs if any were processed
+ if tool_outputs:
+ output_data["tool_outputs"] = tool_outputs
+
+ span.output = output_data
 
  # Return the response
  return ModelResponse(
@@ -78,6 +78,9 @@ class BaseACPServer(FastAPI):
  self.add_middleware(RequestIDMiddleware)
  self._handlers: dict[RPCMethod, Callable] = {}
 
+ # Agent info to return in healthz
+ self.agent_id: str | None = None
+
  @classmethod
  def create(cls):
  """Create and initialize BaseACPServer instance"""
@@ -96,6 +99,7 @@ class BaseACPServer(FastAPI):
  env_vars = EnvironmentVariables.refresh()
  if env_vars.AGENTEX_BASE_URL:
  await register_agent(env_vars)
+ self.agent_id = env_vars.AGENT_ID
  else:
  logger.warning("AGENTEX_BASE_URL not set, skipping agent registration")
 
@@ -105,7 +109,10 @@ class BaseACPServer(FastAPI):
 
  async def _healthz(self):
  """Health check endpoint"""
- return {"status": "healthy"}
+ result = {"status": "healthy"}
+ if self.agent_id:
+ result["agent_id"] = self.agent_id
+ return result
 
  def _wrap_handler(self, fn: Callable[..., Awaitable[Any]]):
  """Wraps handler functions to provide JSON-RPC 2.0 response format"""
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: agentex-sdk
- Version: 0.6.4
+ Version: 0.6.7
  Summary: The official Python library for the agentex API
  Project-URL: Homepage, https://github.com/scaleapi/scale-agentex-python
  Project-URL: Repository, https://github.com/scaleapi/scale-agentex-python
@@ -55,6 +55,7 @@ Requires-Dist: tzdata>=2025.2
  Requires-Dist: tzlocal>=5.3.1
  Requires-Dist: uvicorn>=0.31.1
  Requires-Dist: watchfiles<1.0,>=0.24.0
+ Requires-Dist: yaspin>=3.1.0
  Provides-Extra: aiohttp
  Requires-Dist: aiohttp; extra == 'aiohttp'
  Requires-Dist: httpx-aiohttp>=0.1.9; extra == 'aiohttp'
@@ -68,7 +69,7 @@ Description-Content-Type: text/markdown
  <!-- prettier-ignore -->
  [![PyPI version](https://img.shields.io/pypi/v/agentex-sdk.svg?label=pypi%20(stable))](https://pypi.org/project/agentex-sdk/)
 
- The Agentex Python library provides convenient access to the Agentex REST API from any Python 3.8+
+ The Agentex Python library provides convenient access to the Agentex REST API from any Python 3.9+
  application. The library includes type definitions for all request params and response fields,
  and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
 
@@ -453,7 +454,7 @@ print(agentex.__version__)
 
  ## Requirements
 
- Python 3.8 or higher.
+ Python 3.9 or higher.
 
  ## Contributing
 
@@ -5,13 +5,13 @@ agentex/_compat.py,sha256=DQBVORjFb33zch24jzkhM14msvnzY7mmSmgDLaVFUM8,6562
  agentex/_constants.py,sha256=oGldMuFz7eZtwD8_6rJUippKhZB5fGSA7ffbCDGourA,466
  agentex/_exceptions.py,sha256=B09aFjWFRSShb9BFJd-MNDblsGDyGk3w-vItYmjg_AI,3222
  agentex/_files.py,sha256=KnEzGi_O756MvKyJ4fOCW_u3JhOeWPQ4RsmDvqihDQU,3545
- agentex/_models.py,sha256=lKnskYPONAWDvWo8tmbbVk7HmG7UOsI0Nve0vSMmkRc,30452
+ agentex/_models.py,sha256=3D65psj_C02Mw0K2zpBWrn1khmrvtEXgTTQ6P4r3tUY,31837
  agentex/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828
  agentex/_resource.py,sha256=S1t7wmR5WUvoDIhZjo_x-E7uoTJBynJ3d8tPJMQYdjw,1106
  agentex/_response.py,sha256=Tb9zazsnemO2rTxWtBjAD5WBqlhli5ZaXGbiKgdu5DE,28794
  agentex/_streaming.py,sha256=p-m2didLkbw_VBZsP4QqeIPc2haAdGZmB0BOU3gUM2A,10153
  agentex/_types.py,sha256=F6X63N7bOstytAtVqJ9Yl7T_JbR9Od2MJfZ_iK5DqOY,7237
- agentex/_version.py,sha256=aqKdT0Jt6xBNgHPcCkDvPAaRPHGW9fWqLx7ayeP03X0,159
+ agentex/_version.py,sha256=hFgfMDcXgm5wkZ_37lejLUTDpbuA7laOIRtc6YkGeRk,159
  agentex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agentex/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
  agentex/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -21,7 +21,7 @@ agentex/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975
  agentex/_utils/_reflection.py,sha256=ZmGkIgT_PuwedyNBrrKGbxoWtkpytJNU1uU4QHnmEMU,1364
  agentex/_utils/_resources_proxy.py,sha256=W1Rrg7LVZHLIUq40nOfgQv6orKG16CKqDRKHiaSUVYg,594
  agentex/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289
- agentex/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862
+ agentex/_utils/_sync.py,sha256=HBnZkkBnzxtwOZe0212C4EyoRvxhTVtTrLFDz2_xVCg,1589
  agentex/_utils/_transform.py,sha256=NjCzmnfqYrsAikUHQig6N9QfuTVbKipuP3ur9mcNF-E,15951
  agentex/_utils/_typing.py,sha256=fb420NYkXitEaod2CiEH-hCtzG1z9WKUQiFtuukHtr4,4967
  agentex/_utils/_utils.py,sha256=ugfUaneOK7I8h9b3656flwf5u_kthY0gvNuqvgOLoSU,12252
@@ -73,18 +73,18 @@ agentex/lib/cli/templates/default/Dockerfile.j2,sha256=Q-8T9mid_DfgZlQd6JFZKphdD
  agentex/lib/cli/templates/default/README.md.j2,sha256=78MxbwnFZqHx48A41hbngSl-cnJcc8sqZzvoocsbKoE,6359
  agentex/lib/cli/templates/default/dev.ipynb.j2,sha256=wWOZJuccKtrtN5-f44Ma8puQUNf-Q6geTPzI-QlioOA,3393
  agentex/lib/cli/templates/default/environments.yaml.j2,sha256=xEtxkMISgIAsqFTn_2dxqx0wbmj5mczv4dRei_8ZHcM,1821
- agentex/lib/cli/templates/default/manifest.yaml.j2,sha256=QkGnce9Xn5HLtFxu5Kezfukr5F0_i0izbze9D0EVAKQ,3853
+ agentex/lib/cli/templates/default/manifest.yaml.j2,sha256=8XpW1Qobk08HRKQ22-ZHnqUO9zC7FLx6CAj8cZ_4zFw,3879
  agentex/lib/cli/templates/default/pyproject.toml.j2,sha256=eyN6dYqJTFzb5WztJMxboy9Wc0XPXVnKYaF5JBxJE7o,507
  agentex/lib/cli/templates/default/requirements.txt.j2,sha256=iTmO-z8qFkUa1jTctFCs0WYuq7Sqi6VNQAwATakh2fQ,94
  agentex/lib/cli/templates/default/test_agent.py.j2,sha256=PAlGQC5p3CRQxZsa0QVb0EIHWanFKBOd4Xb5TNuVjzs,5186
- agentex/lib/cli/templates/default/project/acp.py.j2,sha256=kyeXXBIDDNCqEmvEcbK-eR-rtMjyZ1Q2xXMRXMmRbkU,1130
+ agentex/lib/cli/templates/default/project/acp.py.j2,sha256=w6dQ2f5inBeSp3ba-8KHGaig8h9oXYk7FxwCmXvqUGM,2398
  agentex/lib/cli/templates/sync/.dockerignore.j2,sha256=hweGFxw5eDZYsb5EnRHpv27o9M1HF2PEWOxqsfBBcAE,320
  agentex/lib/cli/templates/sync/Dockerfile-uv.j2,sha256=9-xbz3mh5yGuSxtQ6FRltzY45OyUzvi1ZmlfwOioK-M,1085
  agentex/lib/cli/templates/sync/Dockerfile.j2,sha256=-P2CwE84h4mwO1Gnl779c4MdoOcVX8_ndpesq9M4fQQ,1093
  agentex/lib/cli/templates/sync/README.md.j2,sha256=_S7Ngl4qOUQHPFldLXDBvuIWPFU2-WcuxGmr5EXLX6k,8816
  agentex/lib/cli/templates/sync/dev.ipynb.j2,sha256=Z42iRveuI_k5LcJqWX-3H1glPtNTkxg_MKVe1lwuJos,6055
  agentex/lib/cli/templates/sync/environments.yaml.j2,sha256=BGprRPca_Y2sPA7kOiSK8COYp4_USoikB4cQ3wbAg94,1769
- agentex/lib/cli/templates/sync/manifest.yaml.j2,sha256=V497KXzvA76sHrgIJ5zRJptpIH8sGbSXZaIsEyp5NZ4,3747
+ agentex/lib/cli/templates/sync/manifest.yaml.j2,sha256=pvH3AmdSsp4NvOtQkGTHE9ZM5wou0j99RF5ITWK5wH8,3848
  agentex/lib/cli/templates/sync/pyproject.toml.j2,sha256=eyN6dYqJTFzb5WztJMxboy9Wc0XPXVnKYaF5JBxJE7o,507
  agentex/lib/cli/templates/sync/requirements.txt.j2,sha256=iTmO-z8qFkUa1jTctFCs0WYuq7Sqi6VNQAwATakh2fQ,94
  agentex/lib/cli/templates/sync/test_agent.py.j2,sha256=zMJMCqWcEvKUUxfXSC0GRtLveqvGTknZjZWy2-HbtNU,2049
@@ -95,7 +95,7 @@ agentex/lib/cli/templates/temporal/Dockerfile.j2,sha256=N1Z73jb8pnxsjP9zbs-tSyNH
  agentex/lib/cli/templates/temporal/README.md.j2,sha256=wJKvycGC-2scQbs8uRgW9HcDa9SU2jjLeuUZ2abVAPk,10853
  agentex/lib/cli/templates/temporal/dev.ipynb.j2,sha256=wWOZJuccKtrtN5-f44Ma8puQUNf-Q6geTPzI-QlioOA,3393
  agentex/lib/cli/templates/temporal/environments.yaml.j2,sha256=zu7-nGRt_LF3qmWFxR_izTUOYQXuDZeypEVa03kVW10,2096
- agentex/lib/cli/templates/temporal/manifest.yaml.j2,sha256=hqDWx70ykAQlV1E_me01nF7-Y22Dwb0N1wGkj2bDC6c,4686
+ agentex/lib/cli/templates/temporal/manifest.yaml.j2,sha256=k1TVT6QmE25KEXyNxx7m58vCowsFq74naiRgKrbwaho,4604
  agentex/lib/cli/templates/temporal/pyproject.toml.j2,sha256=MoR1g6KnGOQrXWOXhFKMw561kgpxy0tdom0KLtQe8A8,548
  agentex/lib/cli/templates/temporal/requirements.txt.j2,sha256=iTmO-z8qFkUa1jTctFCs0WYuq7Sqi6VNQAwATakh2fQ,94
  agentex/lib/cli/templates/temporal/test_agent.py.j2,sha256=PAlGQC5p3CRQxZsa0QVb0EIHWanFKBOd4Xb5TNuVjzs,5186
@@ -171,7 +171,7 @@ agentex/lib/core/temporal/plugins/openai_agents/hooks/hooks.py,sha256=qbB6RLPlve
  agentex/lib/core/temporal/plugins/openai_agents/interceptors/__init__.py,sha256=hrj6lRPi9nb_HAohRK4oPnaji69QQ6brj-Wu2q0mU0s,521
  agentex/lib/core/temporal/plugins/openai_agents/interceptors/context_interceptor.py,sha256=sBLJonJJ5Ke1BJIlzbqtGeO5p8NIbvftbEYQbjgeZCE,7256
  agentex/lib/core/temporal/plugins/openai_agents/models/__init__.py,sha256=FeTt91JkSfYLlCTdrVFpjcQ0asbQyCd6Rl5efqZkslo,791
- agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=_lHjIlHsqAbFqsj8PXgqyUbdsr6gy7Yvot-YnLxj4fc,42962
+ agentex/lib/core/temporal/plugins/openai_agents/models/temporal_streaming_model.py,sha256=elOGyx5UyRVCkvGi6sNFucRareG7zs4gT4kv_U4Cy1o,45372
  agentex/lib/core/temporal/plugins/openai_agents/models/temporal_tracing_model.py,sha256=BiuIhSvyNfocwMYQtxOoqgMpyJsMHLkyXzYPYnw4ChA,17458
  agentex/lib/core/temporal/plugins/openai_agents/tests/__init__.py,sha256=suEVJuonfBoVZ3IqdO0UMn0hkFFzDqRoso0VEOit-KQ,80
  agentex/lib/core/temporal/plugins/openai_agents/tests/conftest.py,sha256=oMI_3dVn6DoiLgCjRVUeQE_Z2Gz3tGTwPxTQ1krjKSE,7692
@@ -202,7 +202,7 @@ agentex/lib/sdk/config/project_config.py,sha256=uMrg9BqEQFcnqdlqqSLYsaQkP1mMedhE
  agentex/lib/sdk/config/validation.py,sha256=ox8g2vwjYsmfNcz4G-sbPw0ccWjylJRG5bufTEPQMCk,9024
  agentex/lib/sdk/fastacp/__init__.py,sha256=UvAdexdnfb4z0F4a2sfXROFyh9EjH89kf3AxHPybzCM,75
  agentex/lib/sdk/fastacp/fastacp.py,sha256=3aT74pFwF76VoTbQnGZsF6As42aLa2o_JrO6EP_XHQM,4591
- agentex/lib/sdk/fastacp/base/base_acp_server.py,sha256=1ltdXNidNE1SeVYBmwf4WxGRRn6eb2u9hn6SOOgp2g0,16932
+ agentex/lib/sdk/fastacp/base/base_acp_server.py,sha256=W2rMZUC-5GLvLJsLFKZHtmyG9Uhrsgffqo9qcomThsQ,17163
  agentex/lib/sdk/fastacp/base/constants.py,sha256=FxhXqdaqazQIxFTfAMzl4wx50TMCzBvoNtRI7hUdL2o,837
  agentex/lib/sdk/fastacp/impl/async_base_acp.py,sha256=xT95pQ-jQpDtBpB8-Z_ZWNuwG9eXgJEluGzQ7vfo8UE,2675
  agentex/lib/sdk/fastacp/impl/sync_acp.py,sha256=0yNaWr9k28U3jKucKRoV8a53LsPyfyqwlJupe6e5pv0,3933
@@ -330,8 +330,8 @@ agentex/types/messages/batch_update_params.py,sha256=Ug5CThbD49a8j4qucg04OdmVrp_
  agentex/types/messages/batch_update_response.py,sha256=TbSBe6SuPzjXXWSj-nRjT1JHGBooTshHQQDa1AixQA8,278
  agentex/types/shared/__init__.py,sha256=IKs-Qn5Yja0kFh1G1kDqYZo43qrOu1hSoxlPdN-85dI,149
  agentex/types/shared/delete_response.py,sha256=8qH3zvQXaOHYQSHyXi7UQxdR4miTzR7V9K4zXVsiUyk,215
- agentex_sdk-0.6.4.dist-info/METADATA,sha256=jsfcoPc8yA4e9Ve522MCmyeYbI6JjE3K-igsLjtRDso,15375
- agentex_sdk-0.6.4.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- agentex_sdk-0.6.4.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
- agentex_sdk-0.6.4.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
- agentex_sdk-0.6.4.dist-info/RECORD,,
+ agentex_sdk-0.6.7.dist-info/METADATA,sha256=-kCWZMsYVsBTLgWWy_4SDsA0NXu9tTShBQ3cxlvYFfk,15404
+ agentex_sdk-0.6.7.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ agentex_sdk-0.6.7.dist-info/entry_points.txt,sha256=V7vJuMZdF0UlvgX6KiBN7XUvq_cxF5kplcYvc1QlFaQ,62
+ agentex_sdk-0.6.7.dist-info/licenses/LICENSE,sha256=Q1AOx2FtRcMlyMgQJ9eVN2WKPq2mQ33lnB4tvWxabLA,11337
+ agentex_sdk-0.6.7.dist-info/RECORD,,