openai-agents 0.3.3__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of openai-agents might be problematic. Click here for more details.

agents/__init__.py CHANGED
@@ -81,6 +81,12 @@ from .tool import (
81
81
  MCPToolApprovalFunctionResult,
82
82
  MCPToolApprovalRequest,
83
83
  Tool,
84
+ ToolOutputFileContent,
85
+ ToolOutputFileContentDict,
86
+ ToolOutputImage,
87
+ ToolOutputImageDict,
88
+ ToolOutputText,
89
+ ToolOutputTextDict,
84
90
  WebSearchTool,
85
91
  default_tool_error_function,
86
92
  function_tool,
@@ -273,6 +279,12 @@ __all__ = [
273
279
  "MCPToolApprovalFunction",
274
280
  "MCPToolApprovalRequest",
275
281
  "MCPToolApprovalFunctionResult",
282
+ "ToolOutputText",
283
+ "ToolOutputTextDict",
284
+ "ToolOutputImage",
285
+ "ToolOutputImageDict",
286
+ "ToolOutputFileContent",
287
+ "ToolOutputFileContentDict",
276
288
  "function_tool",
277
289
  "Usage",
278
290
  "add_trace_processor",
agents/_run_impl.py CHANGED
@@ -267,10 +267,11 @@ class RunImpl:
267
267
  new_step_items: list[RunItem] = []
268
268
  new_step_items.extend(processed_response.new_items)
269
269
 
270
- # First, lets run the tool calls - function tools and computer actions
270
+ # First, lets run the tool calls - function tools, computer actions, and local shell calls
271
271
  (
272
272
  (function_results, tool_input_guardrail_results, tool_output_guardrail_results),
273
273
  computer_results,
274
+ local_shell_results,
274
275
  ) = await asyncio.gather(
275
276
  cls.execute_function_tool_calls(
276
277
  agent=agent,
@@ -286,9 +287,17 @@ class RunImpl:
286
287
  context_wrapper=context_wrapper,
287
288
  config=run_config,
288
289
  ),
290
+ cls.execute_local_shell_calls(
291
+ agent=agent,
292
+ calls=processed_response.local_shell_calls,
293
+ hooks=hooks,
294
+ context_wrapper=context_wrapper,
295
+ config=run_config,
296
+ ),
289
297
  )
290
298
  new_step_items.extend([result.run_item for result in function_results])
291
299
  new_step_items.extend(computer_results)
300
+ new_step_items.extend(local_shell_results)
292
301
 
293
302
  # Next, run the MCP approval requests
294
303
  if processed_response.mcp_approval_requests:
@@ -823,7 +832,7 @@ class RunImpl:
823
832
  output=result,
824
833
  run_item=ToolCallOutputItem(
825
834
  output=result,
826
- raw_item=ItemHelpers.tool_call_output_item(tool_run.tool_call, str(result)),
835
+ raw_item=ItemHelpers.tool_call_output_item(tool_run.tool_call, result),
827
836
  agent=agent,
828
837
  ),
829
838
  )
@@ -1163,6 +1172,8 @@ class RunImpl:
1163
1172
  event = RunItemStreamEvent(item=item, name="reasoning_item_created")
1164
1173
  elif isinstance(item, MCPApprovalRequestItem):
1165
1174
  event = RunItemStreamEvent(item=item, name="mcp_approval_requested")
1175
+ elif isinstance(item, MCPApprovalResponseItem):
1176
+ event = RunItemStreamEvent(item=item, name="mcp_approval_response")
1166
1177
  elif isinstance(item, MCPListToolsItem):
1167
1178
  event = RunItemStreamEvent(item=item, name="mcp_list_tools")
1168
1179
 
@@ -1414,12 +1425,13 @@ class LocalShellAction:
1414
1425
 
1415
1426
  return ToolCallOutputItem(
1416
1427
  agent=agent,
1417
- output=output,
1418
- raw_item={
1428
+ output=result,
1429
+ # LocalShellCallOutput type uses the field name "id", but the server wants "call_id".
1430
+ # raw_item keeps the upstream type, so we ignore the type checker here.
1431
+ raw_item={ # type: ignore[misc, arg-type]
1419
1432
  "type": "local_shell_call_output",
1420
- "id": call.tool_call.call_id,
1433
+ "call_id": call.tool_call.call_id,
1421
1434
  "output": result,
1422
- # "id": "out" + call.tool_call.id, # TODO remove this, it should be optional
1423
1435
  },
1424
1436
  )
1425
1437
 
@@ -58,8 +58,6 @@ def __getattr__(name: str) -> Any:
58
58
 
59
59
  return AdvancedSQLiteSession
60
60
  except ModuleNotFoundError as e:
61
- raise ImportError(
62
- f"Failed to import AdvancedSQLiteSession: {e}"
63
- ) from e
61
+ raise ImportError(f"Failed to import AdvancedSQLiteSession: {e}") from e
64
62
 
65
63
  raise AttributeError(f"module {__name__} has no attribute {name}")
@@ -195,7 +195,10 @@ class SQLAlchemySession(SessionABC):
195
195
  stmt = (
196
196
  select(self._messages.c.message_data)
197
197
  .where(self._messages.c.session_id == self.session_id)
198
- .order_by(self._messages.c.created_at.asc())
198
+ .order_by(
199
+ self._messages.c.created_at.asc(),
200
+ self._messages.c.id.asc(),
201
+ )
199
202
  )
200
203
  else:
201
204
  stmt = (
@@ -203,7 +206,10 @@ class SQLAlchemySession(SessionABC):
203
206
  .where(self._messages.c.session_id == self.session_id)
204
207
  # Use DESC + LIMIT to get the latest N
205
208
  # then reverse later for chronological order.
206
- .order_by(self._messages.c.created_at.desc())
209
+ .order_by(
210
+ self._messages.c.created_at.desc(),
211
+ self._messages.c.id.desc(),
212
+ )
207
213
  .limit(limit)
208
214
  )
209
215
 
@@ -278,7 +284,10 @@ class SQLAlchemySession(SessionABC):
278
284
  subq = (
279
285
  select(self._messages.c.id)
280
286
  .where(self._messages.c.session_id == self.session_id)
281
- .order_by(self._messages.c.created_at.desc())
287
+ .order_by(
288
+ self._messages.c.created_at.desc(),
289
+ self._messages.c.id.desc(),
290
+ )
282
291
  .limit(1)
283
292
  )
284
293
  res = await sess.execute(subq)
@@ -310,3 +319,16 @@ class SQLAlchemySession(SessionABC):
310
319
  await sess.execute(
311
320
  delete(self._sessions).where(self._sessions.c.session_id == self.session_id)
312
321
  )
322
+
323
+ @property
324
+ def engine(self) -> AsyncEngine:
325
+ """Access the underlying SQLAlchemy AsyncEngine.
326
+
327
+ This property provides direct access to the engine for advanced use cases,
328
+ such as checking connection pool status, configuring engine settings,
329
+ or manually disposing the engine when needed.
330
+
331
+ Returns:
332
+ AsyncEngine: The SQLAlchemy async engine instance.
333
+ """
334
+ return self._engine
@@ -18,7 +18,7 @@ except ImportError as _e:
18
18
  "dependency group: `pip install 'openai-agents[litellm]'`."
19
19
  ) from _e
20
20
 
21
- from openai import NOT_GIVEN, AsyncStream, NotGiven
21
+ from openai import AsyncStream, NotGiven, omit
22
22
  from openai.types.chat import (
23
23
  ChatCompletionChunk,
24
24
  ChatCompletionMessageCustomToolCall,
@@ -44,6 +44,7 @@ from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
44
44
  from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
45
45
  from ...models.fake_id import FAKE_RESPONSES_ID
46
46
  from ...models.interface import Model, ModelTracing
47
+ from ...models.openai_responses import Converter as OpenAIResponsesConverter
47
48
  from ...tool import Tool
48
49
  from ...tracing import generation_span
49
50
  from ...tracing.span_data import GenerationSpanData
@@ -269,7 +270,7 @@ class LitellmModel(Model):
269
270
  )
270
271
 
271
272
  # Fix for interleaved thinking bug: reorder messages to ensure tool_use comes before tool_result # noqa: E501
272
- if preserve_thinking_blocks:
273
+ if "anthropic" in self.model.lower() or "claude" in self.model.lower():
273
274
  converted_messages = self._fix_tool_message_ordering(converted_messages)
274
275
 
275
276
  if system_instructions:
@@ -367,15 +368,19 @@ class LitellmModel(Model):
367
368
  if isinstance(ret, litellm.types.utils.ModelResponse):
368
369
  return ret
369
370
 
371
+ responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
372
+ model_settings.tool_choice
373
+ )
374
+ if responses_tool_choice is None or responses_tool_choice is omit:
375
+ responses_tool_choice = "auto"
376
+
370
377
  response = Response(
371
378
  id=FAKE_RESPONSES_ID,
372
379
  created_at=time.time(),
373
380
  model=self.model,
374
381
  object="response",
375
382
  output=[],
376
- tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
377
- if tool_choice != NOT_GIVEN
378
- else "auto",
383
+ tool_choice=responses_tool_choice, # type: ignore[arg-type]
379
384
  top_p=model_settings.top_p,
380
385
  temperature=model_settings.temperature,
381
386
  tools=[],
@@ -500,7 +505,7 @@ class LitellmModel(Model):
500
505
  return fixed_messages
501
506
 
502
507
  def _remove_not_given(self, value: Any) -> Any:
503
- if isinstance(value, NotGiven):
508
+ if value is omit or isinstance(value, NotGiven):
504
509
  return None
505
510
  return value
506
511
 
agents/items.py CHANGED
@@ -21,6 +21,12 @@ from openai.types.responses import (
21
21
  from openai.types.responses.response_code_interpreter_tool_call import (
22
22
  ResponseCodeInterpreterToolCall,
23
23
  )
24
+ from openai.types.responses.response_function_call_output_item_list_param import (
25
+ ResponseFunctionCallOutputItemListParam,
26
+ ResponseFunctionCallOutputItemParam,
27
+ )
28
+ from openai.types.responses.response_input_file_content_param import ResponseInputFileContentParam
29
+ from openai.types.responses.response_input_image_content_param import ResponseInputImageContentParam
24
30
  from openai.types.responses.response_input_item_param import (
25
31
  ComputerCallOutput,
26
32
  FunctionCallOutput,
@@ -36,9 +42,17 @@ from openai.types.responses.response_output_item import (
36
42
  )
37
43
  from openai.types.responses.response_reasoning_item import ResponseReasoningItem
38
44
  from pydantic import BaseModel
39
- from typing_extensions import TypeAlias
45
+ from typing_extensions import TypeAlias, assert_never
40
46
 
41
47
  from .exceptions import AgentsException, ModelBehaviorError
48
+ from .logger import logger
49
+ from .tool import (
50
+ ToolOutputFileContent,
51
+ ToolOutputImage,
52
+ ToolOutputText,
53
+ ValidToolOutputPydanticModels,
54
+ ValidToolOutputPydanticModelsTypeAdapter,
55
+ )
42
56
  from .usage import Usage
43
57
 
44
58
  if TYPE_CHECKING:
@@ -298,11 +312,96 @@ class ItemHelpers:
298
312
 
299
313
  @classmethod
300
314
  def tool_call_output_item(
301
- cls, tool_call: ResponseFunctionToolCall, output: str
315
+ cls, tool_call: ResponseFunctionToolCall, output: Any
302
316
  ) -> FunctionCallOutput:
303
- """Creates a tool call output item from a tool call and its output."""
317
+ """Creates a tool call output item from a tool call and its output.
318
+
319
+ Accepts either plain values (stringified) or structured outputs using
320
+ input_text/input_image/input_file shapes. Structured outputs may be
321
+ provided as Pydantic models or dicts, or an iterable of such items.
322
+ """
323
+
324
+ converted_output = cls._convert_tool_output(output)
325
+
304
326
  return {
305
327
  "call_id": tool_call.call_id,
306
- "output": output,
328
+ "output": converted_output,
307
329
  "type": "function_call_output",
308
330
  }
331
+
332
+ @classmethod
333
+ def _convert_tool_output(cls, output: Any) -> str | ResponseFunctionCallOutputItemListParam:
334
+ """Converts a tool return value into an output acceptable by the Responses API."""
335
+
336
+ # If the output is either a single or list of the known structured output types, convert to
337
+ # ResponseFunctionCallOutputItemListParam. Else, just stringify.
338
+ if isinstance(output, (list, tuple)):
339
+ maybe_converted_output_list = [
340
+ cls._maybe_get_output_as_structured_function_output(item) for item in output
341
+ ]
342
+ if all(maybe_converted_output_list):
343
+ return [
344
+ cls._convert_single_tool_output_pydantic_model(item)
345
+ for item in maybe_converted_output_list
346
+ if item is not None
347
+ ]
348
+ else:
349
+ return str(output)
350
+ else:
351
+ maybe_converted_output = cls._maybe_get_output_as_structured_function_output(output)
352
+ if maybe_converted_output:
353
+ return [cls._convert_single_tool_output_pydantic_model(maybe_converted_output)]
354
+ else:
355
+ return str(output)
356
+
357
+ @classmethod
358
+ def _maybe_get_output_as_structured_function_output(
359
+ cls, output: Any
360
+ ) -> ValidToolOutputPydanticModels | None:
361
+ if isinstance(output, (ToolOutputText, ToolOutputImage, ToolOutputFileContent)):
362
+ return output
363
+ elif isinstance(output, dict):
364
+ # Require explicit 'type' field in dict to be considered a structured output
365
+ if "type" not in output:
366
+ return None
367
+ try:
368
+ return ValidToolOutputPydanticModelsTypeAdapter.validate_python(output)
369
+ except pydantic.ValidationError:
370
+ logger.debug("dict was not a valid tool output pydantic model")
371
+ return None
372
+
373
+ return None
374
+
375
+ @classmethod
376
+ def _convert_single_tool_output_pydantic_model(
377
+ cls, output: ValidToolOutputPydanticModels
378
+ ) -> ResponseFunctionCallOutputItemParam:
379
+ if isinstance(output, ToolOutputText):
380
+ return {"type": "input_text", "text": output.text}
381
+ elif isinstance(output, ToolOutputImage):
382
+ # Forward all provided optional fields so the Responses API receives
383
+ # the correct identifiers and settings for the image resource.
384
+ result: ResponseInputImageContentParam = {"type": "input_image"}
385
+ if output.image_url is not None:
386
+ result["image_url"] = output.image_url
387
+ if output.file_id is not None:
388
+ result["file_id"] = output.file_id
389
+ if output.detail is not None:
390
+ result["detail"] = output.detail
391
+ return result
392
+ elif isinstance(output, ToolOutputFileContent):
393
+ # Forward all provided optional fields so the Responses API receives
394
+ # the correct identifiers and metadata for the file resource.
395
+ result_file: ResponseInputFileContentParam = {"type": "input_file"}
396
+ if output.file_data is not None:
397
+ result_file["file_data"] = output.file_data
398
+ if output.file_url is not None:
399
+ result_file["file_url"] = output.file_url
400
+ if output.file_id is not None:
401
+ result_file["file_id"] = output.file_id
402
+ if output.filename is not None:
403
+ result_file["filename"] = output.filename
404
+ return result_file
405
+ else:
406
+ assert_never(output)
407
+ raise ValueError(f"Unexpected tool output type: {output}")
agents/mcp/server.py CHANGED
@@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar
11
11
 
12
12
  from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
13
13
  from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
14
+ from mcp.client.session import MessageHandlerFnT
14
15
  from mcp.client.sse import sse_client
15
16
  from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client
16
17
  from mcp.shared.message import SessionMessage
@@ -20,7 +21,7 @@ from typing_extensions import NotRequired, TypedDict
20
21
  from ..exceptions import UserError
21
22
  from ..logger import logger
22
23
  from ..run_context import RunContextWrapper
23
- from .util import ToolFilter, ToolFilterContext, ToolFilterStatic
24
+ from .util import HttpClientFactory, ToolFilter, ToolFilterContext, ToolFilterStatic
24
25
 
25
26
  T = TypeVar("T")
26
27
 
@@ -103,6 +104,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
103
104
  use_structured_content: bool = False,
104
105
  max_retry_attempts: int = 0,
105
106
  retry_backoff_seconds_base: float = 1.0,
107
+ message_handler: MessageHandlerFnT | None = None,
106
108
  ):
107
109
  """
108
110
  Args:
@@ -124,6 +126,8 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
124
126
  Defaults to no retries.
125
127
  retry_backoff_seconds_base: The base delay, in seconds, used for exponential
126
128
  backoff between retries.
129
+ message_handler: Optional handler invoked for session messages as delivered by the
130
+ ClientSession.
127
131
  """
128
132
  super().__init__(use_structured_content=use_structured_content)
129
133
  self.session: ClientSession | None = None
@@ -135,6 +139,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
135
139
  self.client_session_timeout_seconds = client_session_timeout_seconds
136
140
  self.max_retry_attempts = max_retry_attempts
137
141
  self.retry_backoff_seconds_base = retry_backoff_seconds_base
142
+ self.message_handler = message_handler
138
143
 
139
144
  # The cache is always dirty at startup, so that we fetch tools at least once
140
145
  self._cache_dirty = True
@@ -272,6 +277,7 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
272
277
  timedelta(seconds=self.client_session_timeout_seconds)
273
278
  if self.client_session_timeout_seconds
274
279
  else None,
280
+ message_handler=self.message_handler,
275
281
  )
276
282
  )
277
283
  server_result = await session.initialize()
@@ -394,6 +400,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
394
400
  use_structured_content: bool = False,
395
401
  max_retry_attempts: int = 0,
396
402
  retry_backoff_seconds_base: float = 1.0,
403
+ message_handler: MessageHandlerFnT | None = None,
397
404
  ):
398
405
  """Create a new MCP server based on the stdio transport.
399
406
 
@@ -421,6 +428,8 @@ class MCPServerStdio(_MCPServerWithClientSession):
421
428
  Defaults to no retries.
422
429
  retry_backoff_seconds_base: The base delay, in seconds, for exponential
423
430
  backoff between retries.
431
+ message_handler: Optional handler invoked for session messages as delivered by the
432
+ ClientSession.
424
433
  """
425
434
  super().__init__(
426
435
  cache_tools_list,
@@ -429,6 +438,7 @@ class MCPServerStdio(_MCPServerWithClientSession):
429
438
  use_structured_content,
430
439
  max_retry_attempts,
431
440
  retry_backoff_seconds_base,
441
+ message_handler=message_handler,
432
442
  )
433
443
 
434
444
  self.params = StdioServerParameters(
@@ -492,6 +502,7 @@ class MCPServerSse(_MCPServerWithClientSession):
492
502
  use_structured_content: bool = False,
493
503
  max_retry_attempts: int = 0,
494
504
  retry_backoff_seconds_base: float = 1.0,
505
+ message_handler: MessageHandlerFnT | None = None,
495
506
  ):
496
507
  """Create a new MCP server based on the HTTP with SSE transport.
497
508
 
@@ -521,6 +532,8 @@ class MCPServerSse(_MCPServerWithClientSession):
521
532
  Defaults to no retries.
522
533
  retry_backoff_seconds_base: The base delay, in seconds, for exponential
523
534
  backoff between retries.
535
+ message_handler: Optional handler invoked for session messages as delivered by the
536
+ ClientSession.
524
537
  """
525
538
  super().__init__(
526
539
  cache_tools_list,
@@ -529,6 +542,7 @@ class MCPServerSse(_MCPServerWithClientSession):
529
542
  use_structured_content,
530
543
  max_retry_attempts,
531
544
  retry_backoff_seconds_base,
545
+ message_handler=message_handler,
532
546
  )
533
547
 
534
548
  self.params = params
@@ -575,6 +589,9 @@ class MCPServerStreamableHttpParams(TypedDict):
575
589
  terminate_on_close: NotRequired[bool]
576
590
  """Terminate on close"""
577
591
 
592
+ httpx_client_factory: NotRequired[HttpClientFactory]
593
+ """Custom HTTP client factory for configuring httpx.AsyncClient behavior."""
594
+
578
595
 
579
596
  class MCPServerStreamableHttp(_MCPServerWithClientSession):
580
597
  """MCP server implementation that uses the Streamable HTTP transport. See the [spec]
@@ -592,14 +609,15 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
592
609
  use_structured_content: bool = False,
593
610
  max_retry_attempts: int = 0,
594
611
  retry_backoff_seconds_base: float = 1.0,
612
+ message_handler: MessageHandlerFnT | None = None,
595
613
  ):
596
614
  """Create a new MCP server based on the Streamable HTTP transport.
597
615
 
598
616
  Args:
599
617
  params: The params that configure the server. This includes the URL of the server,
600
- the headers to send to the server, the timeout for the HTTP request, and the
601
- timeout for the Streamable HTTP connection and whether we need to
602
- terminate on close.
618
+ the headers to send to the server, the timeout for the HTTP request, the
619
+ timeout for the Streamable HTTP connection, whether we need to
620
+ terminate on close, and an optional custom HTTP client factory.
603
621
 
604
622
  cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
605
623
  cached and only fetched from the server once. If `False`, the tools list will be
@@ -622,6 +640,8 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
622
640
  Defaults to no retries.
623
641
  retry_backoff_seconds_base: The base delay, in seconds, for exponential
624
642
  backoff between retries.
643
+ message_handler: Optional handler invoked for session messages as delivered by the
644
+ ClientSession.
625
645
  """
626
646
  super().__init__(
627
647
  cache_tools_list,
@@ -630,6 +650,7 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
630
650
  use_structured_content,
631
651
  max_retry_attempts,
632
652
  retry_backoff_seconds_base,
653
+ message_handler=message_handler,
633
654
  )
634
655
 
635
656
  self.params = params
@@ -645,13 +666,24 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
645
666
  ]
646
667
  ]:
647
668
  """Create the streams for the server."""
648
- return streamablehttp_client(
649
- url=self.params["url"],
650
- headers=self.params.get("headers", None),
651
- timeout=self.params.get("timeout", 5),
652
- sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
653
- terminate_on_close=self.params.get("terminate_on_close", True),
654
- )
669
+ # Only pass httpx_client_factory if it's provided
670
+ if "httpx_client_factory" in self.params:
671
+ return streamablehttp_client(
672
+ url=self.params["url"],
673
+ headers=self.params.get("headers", None),
674
+ timeout=self.params.get("timeout", 5),
675
+ sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
676
+ terminate_on_close=self.params.get("terminate_on_close", True),
677
+ httpx_client_factory=self.params["httpx_client_factory"],
678
+ )
679
+ else:
680
+ return streamablehttp_client(
681
+ url=self.params["url"],
682
+ headers=self.params.get("headers", None),
683
+ timeout=self.params.get("timeout", 5),
684
+ sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
685
+ terminate_on_close=self.params.get("terminate_on_close", True),
686
+ )
655
687
 
656
688
  @property
657
689
  def name(self) -> str:
agents/mcp/util.py CHANGED
@@ -1,8 +1,9 @@
1
1
  import functools
2
2
  import json
3
3
  from dataclasses import dataclass
4
- from typing import TYPE_CHECKING, Any, Callable, Optional, Union
4
+ from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol, Union
5
5
 
6
+ import httpx
6
7
  from typing_extensions import NotRequired, TypedDict
7
8
 
8
9
  from .. import _debug
@@ -21,6 +22,21 @@ if TYPE_CHECKING:
21
22
  from .server import MCPServer
22
23
 
23
24
 
25
+ class HttpClientFactory(Protocol):
26
+ """Protocol for HTTP client factory functions.
27
+
28
+ This interface matches the MCP SDK's McpHttpClientFactory but is defined locally
29
+ to avoid accessing internal MCP SDK modules.
30
+ """
31
+
32
+ def __call__(
33
+ self,
34
+ headers: Optional[dict[str, str]] = None,
35
+ timeout: Optional[httpx.Timeout] = None,
36
+ auth: Optional[httpx.Auth] = None,
37
+ ) -> httpx.AsyncClient: ...
38
+
39
+
24
40
  @dataclass
25
41
  class ToolFilterContext:
26
42
  """Context information available to tool filter functions."""
@@ -50,7 +50,7 @@ class OpenAIConversationsSession(SessionABC):
50
50
  order="asc",
51
51
  ):
52
52
  # calling model_dump() to make this serializable
53
- all_items.append(item.model_dump())
53
+ all_items.append(item.model_dump(exclude_unset=True))
54
54
  else:
55
55
  async for item in self._openai_client.conversations.items.list(
56
56
  conversation_id=session_id,
@@ -58,7 +58,7 @@ class OpenAIConversationsSession(SessionABC):
58
58
  order="desc",
59
59
  ):
60
60
  # calling model_dump() to make this serializable
61
- all_items.append(item.model_dump())
61
+ all_items.append(item.model_dump(exclude_unset=True))
62
62
  if limit is not None and len(all_items) >= limit:
63
63
  break
64
64
  all_items.reverse()