fastmcp 2.14.0__py3-none-any.whl → 2.14.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. fastmcp/client/client.py +79 -12
  2. fastmcp/client/sampling/__init__.py +69 -0
  3. fastmcp/client/sampling/handlers/__init__.py +0 -0
  4. fastmcp/client/sampling/handlers/anthropic.py +387 -0
  5. fastmcp/client/sampling/handlers/openai.py +399 -0
  6. fastmcp/client/tasks.py +0 -63
  7. fastmcp/client/transports.py +35 -16
  8. fastmcp/experimental/sampling/handlers/__init__.py +5 -0
  9. fastmcp/experimental/sampling/handlers/openai.py +4 -169
  10. fastmcp/prompts/prompt.py +5 -5
  11. fastmcp/prompts/prompt_manager.py +3 -4
  12. fastmcp/resources/resource.py +4 -4
  13. fastmcp/resources/resource_manager.py +9 -14
  14. fastmcp/resources/template.py +5 -5
  15. fastmcp/server/auth/auth.py +20 -5
  16. fastmcp/server/auth/oauth_proxy.py +73 -15
  17. fastmcp/server/auth/providers/supabase.py +11 -6
  18. fastmcp/server/context.py +448 -113
  19. fastmcp/server/dependencies.py +5 -0
  20. fastmcp/server/elicitation.py +7 -3
  21. fastmcp/server/middleware/error_handling.py +1 -1
  22. fastmcp/server/openapi/components.py +2 -4
  23. fastmcp/server/proxy.py +3 -3
  24. fastmcp/server/sampling/__init__.py +10 -0
  25. fastmcp/server/sampling/run.py +301 -0
  26. fastmcp/server/sampling/sampling_tool.py +108 -0
  27. fastmcp/server/server.py +84 -78
  28. fastmcp/server/tasks/converters.py +2 -1
  29. fastmcp/tools/tool.py +8 -6
  30. fastmcp/tools/tool_manager.py +5 -7
  31. fastmcp/utilities/cli.py +23 -43
  32. fastmcp/utilities/json_schema.py +40 -0
  33. fastmcp/utilities/openapi/schemas.py +4 -4
  34. {fastmcp-2.14.0.dist-info → fastmcp-2.14.2.dist-info}/METADATA +8 -3
  35. {fastmcp-2.14.0.dist-info → fastmcp-2.14.2.dist-info}/RECORD +38 -34
  36. fastmcp/client/sampling.py +0 -56
  37. fastmcp/experimental/sampling/handlers/base.py +0 -21
  38. fastmcp/server/sampling/handler.py +0 -19
  39. {fastmcp-2.14.0.dist-info → fastmcp-2.14.2.dist-info}/WHEEL +0 -0
  40. {fastmcp-2.14.0.dist-info → fastmcp-2.14.2.dist-info}/entry_points.txt +0 -0
  41. {fastmcp-2.14.0.dist-info → fastmcp-2.14.2.dist-info}/licenses/LICENSE +0 -0
fastmcp/client/sampling/handlers/openai.py ADDED
@@ -0,0 +1,399 @@
+ """OpenAI sampling handler for FastMCP."""
+
+ import json
+ from collections.abc import Iterator, Sequence
+ from typing import Any, get_args
+
+ from mcp import ClientSession, ServerSession
+ from mcp.shared.context import LifespanContextT, RequestContext
+ from mcp.types import CreateMessageRequestParams as SamplingParams
+ from mcp.types import (
+     CreateMessageResult,
+     CreateMessageResultWithTools,
+     ModelPreferences,
+     SamplingMessage,
+     StopReason,
+     TextContent,
+     Tool,
+     ToolChoice,
+     ToolResultContent,
+     ToolUseContent,
+ )
+
+ try:
+     from openai import NOT_GIVEN, AsyncOpenAI, NotGiven
+     from openai.types.chat import (
+         ChatCompletion,
+         ChatCompletionAssistantMessageParam,
+         ChatCompletionMessageParam,
+         ChatCompletionMessageToolCallParam,
+         ChatCompletionSystemMessageParam,
+         ChatCompletionToolChoiceOptionParam,
+         ChatCompletionToolMessageParam,
+         ChatCompletionToolParam,
+         ChatCompletionUserMessageParam,
+     )
+     from openai.types.shared.chat_model import ChatModel
+     from openai.types.shared_params import FunctionDefinition
+ except ImportError as e:
+     raise ImportError(
+         "The `openai` package is not installed. "
+         "Please install `fastmcp[openai]` or add `openai` to your dependencies manually."
+     ) from e
+
+
+ class OpenAISamplingHandler:
+     """Sampling handler that uses the OpenAI API."""
+
+     def __init__(
+         self,
+         default_model: ChatModel,
+         client: AsyncOpenAI | None = None,
+     ) -> None:
+         self.client: AsyncOpenAI = client or AsyncOpenAI()
+         self.default_model: ChatModel = default_model
+
+     async def __call__(
+         self,
+         messages: list[SamplingMessage],
+         params: SamplingParams,
+         context: RequestContext[ServerSession, LifespanContextT]
+         | RequestContext[ClientSession, LifespanContextT],
+     ) -> CreateMessageResult | CreateMessageResultWithTools:
+         openai_messages: list[ChatCompletionMessageParam] = (
+             self._convert_to_openai_messages(
+                 system_prompt=params.systemPrompt,
+                 messages=messages,
+             )
+         )
+
+         model: ChatModel = self._select_model_from_preferences(params.modelPreferences)
+
+         # Convert MCP tools to OpenAI format
+         openai_tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN
+         if params.tools:
+             openai_tools = self._convert_tools_to_openai(params.tools)
+
+         # Convert tool_choice to OpenAI format
+         openai_tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN
+         if params.toolChoice:
+             openai_tool_choice = self._convert_tool_choice_to_openai(params.toolChoice)
+
+         response = await self.client.chat.completions.create(
+             model=model,
+             messages=openai_messages,
+             temperature=(
+                 params.temperature if params.temperature is not None else NOT_GIVEN
+             ),
+             max_tokens=params.maxTokens,
+             stop=params.stopSequences if params.stopSequences else NOT_GIVEN,
+             tools=openai_tools,
+             tool_choice=openai_tool_choice,
+         )
+
+         # Return appropriate result type based on whether tools were provided
+         if params.tools:
+             return self._chat_completion_to_result_with_tools(response)
+         return self._chat_completion_to_create_message_result(response)
+
+     @staticmethod
+     def _iter_models_from_preferences(
+         model_preferences: ModelPreferences | str | list[str] | None,
+     ) -> Iterator[str]:
+         if model_preferences is None:
+             return
+
+         if isinstance(model_preferences, str) and model_preferences in get_args(
+             ChatModel
+         ):
+             yield model_preferences
+
+         elif isinstance(model_preferences, list):
+             yield from model_preferences
+
+         elif isinstance(model_preferences, ModelPreferences):
+             if not (hints := model_preferences.hints):
+                 return
+
+             for hint in hints:
+                 if not (name := hint.name):
+                     continue
+
+                 yield name
+
+     @staticmethod
+     def _convert_to_openai_messages(
+         system_prompt: str | None, messages: Sequence[SamplingMessage]
+     ) -> list[ChatCompletionMessageParam]:
+         openai_messages: list[ChatCompletionMessageParam] = []
+
+         if system_prompt:
+             openai_messages.append(
+                 ChatCompletionSystemMessageParam(
+                     role="system",
+                     content=system_prompt,
+                 )
+             )
+
+         for message in messages:
+             content = message.content
+
+             # Handle list content (from CreateMessageResultWithTools)
+             if isinstance(content, list):
+                 # Collect tool calls and text from the list
+                 tool_calls: list[ChatCompletionMessageToolCallParam] = []
+                 text_parts: list[str] = []
+                 # Collect tool results separately to maintain correct ordering
+                 tool_messages: list[ChatCompletionToolMessageParam] = []
+
+                 for item in content:
+                     if isinstance(item, ToolUseContent):
+                         tool_calls.append(
+                             ChatCompletionMessageToolCallParam(
+                                 id=item.id,
+                                 type="function",
+                                 function={
+                                     "name": item.name,
+                                     "arguments": json.dumps(item.input),
+                                 },
+                             )
+                         )
+                     elif isinstance(item, TextContent):
+                         text_parts.append(item.text)
+                     elif isinstance(item, ToolResultContent):
+                         # Collect tool results (added after assistant message)
+                         content_text = ""
+                         if item.content:
+                             result_texts = []
+                             for sub_item in item.content:
+                                 if isinstance(sub_item, TextContent):
+                                     result_texts.append(sub_item.text)
+                             content_text = "\n".join(result_texts)
+                         tool_messages.append(
+                             ChatCompletionToolMessageParam(
+                                 role="tool",
+                                 tool_call_id=item.toolUseId,
+                                 content=content_text,
+                             )
+                         )
+
+                 # Add assistant message with tool calls if present
+                 # OpenAI requires: assistant (with tool_calls) -> tool messages
+                 if tool_calls or text_parts:
+                     msg_content = "\n".join(text_parts) if text_parts else None
+                     if tool_calls:
+                         openai_messages.append(
+                             ChatCompletionAssistantMessageParam(
+                                 role="assistant",
+                                 content=msg_content,
+                                 tool_calls=tool_calls,
+                             )
+                         )
+                         # Add tool messages AFTER assistant message
+                         openai_messages.extend(tool_messages)
+                     elif msg_content:
+                         if message.role == "user":
+                             openai_messages.append(
+                                 ChatCompletionUserMessageParam(
+                                     role="user",
+                                     content=msg_content,
+                                 )
+                             )
+                         else:
+                             openai_messages.append(
+                                 ChatCompletionAssistantMessageParam(
+                                     role="assistant",
+                                     content=msg_content,
+                                 )
+                             )
+                 elif tool_messages:
+                     # Tool results only (assistant message was in previous message)
+                     openai_messages.extend(tool_messages)
+                 continue
+
+             # Handle ToolUseContent (assistant's tool calls)
+             if isinstance(content, ToolUseContent):
+                 openai_messages.append(
+                     ChatCompletionAssistantMessageParam(
+                         role="assistant",
+                         tool_calls=[
+                             ChatCompletionMessageToolCallParam(
+                                 id=content.id,
+                                 type="function",
+                                 function={
+                                     "name": content.name,
+                                     "arguments": json.dumps(content.input),
+                                 },
+                             )
+                         ],
+                     )
+                 )
+                 continue
+
+             # Handle ToolResultContent (user's tool results)
+             if isinstance(content, ToolResultContent):
+                 # Extract text parts from the content list
+                 result_texts: list[str] = []
+                 if content.content:
+                     for item in content.content:
+                         if isinstance(item, TextContent):
+                             result_texts.append(item.text)
+                 openai_messages.append(
+                     ChatCompletionToolMessageParam(
+                         role="tool",
+                         tool_call_id=content.toolUseId,
+                         content="\n".join(result_texts),
+                     )
+                 )
+                 continue
+
+             # Handle TextContent
+             if isinstance(content, TextContent):
+                 if message.role == "user":
+                     openai_messages.append(
+                         ChatCompletionUserMessageParam(
+                             role="user",
+                             content=content.text,
+                         )
+                     )
+                 else:
+                     openai_messages.append(
+                         ChatCompletionAssistantMessageParam(
+                             role="assistant",
+                             content=content.text,
+                         )
+                     )
+                 continue
+
+             raise ValueError(f"Unsupported content type: {type(content)}")
+
+         return openai_messages
+
+     @staticmethod
+     def _chat_completion_to_create_message_result(
+         chat_completion: ChatCompletion,
+     ) -> CreateMessageResult:
+         if len(chat_completion.choices) == 0:
+             raise ValueError("No response for completion")
+
+         first_choice = chat_completion.choices[0]
+
+         if content := first_choice.message.content:
+             return CreateMessageResult(
+                 content=TextContent(type="text", text=content),
+                 role="assistant",
+                 model=chat_completion.model,
+             )
+
+         raise ValueError("No content in response from completion")
+
+     def _select_model_from_preferences(
+         self, model_preferences: ModelPreferences | str | list[str] | None
+     ) -> ChatModel:
+         for model_option in self._iter_models_from_preferences(model_preferences):
+             if model_option in get_args(ChatModel):
+                 chosen_model: ChatModel = model_option  # type: ignore[assignment]
+                 return chosen_model
+
+         return self.default_model
+
+     @staticmethod
+     def _convert_tools_to_openai(tools: list[Tool]) -> list[ChatCompletionToolParam]:
+         """Convert MCP tools to OpenAI tool format."""
+         openai_tools: list[ChatCompletionToolParam] = []
+         for tool in tools:
+             # Build parameters dict, ensuring required fields
+             parameters: dict[str, Any] = dict(tool.inputSchema)
+             if "type" not in parameters:
+                 parameters["type"] = "object"
+
+             openai_tools.append(
+                 ChatCompletionToolParam(
+                     type="function",
+                     function=FunctionDefinition(
+                         name=tool.name,
+                         description=tool.description or "",
+                         parameters=parameters,
+                     ),
+                 )
+             )
+         return openai_tools
+
+     @staticmethod
+     def _convert_tool_choice_to_openai(
+         tool_choice: ToolChoice,
+     ) -> ChatCompletionToolChoiceOptionParam:
+         """Convert MCP tool_choice to OpenAI format."""
+         if tool_choice.mode == "auto":
+             return "auto"
+         elif tool_choice.mode == "required":
+             return "required"
+         elif tool_choice.mode == "none":
+             return "none"
+         else:
+             raise ValueError(f"Unsupported tool_choice mode: {tool_choice.mode!r}")
+
+     @staticmethod
+     def _chat_completion_to_result_with_tools(
+         chat_completion: ChatCompletion,
+     ) -> CreateMessageResultWithTools:
+         """Convert OpenAI response to CreateMessageResultWithTools."""
+         if len(chat_completion.choices) == 0:
+             raise ValueError("No response for completion")
+
+         first_choice = chat_completion.choices[0]
+         message = first_choice.message
+
+         # Determine stop reason
+         stop_reason: StopReason
+         if first_choice.finish_reason == "tool_calls":
+             stop_reason = "toolUse"
+         elif first_choice.finish_reason == "stop":
+             stop_reason = "endTurn"
+         elif first_choice.finish_reason == "length":
+             stop_reason = "maxTokens"
+         else:
+             stop_reason = "endTurn"
+
+         # Build content list
+         content: list[TextContent | ToolUseContent] = []
+
+         # Add text content if present
+         if message.content:
+             content.append(TextContent(type="text", text=message.content))
+
+         # Add tool calls if present
+         if message.tool_calls:
+             for tool_call in message.tool_calls:
+                 # Skip non-function tool calls
+                 if not hasattr(tool_call, "function"):
+                     continue
+                 func = tool_call.function  # type: ignore[union-attr]
+                 # Parse the arguments JSON string
+                 try:
+                     arguments = json.loads(func.arguments)  # type: ignore[union-attr]
+                 except json.JSONDecodeError as e:
+                     raise ValueError(
+                         f"Invalid JSON in tool arguments for "
+                         f"'{func.name}': {func.arguments}"  # type: ignore[union-attr]
+                     ) from e
+
+                 content.append(
+                     ToolUseContent(
+                         type="tool_use",
+                         id=tool_call.id,
+                         name=func.name,  # type: ignore[union-attr]
+                         input=arguments,
+                     )
+                 )
+
+         # Must have at least some content
+         if not content:
+             raise ValueError("No content in response from completion")
+
+         return CreateMessageResultWithTools(
+             content=content,  # type: ignore[arg-type]
+             role="assistant",
+             model=chat_completion.model,
+             stopReason=stop_reason,
+         )
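For orientation, a minimal usage sketch of the new handler (not part of the diff). It assumes the `Client(..., sampling_handler=...)` wiring from the FastMCP 2.x docs; the server path and tool name are hypothetical, and `AsyncOpenAI()` expects `OPENAI_API_KEY` to be set.

    import asyncio

    from fastmcp import Client
    from fastmcp.client.sampling.handlers.openai import OpenAISamplingHandler

    async def main() -> None:
        # The handler instance is an async callable matching the
        # (messages, params, context) sampling signature shown above.
        handler = OpenAISamplingHandler(default_model="gpt-4o-mini")
        async with Client("my_server.py", sampling_handler=handler) as client:  # hypothetical server
            result = await client.call_tool("summarize", {"text": "hello"})  # hypothetical tool
            print(result)

    asyncio.run(main())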
fastmcp/client/tasks.py CHANGED
@@ -12,13 +12,6 @@ from datetime import datetime, timezone
  from typing import TYPE_CHECKING, Generic, TypeVar

  import mcp.types
- from mcp import ClientSession
- from mcp.client.session import (
-     SUPPORTED_PROTOCOL_VERSIONS,
-     _default_elicitation_callback,
-     _default_list_roots_callback,
-     _default_sampling_callback,
- )
  from mcp.types import GetTaskResult, TaskStatusNotification

  from fastmcp.client.messages import Message, MessageHandler
@@ -30,62 +23,6 @@ if TYPE_CHECKING:
  from fastmcp.client.client import CallToolResult, Client


- # TODO(SEP-1686): Remove this function when the MCP SDK adds an
- # `experimental_capabilities` parameter to ClientSession (the server side
- # already has this via `create_initialization_options(experimental_capabilities={})`).
- # The SDK currently hardcodes `experimental=None` in ClientSession.initialize().
- async def _task_capable_initialize(
-     session: ClientSession,
- ) -> mcp.types.InitializeResult:
-     """Initialize a session with task capabilities declared."""
-     sampling = (
-         mcp.types.SamplingCapability()
-         if session._sampling_callback != _default_sampling_callback
-         else None
-     )
-     elicitation = (
-         mcp.types.ElicitationCapability()
-         if session._elicitation_callback != _default_elicitation_callback
-         else None
-     )
-     roots = (
-         mcp.types.RootsCapability(listChanged=True)
-         if session._list_roots_callback != _default_list_roots_callback
-         else None
-     )
-
-     result = await session.send_request(
-         mcp.types.ClientRequest(
-             mcp.types.InitializeRequest(
-                 params=mcp.types.InitializeRequestParams(
-                     protocolVersion=mcp.types.LATEST_PROTOCOL_VERSION,
-                     capabilities=mcp.types.ClientCapabilities(
-                         sampling=sampling,
-                         elicitation=elicitation,
-                         experimental={"tasks": {}},
-                         roots=roots,
-                     ),
-                     clientInfo=session._client_info,
-                 ),
-             )
-         ),
-         mcp.types.InitializeResult,
-     )
-
-     if result.protocolVersion not in SUPPORTED_PROTOCOL_VERSIONS:
-         raise RuntimeError(
-             f"Unsupported protocol version from the server: {result.protocolVersion}"
-         )
-
-     session._server_capabilities = result.capabilities
-
-     await session.send_notification(
-         mcp.types.ClientNotification(mcp.types.InitializedNotification())
-     )
-
-     return result
-
-
  class TaskNotificationHandler(MessageHandler):
      """MessageHandler that routes task status notifications to Task objects."""

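The removed `_task_capable_initialize` shim existed only to declare `experimental={"tasks": {}}` during client initialization. For reference, the server-side counterpart named in the removed TODO looks roughly like this sketch against the MCP SDK's low-level `Server` (the server name is hypothetical):

    from mcp.server.lowlevel import Server

    server = Server("example-server")  # hypothetical name

    # Experimental capabilities are declared when building the server's
    # initialization options, per the removed TODO comment.
    init_options = server.create_initialization_options(
        experimental_capabilities={"tasks": {}}
    )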
fastmcp/client/transports.py CHANGED
@@ -23,7 +23,7 @@ from mcp.client.session import (
  )
  from mcp.client.sse import sse_client
  from mcp.client.stdio import stdio_client
- from mcp.client.streamable_http import streamablehttp_client
+ from mcp.client.streamable_http import streamable_http_client
  from mcp.server.fastmcp import FastMCP as FastMCP1Server
  from mcp.shared._httpx_utils import McpHttpClientFactory
  from mcp.shared.memory import create_client_server_memory_streams
@@ -66,6 +66,7 @@ class SessionKwargs(TypedDict, total=False):

      read_timeout_seconds: datetime.timedelta | None
      sampling_callback: SamplingFnT | None
+     sampling_capabilities: mcp.types.SamplingCapability | None
      list_roots_callback: ListRootsFnT | None
      logging_callback: LoggingFnT | None
      elicitation_callback: ElicitationFnT | None
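A sketch of populating the new `sampling_capabilities` key, assuming `SessionKwargs` is usable as a plain TypedDict; `mcp.types.SamplingCapability` is the same type the removed tasks.py shim constructed:

    import mcp.types

    from fastmcp.client.transports import SessionKwargs

    # Hypothetical wiring: declare sampling support explicitly instead of
    # inferring it from whether a sampling callback was registered.
    session_kwargs: SessionKwargs = {
        "sampling_capabilities": mcp.types.SamplingCapability(),
    }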
@@ -251,6 +252,16 @@ class StreamableHttpTransport(ClientTransport):
          self.httpx_client_factory = httpx_client_factory
          self._set_auth(auth)

+         if sse_read_timeout is not None:
+             if fastmcp.settings.deprecation_warnings:
+                 warnings.warn(
+                     "The `sse_read_timeout` parameter is deprecated and no longer used. "
+                     "The new streamable_http_client API does not support this parameter. "
+                     "Use `read_timeout_seconds` in session_kwargs or configure timeout on "
+                     "the httpx client via `httpx_client_factory` instead.",
+                     DeprecationWarning,
+                     stacklevel=2,
+                 )
          if isinstance(sse_read_timeout, int | float):
              sse_read_timeout = datetime.timedelta(seconds=float(sse_read_timeout))
          self.sse_read_timeout = sse_read_timeout
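As the warning text suggests, callers that set `sse_read_timeout` can move the timeout onto the httpx client. A migration sketch, with an illustrative 300-second value and a hypothetical URL:

    import httpx

    from fastmcp.client.transports import StreamableHttpTransport

    # Before (now deprecated):
    #   StreamableHttpTransport("https://example.com/mcp", sse_read_timeout=300)
    transport = StreamableHttpTransport(
        "https://example.com/mcp",
        httpx_client_factory=lambda **kwargs: httpx.AsyncClient(
            # Replaces sse_read_timeout; overrides any timeout passed in kwargs.
            timeout=httpx.Timeout(300.0),
            **{k: v for k, v in kwargs.items() if k != "timeout"},
        ),
    )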
@@ -268,28 +279,36 @@ class StreamableHttpTransport(ClientTransport):
      async def connect_session(
          self, **session_kwargs: Unpack[SessionKwargs]
      ) -> AsyncIterator[ClientSession]:
-         client_kwargs: dict[str, Any] = {}
-
-         # load headers from an active HTTP request, if available. This will only be true
+         # Load headers from an active HTTP request, if available. This will only be true
          # if the client is used in a FastMCP Proxy, in which case the MCP client headers
          # need to be forwarded to the remote server.
-         client_kwargs["headers"] = get_http_headers() | self.headers
+         headers = get_http_headers() | self.headers

-         # sse_read_timeout has a default value set, so we can't pass None without overriding it
-         # instead we simply leave the kwarg out if it's not provided
-         if self.sse_read_timeout is not None:
-             client_kwargs["sse_read_timeout"] = self.sse_read_timeout
+         # Build httpx client configuration
+         httpx_client_kwargs: dict[str, Any] = {
+             "headers": headers,
+             "auth": self.auth,
+             "follow_redirects": True,
+         }
+
+         # Configure timeout if provided (convert timedelta to seconds for httpx)
          if session_kwargs.get("read_timeout_seconds") is not None:
-             client_kwargs["timeout"] = session_kwargs.get("read_timeout_seconds")
+             read_timeout_seconds = cast(
+                 datetime.timedelta, session_kwargs.get("read_timeout_seconds")
+             )
+             httpx_client_kwargs["timeout"] = read_timeout_seconds.total_seconds()

+         # Create httpx client from factory or use default
          if self.httpx_client_factory is not None:
-             client_kwargs["httpx_client_factory"] = self.httpx_client_factory
+             http_client = self.httpx_client_factory(**httpx_client_kwargs)
+         else:
+             http_client = httpx.AsyncClient(**httpx_client_kwargs)

-         async with streamablehttp_client(
-             self.url,
-             auth=self.auth,
-             **client_kwargs,
-         ) as transport:
+         # Ensure httpx client is closed after use
+         async with (
+             http_client,
+             streamable_http_client(self.url, http_client=http_client) as transport,
+         ):
              read_stream, write_stream, get_session_id = transport
              self._get_session_id_cb = get_session_id
              async with ClientSession(
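Note the changed factory contract visible in this hunk: the factory now receives the fully built keyword set (`headers`, `auth`, `follow_redirects`, and optionally `timeout`) and must return the `httpx.AsyncClient` itself, instead of being forwarded to the transport function. A compatible factory sketch under that assumption:

    import httpx

    async def _log_request(request: httpx.Request) -> None:
        # Illustrative hook: print outgoing MCP HTTP requests.
        print(f"-> {request.method} {request.url}")

    def logging_client_factory(**httpx_client_kwargs) -> httpx.AsyncClient:
        # Receives the kwargs assembled in connect_session above.
        return httpx.AsyncClient(
            event_hooks={"request": [_log_request]},
            **httpx_client_kwargs,
        )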
fastmcp/experimental/sampling/handlers/__init__.py CHANGED
@@ -0,0 +1,5 @@
+ # Re-export for backwards compatibility
+ # The canonical location is now fastmcp.client.sampling.handlers
+ from fastmcp.client.sampling.handlers.openai import OpenAISamplingHandler
+
+ __all__ = ["OpenAISamplingHandler"]
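The practical effect of the re-export: both import paths resolve to the same class, so code written against 2.14.0 keeps working.

    # Legacy path, kept working by the re-export above
    from fastmcp.experimental.sampling.handlers import OpenAISamplingHandler

    # Canonical path going forward
    from fastmcp.client.sampling.handlers.openai import (
        OpenAISamplingHandler as CanonicalHandler,
    )

    assert OpenAISamplingHandler is CanonicalHandler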