fastmcp 2.13.3__py3-none-any.whl → 2.14.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85):
  1. fastmcp/__init__.py +0 -21
  2. fastmcp/cli/__init__.py +0 -3
  3. fastmcp/cli/__main__.py +5 -0
  4. fastmcp/cli/cli.py +8 -22
  5. fastmcp/cli/install/shared.py +0 -15
  6. fastmcp/cli/tasks.py +110 -0
  7. fastmcp/client/auth/oauth.py +9 -9
  8. fastmcp/client/client.py +739 -136
  9. fastmcp/client/elicitation.py +11 -5
  10. fastmcp/client/messages.py +7 -5
  11. fastmcp/client/roots.py +2 -1
  12. fastmcp/client/sampling/__init__.py +69 -0
  13. fastmcp/client/sampling/handlers/__init__.py +0 -0
  14. fastmcp/client/sampling/handlers/anthropic.py +387 -0
  15. fastmcp/client/sampling/handlers/openai.py +399 -0
  16. fastmcp/client/tasks.py +551 -0
  17. fastmcp/client/transports.py +72 -21
  18. fastmcp/contrib/component_manager/component_service.py +4 -20
  19. fastmcp/dependencies.py +25 -0
  20. fastmcp/experimental/sampling/handlers/__init__.py +5 -0
  21. fastmcp/experimental/sampling/handlers/openai.py +4 -169
  22. fastmcp/experimental/server/openapi/__init__.py +15 -13
  23. fastmcp/experimental/utilities/openapi/__init__.py +12 -38
  24. fastmcp/prompts/prompt.py +38 -38
  25. fastmcp/resources/resource.py +33 -16
  26. fastmcp/resources/template.py +69 -59
  27. fastmcp/server/auth/__init__.py +0 -9
  28. fastmcp/server/auth/auth.py +127 -3
  29. fastmcp/server/auth/oauth_proxy.py +47 -97
  30. fastmcp/server/auth/oidc_proxy.py +7 -0
  31. fastmcp/server/auth/providers/in_memory.py +2 -2
  32. fastmcp/server/auth/providers/oci.py +2 -2
  33. fastmcp/server/context.py +509 -180
  34. fastmcp/server/dependencies.py +464 -6
  35. fastmcp/server/elicitation.py +285 -47
  36. fastmcp/server/event_store.py +177 -0
  37. fastmcp/server/http.py +15 -3
  38. fastmcp/server/low_level.py +56 -12
  39. fastmcp/server/middleware/middleware.py +2 -2
  40. fastmcp/server/openapi/__init__.py +35 -0
  41. fastmcp/{experimental/server → server}/openapi/components.py +4 -3
  42. fastmcp/{experimental/server → server}/openapi/routing.py +1 -1
  43. fastmcp/{experimental/server → server}/openapi/server.py +6 -5
  44. fastmcp/server/proxy.py +53 -40
  45. fastmcp/server/sampling/__init__.py +10 -0
  46. fastmcp/server/sampling/run.py +301 -0
  47. fastmcp/server/sampling/sampling_tool.py +108 -0
  48. fastmcp/server/server.py +793 -552
  49. fastmcp/server/tasks/__init__.py +21 -0
  50. fastmcp/server/tasks/capabilities.py +22 -0
  51. fastmcp/server/tasks/config.py +89 -0
  52. fastmcp/server/tasks/converters.py +206 -0
  53. fastmcp/server/tasks/handlers.py +356 -0
  54. fastmcp/server/tasks/keys.py +93 -0
  55. fastmcp/server/tasks/protocol.py +355 -0
  56. fastmcp/server/tasks/subscriptions.py +205 -0
  57. fastmcp/settings.py +101 -103
  58. fastmcp/tools/tool.py +83 -49
  59. fastmcp/tools/tool_transform.py +1 -12
  60. fastmcp/utilities/components.py +3 -3
  61. fastmcp/utilities/json_schema_type.py +4 -4
  62. fastmcp/utilities/mcp_config.py +1 -2
  63. fastmcp/utilities/mcp_server_config/v1/mcp_server_config.py +1 -1
  64. fastmcp/{experimental/utilities → utilities}/openapi/README.md +7 -35
  65. fastmcp/utilities/openapi/__init__.py +63 -0
  66. fastmcp/{experimental/utilities → utilities}/openapi/formatters.py +5 -5
  67. fastmcp/{experimental/utilities → utilities}/openapi/json_schema_converter.py +1 -1
  68. fastmcp/utilities/tests.py +11 -5
  69. fastmcp/utilities/types.py +8 -0
  70. {fastmcp-2.13.3.dist-info → fastmcp-2.14.1.dist-info}/METADATA +7 -4
  71. {fastmcp-2.13.3.dist-info → fastmcp-2.14.1.dist-info}/RECORD +79 -63
  72. fastmcp/client/sampling.py +0 -56
  73. fastmcp/experimental/sampling/handlers/base.py +0 -21
  74. fastmcp/server/auth/providers/bearer.py +0 -25
  75. fastmcp/server/openapi.py +0 -1087
  76. fastmcp/server/sampling/handler.py +0 -19
  77. fastmcp/utilities/openapi.py +0 -1568
  78. /fastmcp/{experimental/server → server}/openapi/README.md +0 -0
  79. /fastmcp/{experimental/utilities → utilities}/openapi/director.py +0 -0
  80. /fastmcp/{experimental/utilities → utilities}/openapi/models.py +0 -0
  81. /fastmcp/{experimental/utilities → utilities}/openapi/parser.py +0 -0
  82. /fastmcp/{experimental/utilities → utilities}/openapi/schemas.py +0 -0
  83. {fastmcp-2.13.3.dist-info → fastmcp-2.14.1.dist-info}/WHEEL +0 -0
  84. {fastmcp-2.13.3.dist-info → fastmcp-2.14.1.dist-info}/entry_points.txt +0 -0
  85. {fastmcp-2.13.3.dist-info → fastmcp-2.14.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,399 @@
1
+ """OpenAI sampling handler for FastMCP."""
2
+
3
+ import json
4
+ from collections.abc import Iterator, Sequence
5
+ from typing import Any, get_args
6
+
7
+ from mcp import ClientSession, ServerSession
8
+ from mcp.shared.context import LifespanContextT, RequestContext
9
+ from mcp.types import CreateMessageRequestParams as SamplingParams
10
+ from mcp.types import (
11
+ CreateMessageResult,
12
+ CreateMessageResultWithTools,
13
+ ModelPreferences,
14
+ SamplingMessage,
15
+ StopReason,
16
+ TextContent,
17
+ Tool,
18
+ ToolChoice,
19
+ ToolResultContent,
20
+ ToolUseContent,
21
+ )
22
+
23
+ try:
24
+ from openai import NOT_GIVEN, AsyncOpenAI, NotGiven
25
+ from openai.types.chat import (
26
+ ChatCompletion,
27
+ ChatCompletionAssistantMessageParam,
28
+ ChatCompletionMessageParam,
29
+ ChatCompletionMessageToolCallParam,
30
+ ChatCompletionSystemMessageParam,
31
+ ChatCompletionToolChoiceOptionParam,
32
+ ChatCompletionToolMessageParam,
33
+ ChatCompletionToolParam,
34
+ ChatCompletionUserMessageParam,
35
+ )
36
+ from openai.types.shared.chat_model import ChatModel
37
+ from openai.types.shared_params import FunctionDefinition
38
+ except ImportError as e:
39
+ raise ImportError(
40
+ "The `openai` package is not installed. "
41
+ "Please install `fastmcp[openai]` or add `openai` to your dependencies manually."
42
+ ) from e
43
+
44
+
45
class OpenAISamplingHandler:
    """Sampling handler that uses the OpenAI API.

    Bridges MCP ``sampling/createMessage`` requests to the OpenAI Chat
    Completions API: MCP messages and tools are converted to OpenAI request
    parameters, the completion is executed, and the response is converted
    back to an MCP result type (``CreateMessageResultWithTools`` when the
    request declared tools, plain ``CreateMessageResult`` otherwise).
    """

    def __init__(
        self,
        default_model: ChatModel,
        client: AsyncOpenAI | None = None,
    ) -> None:
        """Initialize the handler.

        Args:
            default_model: Model used when no model preference matches a
                known OpenAI chat model.
            client: Optional pre-configured OpenAI client; when omitted a
                default ``AsyncOpenAI()`` is constructed (which reads its
                configuration, e.g. the API key, from the environment).
        """
        self.client: AsyncOpenAI = client or AsyncOpenAI()
        self.default_model: ChatModel = default_model

    async def __call__(
        self,
        messages: list[SamplingMessage],
        params: SamplingParams,
        context: RequestContext[ServerSession, LifespanContextT]
        | RequestContext[ClientSession, LifespanContextT],
    ) -> CreateMessageResult | CreateMessageResultWithTools:
        """Handle a single sampling request.

        Args:
            messages: Conversation history to send to the model.
            params: Sampling parameters (system prompt, model preferences,
                temperature, stop sequences, tools, tool choice, ...).
            context: MCP request context (unused here; part of the handler
                protocol signature).

        Returns:
            ``CreateMessageResultWithTools`` when ``params.tools`` was set,
            otherwise a plain ``CreateMessageResult``.
        """
        openai_messages: list[ChatCompletionMessageParam] = (
            self._convert_to_openai_messages(
                system_prompt=params.systemPrompt,
                messages=messages,
            )
        )

        model: ChatModel = self._select_model_from_preferences(params.modelPreferences)

        # Convert MCP tools to OpenAI format (NOT_GIVEN omits the field
        # from the API request entirely).
        openai_tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN
        if params.tools:
            openai_tools = self._convert_tools_to_openai(params.tools)

        # Convert tool_choice to OpenAI format
        openai_tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN
        if params.toolChoice:
            openai_tool_choice = self._convert_tool_choice_to_openai(params.toolChoice)

        response = await self.client.chat.completions.create(
            model=model,
            messages=openai_messages,
            temperature=(
                params.temperature if params.temperature is not None else NOT_GIVEN
            ),
            max_tokens=params.maxTokens,
            stop=params.stopSequences if params.stopSequences else NOT_GIVEN,
            tools=openai_tools,
            tool_choice=openai_tool_choice,
        )

        # Return appropriate result type based on whether tools were provided
        if params.tools:
            return self._chat_completion_to_result_with_tools(response)
        return self._chat_completion_to_create_message_result(response)

    @staticmethod
    def _iter_models_from_preferences(
        model_preferences: ModelPreferences | str | list[str] | None,
    ) -> Iterator[str]:
        """Yield candidate model names from an MCP model preference value.

        Accepts a bare model name (yielded only if it is a known
        ``ChatModel``), a list of names (yielded as-is), or a structured
        ``ModelPreferences`` whose hint names are yielded in order.
        """
        if model_preferences is None:
            return

        if isinstance(model_preferences, str) and model_preferences in get_args(
            ChatModel
        ):
            yield model_preferences

        elif isinstance(model_preferences, list):
            yield from model_preferences

        elif isinstance(model_preferences, ModelPreferences):
            if not (hints := model_preferences.hints):
                return

            for hint in hints:
                if not (name := hint.name):
                    continue

                yield name

    @staticmethod
    def _convert_to_openai_messages(
        system_prompt: str | None, messages: Sequence[SamplingMessage]
    ) -> list[ChatCompletionMessageParam]:
        """Convert MCP sampling messages to OpenAI chat messages.

        Handles plain text content, assistant tool calls, user tool results,
        and mixed list content (as produced by tool-enabled sampling turns).

        Raises:
            ValueError: If a message carries an unsupported content type.
        """
        openai_messages: list[ChatCompletionMessageParam] = []

        if system_prompt:
            openai_messages.append(
                ChatCompletionSystemMessageParam(
                    role="system",
                    content=system_prompt,
                )
            )

        for message in messages:
            content = message.content

            # Handle list content (from CreateMessageResultWithTools)
            if isinstance(content, list):
                # Collect tool calls, text, and tool results from the list.
                # Tool results are buffered so they can be emitted in the
                # position OpenAI requires (after the assistant tool calls).
                tool_calls: list[ChatCompletionMessageToolCallParam] = []
                text_parts: list[str] = []
                tool_messages: list[ChatCompletionToolMessageParam] = []

                for item in content:
                    if isinstance(item, ToolUseContent):
                        tool_calls.append(
                            ChatCompletionMessageToolCallParam(
                                id=item.id,
                                type="function",
                                function={
                                    "name": item.name,
                                    "arguments": json.dumps(item.input),
                                },
                            )
                        )
                    elif isinstance(item, TextContent):
                        text_parts.append(item.text)
                    elif isinstance(item, ToolResultContent):
                        # Flatten the textual parts of the tool result into
                        # a single string for the OpenAI "tool" message.
                        content_text = ""
                        if item.content:
                            content_text = "\n".join(
                                sub_item.text
                                for sub_item in item.content
                                if isinstance(sub_item, TextContent)
                            )
                        tool_messages.append(
                            ChatCompletionToolMessageParam(
                                role="tool",
                                tool_call_id=item.toolUseId,
                                content=content_text,
                            )
                        )

                msg_content = "\n".join(text_parts) if text_parts else None
                if tool_calls:
                    # OpenAI requires: assistant (with tool_calls) -> tool
                    # messages, in that order.
                    openai_messages.append(
                        ChatCompletionAssistantMessageParam(
                            role="assistant",
                            content=msg_content,
                            tool_calls=tool_calls,
                        )
                    )
                    openai_messages.extend(tool_messages)
                else:
                    # Tool results answer the *previous* assistant message's
                    # tool calls, so they must precede any new text.
                    # Bugfix: these results were previously dropped whenever
                    # the same message also contained text, which makes the
                    # OpenAI API reject the conversation (every tool_call
                    # must have a matching tool message).
                    openai_messages.extend(tool_messages)
                    if msg_content:
                        if message.role == "user":
                            openai_messages.append(
                                ChatCompletionUserMessageParam(
                                    role="user",
                                    content=msg_content,
                                )
                            )
                        else:
                            openai_messages.append(
                                ChatCompletionAssistantMessageParam(
                                    role="assistant",
                                    content=msg_content,
                                )
                            )
                continue

            # Handle ToolUseContent (assistant's tool calls)
            if isinstance(content, ToolUseContent):
                openai_messages.append(
                    ChatCompletionAssistantMessageParam(
                        role="assistant",
                        tool_calls=[
                            ChatCompletionMessageToolCallParam(
                                id=content.id,
                                type="function",
                                function={
                                    "name": content.name,
                                    "arguments": json.dumps(content.input),
                                },
                            )
                        ],
                    )
                )
                continue

            # Handle ToolResultContent (user's tool results)
            if isinstance(content, ToolResultContent):
                # Extract text parts from the content list
                result_texts: list[str] = []
                if content.content:
                    for item in content.content:
                        if isinstance(item, TextContent):
                            result_texts.append(item.text)
                openai_messages.append(
                    ChatCompletionToolMessageParam(
                        role="tool",
                        tool_call_id=content.toolUseId,
                        content="\n".join(result_texts),
                    )
                )
                continue

            # Handle TextContent
            if isinstance(content, TextContent):
                if message.role == "user":
                    openai_messages.append(
                        ChatCompletionUserMessageParam(
                            role="user",
                            content=content.text,
                        )
                    )
                else:
                    openai_messages.append(
                        ChatCompletionAssistantMessageParam(
                            role="assistant",
                            content=content.text,
                        )
                    )
                continue

            raise ValueError(f"Unsupported content type: {type(content)}")

        return openai_messages

    @staticmethod
    def _chat_completion_to_create_message_result(
        chat_completion: ChatCompletion,
    ) -> CreateMessageResult:
        """Convert an OpenAI response into a plain MCP result.

        Raises:
            ValueError: If the completion has no choices or no text content.
        """
        if len(chat_completion.choices) == 0:
            raise ValueError("No response for completion")

        first_choice = chat_completion.choices[0]

        if content := first_choice.message.content:
            return CreateMessageResult(
                content=TextContent(type="text", text=content),
                role="assistant",
                model=chat_completion.model,
            )

        raise ValueError("No content in response from completion")

    def _select_model_from_preferences(
        self, model_preferences: ModelPreferences | str | list[str] | None
    ) -> ChatModel:
        """Return the first preferred model that is a known OpenAI chat
        model, falling back to ``self.default_model``."""
        for model_option in self._iter_models_from_preferences(model_preferences):
            if model_option in get_args(ChatModel):
                chosen_model: ChatModel = model_option  # type: ignore[assignment]
                return chosen_model

        return self.default_model

    @staticmethod
    def _convert_tools_to_openai(tools: list[Tool]) -> list[ChatCompletionToolParam]:
        """Convert MCP tools to OpenAI tool format."""
        openai_tools: list[ChatCompletionToolParam] = []
        for tool in tools:
            # Build parameters dict, ensuring the JSON-schema "type" field
            # OpenAI expects is present.
            parameters: dict[str, Any] = dict(tool.inputSchema)
            if "type" not in parameters:
                parameters["type"] = "object"

            openai_tools.append(
                ChatCompletionToolParam(
                    type="function",
                    function=FunctionDefinition(
                        name=tool.name,
                        description=tool.description or "",
                        parameters=parameters,
                    ),
                )
            )
        return openai_tools

    @staticmethod
    def _convert_tool_choice_to_openai(
        tool_choice: ToolChoice,
    ) -> ChatCompletionToolChoiceOptionParam:
        """Convert MCP tool_choice to OpenAI format.

        Raises:
            ValueError: For any mode other than "auto", "required", "none".
        """
        if tool_choice.mode == "auto":
            return "auto"
        elif tool_choice.mode == "required":
            return "required"
        elif tool_choice.mode == "none":
            return "none"
        else:
            raise ValueError(f"Unsupported tool_choice mode: {tool_choice.mode!r}")

    @staticmethod
    def _chat_completion_to_result_with_tools(
        chat_completion: ChatCompletion,
    ) -> CreateMessageResultWithTools:
        """Convert OpenAI response to CreateMessageResultWithTools.

        Raises:
            ValueError: On empty choices, empty content, or tool-call
                arguments that are not valid JSON.
        """
        if len(chat_completion.choices) == 0:
            raise ValueError("No response for completion")

        first_choice = chat_completion.choices[0]
        message = first_choice.message

        # Map OpenAI finish reasons onto MCP stop reasons; anything
        # unrecognized is treated as a normal end of turn.
        stop_reason: StopReason
        if first_choice.finish_reason == "tool_calls":
            stop_reason = "toolUse"
        elif first_choice.finish_reason == "stop":
            stop_reason = "endTurn"
        elif first_choice.finish_reason == "length":
            stop_reason = "maxTokens"
        else:
            stop_reason = "endTurn"

        # Build content list: optional text followed by any tool calls.
        content: list[TextContent | ToolUseContent] = []

        # Add text content if present
        if message.content:
            content.append(TextContent(type="text", text=message.content))

        # Add tool calls if present
        if message.tool_calls:
            for tool_call in message.tool_calls:
                # Skip non-function tool calls
                if not hasattr(tool_call, "function"):
                    continue
                func = tool_call.function  # type: ignore[union-attr]
                # Tool arguments arrive as a JSON string; parse them.
                try:
                    arguments = json.loads(func.arguments)  # type: ignore[union-attr]
                except json.JSONDecodeError as e:
                    raise ValueError(
                        f"Invalid JSON in tool arguments for "
                        f"'{func.name}': {func.arguments}"  # type: ignore[union-attr]
                    ) from e

                content.append(
                    ToolUseContent(
                        type="tool_use",
                        id=tool_call.id,
                        name=func.name,  # type: ignore[union-attr]
                        input=arguments,
                    )
                )

        # Must have at least some content
        if not content:
            raise ValueError("No content in response from completion")

        return CreateMessageResultWithTools(
            content=content,  # type: ignore[arg-type]
            role="assistant",
            model=chat_completion.model,
            stopReason=stop_reason,
        )