golf-mcp 0.2.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. golf/__init__.py +1 -0
  2. golf/auth/__init__.py +277 -0
  3. golf/auth/api_key.py +73 -0
  4. golf/auth/factory.py +360 -0
  5. golf/auth/helpers.py +175 -0
  6. golf/auth/providers.py +586 -0
  7. golf/auth/registry.py +256 -0
  8. golf/cli/__init__.py +1 -0
  9. golf/cli/branding.py +191 -0
  10. golf/cli/main.py +377 -0
  11. golf/commands/__init__.py +5 -0
  12. golf/commands/build.py +81 -0
  13. golf/commands/init.py +290 -0
  14. golf/commands/run.py +137 -0
  15. golf/core/__init__.py +1 -0
  16. golf/core/builder.py +1884 -0
  17. golf/core/builder_auth.py +209 -0
  18. golf/core/builder_metrics.py +221 -0
  19. golf/core/builder_telemetry.py +99 -0
  20. golf/core/config.py +199 -0
  21. golf/core/parser.py +1085 -0
  22. golf/core/telemetry.py +492 -0
  23. golf/core/transformer.py +231 -0
  24. golf/examples/__init__.py +0 -0
  25. golf/examples/basic/.env.example +4 -0
  26. golf/examples/basic/README.md +133 -0
  27. golf/examples/basic/auth.py +76 -0
  28. golf/examples/basic/golf.json +5 -0
  29. golf/examples/basic/prompts/welcome.py +27 -0
  30. golf/examples/basic/resources/current_time.py +34 -0
  31. golf/examples/basic/resources/info.py +28 -0
  32. golf/examples/basic/resources/weather/city.py +46 -0
  33. golf/examples/basic/resources/weather/client.py +48 -0
  34. golf/examples/basic/resources/weather/current.py +36 -0
  35. golf/examples/basic/resources/weather/forecast.py +36 -0
  36. golf/examples/basic/tools/calculator.py +94 -0
  37. golf/examples/basic/tools/say/hello.py +65 -0
  38. golf/metrics/__init__.py +10 -0
  39. golf/metrics/collector.py +320 -0
  40. golf/metrics/registry.py +12 -0
  41. golf/telemetry/__init__.py +23 -0
  42. golf/telemetry/instrumentation.py +1402 -0
  43. golf/utilities/__init__.py +12 -0
  44. golf/utilities/context.py +53 -0
  45. golf/utilities/elicitation.py +170 -0
  46. golf/utilities/sampling.py +221 -0
  47. golf_mcp-0.2.16.dist-info/METADATA +262 -0
  48. golf_mcp-0.2.16.dist-info/RECORD +52 -0
  49. golf_mcp-0.2.16.dist-info/WHEEL +5 -0
  50. golf_mcp-0.2.16.dist-info/entry_points.txt +2 -0
  51. golf_mcp-0.2.16.dist-info/licenses/LICENSE +201 -0
  52. golf_mcp-0.2.16.dist-info/top_level.txt +1 -0
@@ -0,0 +1,12 @@
1
"""Golf utilities for enhanced MCP tool development.

Convenience layer for Golf tool authors: exposes elicitation and LLM
sampling helpers (plus raw context access) so tools can use advanced MCP
features without managing FastMCP Context objects themselves.
"""

from .elicitation import elicit, elicit_confirmation
from .sampling import sample, sample_structured, sample_with_context
from .context import get_current_context

# Public API of golf.utilities.
__all__ = [
    "elicit",
    "elicit_confirmation",
    "sample",
    "sample_structured",
    "sample_with_context",
    "get_current_context",
]
@@ -0,0 +1,53 @@
1
+ """Context utilities for Golf MCP tools.
2
+
3
+ This module provides utilities to access the current FastMCP Context
4
+ from within Golf tool functions.
5
+ """
6
+
7
+ from typing import TYPE_CHECKING
8
+
9
+ if TYPE_CHECKING:
10
+ from fastmcp.server.context import Context
11
+
12
+
13
def get_current_context() -> "Context":
    """Return the FastMCP Context of the request currently being served.

    Golf tools normally get their context injected implicitly; this helper
    fetches it from FastMCP's context variable at call time so tool code
    never has to thread a Context argument through.

    Returns:
        The active FastMCP Context instance.

    Raises:
        RuntimeError: If no MCP request is currently in flight.
        ImportError: If FastMCP cannot be imported.

    Example:
        ```python
        from golf.utilities import get_current_context

        async def my_tool(data: str):
            ctx = get_current_context()
            await ctx.info(f"Processing: {data}")
            return "done"
        ```
    """
    try:
        # NOTE(review): this reaches into a private FastMCP attribute;
        # confirm `_current_context` still exists when bumping fastmcp.
        from fastmcp.server.context import _current_context
    except ImportError as e:
        raise ImportError("FastMCP is not available. Please ensure fastmcp>=2.11.0 is installed.") from e

    # Returns None (instead of raising LookupError) when unset.
    ctx = _current_context.get(None)
    if ctx is None:
        raise RuntimeError(
            "No FastMCP Context available. This function must be called "
            "from within an MCP tool function that has context injection enabled."
        )
    return ctx
@@ -0,0 +1,170 @@
1
+ """Elicitation utilities for Golf MCP tools.
2
+
3
+ This module provides simplified elicitation functions that Golf tool authors
4
+ can use without needing to manage FastMCP Context objects directly.
5
+ """
6
+
7
+ from typing import Any, TypeVar, overload
8
+ from collections.abc import Callable
9
+
10
+ from .context import get_current_context
11
+
12
+ T = TypeVar("T")
13
+
14
+ # Apply telemetry instrumentation if available
15
+ try:
16
+ from golf.telemetry import instrument_elicitation
17
+
18
+ _instrumentation_available = True
19
+ except ImportError:
20
+ _instrumentation_available = False
21
+
22
+ def instrument_elicitation(func: Callable, elicitation_type: str = "elicit") -> Callable:
23
+ """No-op instrumentation when telemetry is not available."""
24
+ return func
25
+
26
+
27
# Typed views of elicit(): the runtime implementation below handles all
# three response_type shapes; these overloads give callers precise types.
@overload
async def elicit(
    message: str,
    response_type: None = None,
) -> dict[str, Any]:
    """Elicit with no response type returns empty dict."""
    ...


@overload
async def elicit(
    message: str,
    response_type: type[T],
) -> T:
    """Elicit with response type returns typed data."""
    ...


@overload
async def elicit(
    message: str,
    response_type: list[str],
) -> str:
    """Elicit with list of options returns selected string."""
    ...
52
+
53
+
54
async def elicit(
    message: str,
    response_type: type[T] | list[str] | None = None,
) -> T | dict[str, Any] | str:
    """Ask the connected MCP client for additional user input.

    Thin wrapper over FastMCP's ``Context.elicit()`` that looks up the
    active context itself and unwraps the accept/decline/cancel result.

    Args:
        message: Human-readable message explaining what information is needed
        response_type: Expected shape of the answer:
            - None: confirmation-style prompt; an empty dict is returned
            - type[T]: the answer is validated into an instance of T
            - list[str]: the user picks one of the given option strings

    Returns:
        The user's response in the requested format

    Raises:
        RuntimeError: If called outside an MCP request, if the user
            declines or cancels, or if the elicitation fails for any other
            reason (validation errors are wrapped into RuntimeError too).

    Examples:
        ```python
        from golf.utilities import elicit
        from pydantic import BaseModel

        class UserInfo(BaseModel):
            name: str
            email: str

        async def collect_user_info():
            # Structured elicitation
            info = await elicit("Please provide your details:", UserInfo)

            # Simple text elicitation
            reason = await elicit("Why do you need this?", str)

            # Multiple choice elicitation
            priority = await elicit("Select priority:", ["low", "medium", "high"])

            # Confirmation elicitation
            await elicit("Proceed with the action?")

            return f"User {info.name} requested {reason} with {priority} priority"
        ```
    """
    try:
        ctx = get_current_context()
        outcome = await ctx.elicit(message, response_type)

        if not hasattr(outcome, "action"):
            # Bare payload — not expected from current FastMCP, but pass
            # it through rather than failing.
            return outcome

        action = outcome.action
        if action == "accept":
            return outcome.data
        if action == "decline":
            raise RuntimeError(f"User declined the elicitation request: {message}")
        if action == "cancel":
            raise RuntimeError(f"User cancelled the elicitation request: {message}")
        raise RuntimeError(f"Unexpected elicitation response: {action}")

    except Exception as e:
        # Our own RuntimeErrors pass through; everything else is wrapped.
        if isinstance(e, RuntimeError):
            raise
        raise RuntimeError(f"Elicitation failed: {str(e)}") from e
127
+
128
+
129
async def elicit_confirmation(message: str) -> bool:
    """Ask the user a simple yes/no question via elicitation.

    Convenience wrapper over :func:`elicit` for common confirmation prompts.

    Args:
        message: The confirmation message to show the user

    Returns:
        True if user confirmed, False if declined

    Raises:
        RuntimeError: If user cancels or other error occurs

    Example:
        ```python
        from golf.utilities import elicit_confirmation

        async def delete_file(filename: str):
            confirmed = await elicit_confirmation(
                f"Are you sure you want to delete {filename}?"
            )
            if confirmed:
                # Proceed with deletion
                return f"Deleted {filename}"
            else:
                return "Deletion cancelled"
        ```
    """
    try:
        answer = await elicit(message, ["yes", "no"])
        return answer.lower() == "yes"
    except RuntimeError as exc:
        # A declined elicitation counts as "no"; cancellations and other
        # failures keep propagating to the caller.
        if "declined" in str(exc):
            return False
        raise
166
+
167
+
168
# Rebind the public functions through the instrumentation decorator so
# telemetry (when installed) observes every elicitation call; the no-op
# fallback defined above returns them unchanged.
elicit = instrument_elicitation(elicit, "elicit")
elicit_confirmation = instrument_elicitation(elicit_confirmation, "confirmation")
@@ -0,0 +1,221 @@
1
+ """Sampling utilities for Golf MCP tools.
2
+
3
+ This module provides simplified LLM sampling functions that Golf tool authors
4
+ can use without needing to manage FastMCP Context objects directly.
5
+ """
6
+
7
+ from typing import Any
8
+ from collections.abc import Callable
9
+
10
+ from .context import get_current_context
11
+
12
+ # Apply telemetry instrumentation if available
13
+ try:
14
+ from golf.telemetry import instrument_sampling
15
+
16
+ _instrumentation_available = True
17
+ except ImportError:
18
+ _instrumentation_available = False
19
+
20
+ def instrument_sampling(func: Callable, sampling_type: str = "sample") -> Callable:
21
+ """No-op instrumentation when telemetry is not available."""
22
+ return func
23
+
24
+
25
async def sample(
    messages: str | list[str],
    system_prompt: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    model_preferences: str | list[str] | None = None,
) -> str:
    """Request an LLM completion from the MCP client.

    Thin wrapper over FastMCP's ``Context.sample()`` that looks up the
    active context itself and flattens the response to plain text.

    Args:
        messages: The message(s) to send to the LLM:
            - str: Single user message
            - list[str]: Multiple user messages
        system_prompt: Optional system prompt to guide the LLM
        temperature: Optional temperature for sampling (0.0 to 1.0)
        max_tokens: Optional maximum tokens to generate (default: 512)
        model_preferences: Optional model preferences:
            - str: Single model name hint
            - list[str]: Multiple model name hints in preference order

    Returns:
        The LLM's response as a string

    Raises:
        RuntimeError: If called outside MCP context or sampling fails

    Examples:
        ```python
        from golf.utilities import sample

        async def analyze_data(data: str):
            # Simple completion
            analysis = await sample(f"Analyze this data: {data}")

            # With system prompt and temperature
            creative_response = await sample(
                "Write a creative story about this data",
                system_prompt="You are a creative writer",
                temperature=0.8,
                max_tokens=1000
            )

            # With model preferences
            technical_analysis = await sample(
                f"Provide technical analysis: {data}",
                model_preferences=["gpt-4", "claude-3-sonnet"]
            )

            return {
                "analysis": analysis,
                "creative": creative_response,
                "technical": technical_analysis
            }
        ```
    """
    try:
        ctx = get_current_context()
        completion = await ctx.sample(
            messages=messages,
            system_prompt=system_prompt,
            temperature=temperature,
            max_tokens=max_tokens,
            model_preferences=model_preferences,
        )

        # Flatten the ContentBlock-ish response to text, preferring the
        # most specific attribute available.
        if hasattr(completion, "text"):
            return completion.text
        if hasattr(completion, "content"):
            inner = completion.content
            if isinstance(inner, str):
                return inner
            if hasattr(inner, "text"):
                return inner.text
            return str(inner)
        return str(completion)

    except Exception as e:
        raise RuntimeError(f"LLM sampling failed: {str(e)}") from e
113
+
114
+
115
async def sample_structured(
    messages: str | list[str],
    format_instructions: str,
    system_prompt: str | None = None,
    temperature: float = 0.1,
    max_tokens: int | None = None,
) -> str:
    """Request a structured LLM completion with specific formatting.

    Convenience wrapper over :func:`sample` that appends formatting
    instructions (JSON, XML, ...) to the message(s) before sampling.

    Args:
        messages: The message(s) to send to the LLM
        format_instructions: Instructions for the desired output format
        system_prompt: Optional system prompt
        temperature: Temperature for sampling (default: 0.1 for consistency)
        max_tokens: Optional maximum tokens to generate

    Returns:
        The structured LLM response as a string

    Example:
        ```python
        from golf.utilities import sample_structured

        async def extract_entities(text: str):
            entities = await sample_structured(
                f"Extract entities from: {text}",
                format_instructions="Return as JSON with keys: persons, "
                "organizations, locations",
                system_prompt="You are an expert at named entity recognition"
            )
            return entities
        ```
    """
    # Fold the format instructions into the outgoing message(s).
    if isinstance(messages, str):
        payload: str | list[str] = f"{messages}\n\n{format_instructions}"
    else:
        payload = [*messages, format_instructions]

    return await sample(
        messages=payload,
        system_prompt=system_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
    )
163
+
164
+
165
async def sample_with_context(
    messages: str | list[str],
    context_data: dict[str, Any],
    system_prompt: str | None = None,
    **kwargs: Any,
) -> str:
    """Request an LLM completion with additional context data.

    Convenience wrapper over :func:`sample` that renders ``context_data``
    as "key: value" lines and appends it to the message(s).

    Args:
        messages: The message(s) to send to the LLM
        context_data: Dictionary of context data to include
        system_prompt: Optional system prompt
        **kwargs: Additional arguments passed to sample()

    Returns:
        The LLM response as a string

    Example:
        ```python
        from golf.utilities import sample_with_context

        async def generate_report(topic: str, user_data: dict):
            report = await sample_with_context(
                f"Generate a report about {topic}",
                context_data={
                    "user_preferences": user_data,
                    "timestamp": "2024-01-01",
                    "format": "markdown"
                },
                system_prompt="You are a professional report writer"
            )
            return report
        ```
    """
    # Render the context dict as one "key: value" line per entry.
    rendered = "\n".join(f"{key}: {value}" for key, value in context_data.items())

    # Attach the rendered context to the outgoing message(s).
    if isinstance(messages, str):
        enriched: str | list[str] = f"{messages}\n\nContext:\n{rendered}"
    else:
        enriched = [*messages, f"Context:\n{rendered}"]

    return await sample(
        messages=enriched,
        system_prompt=system_prompt,
        **kwargs,
    )
216
+
217
+
218
# Rebind the public functions through the instrumentation decorator so
# telemetry (when installed) observes every sampling call; the no-op
# fallback defined above returns them unchanged.
sample = instrument_sampling(sample, "sample")
sample_structured = instrument_sampling(sample_structured, "structured")
sample_with_context = instrument_sampling(sample_with_context, "context")