arcade-core 2.4.0.tar.gz → 2.5.0rc2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/.gitignore +3 -0
  2. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/PKG-INFO +1 -4
  3. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/catalog.py +20 -9
  4. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/config_model.py +10 -5
  5. arcade_core-2.5.0rc2/arcade_core/context.py +128 -0
  6. arcade_core-2.5.0rc2/arcade_core/converters/openai.py +220 -0
  7. arcade_core-2.5.0rc2/arcade_core/discovery.py +253 -0
  8. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/parse.py +12 -0
  9. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/schema.py +56 -17
  10. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/toolkit.py +74 -3
  11. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/utils.py +4 -1
  12. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/pyproject.toml +1 -4
  13. arcade_core-2.4.0/arcade_core/telemetry.py +0 -130
  14. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/README.md +0 -0
  15. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/__init__.py +0 -0
  16. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/annotations.py +0 -0
  17. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/auth.py +0 -0
  18. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/config.py +0 -0
  19. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/errors.py +0 -0
  20. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/executor.py +0 -0
  21. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/output.py +0 -0
  22. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/py.typed +0 -0
  23. {arcade_core-2.4.0 → arcade_core-2.5.0rc2}/arcade_core/version.py +0 -0
@@ -173,3 +173,6 @@ cython_debug/
  # and can be added to the global gitignore or merged into this file. For a more nuclear
  # option (not recommended) you can uncomment the following to ignore the entire idea folder.
  #.idea/
+
+ # Docs
+ libs/arcade-mcp-server/site/*
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: arcade-core
- Version: 2.4.0
+ Version: 2.5.0rc2
  Summary: Arcade Core - Core library for Arcade platform
  Author-email: Arcade <dev@arcade.dev>
  License: MIT
@@ -14,9 +14,6 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Python: >=3.10
  Requires-Dist: loguru>=0.7.0
- Requires-Dist: opentelemetry-exporter-otlp-proto-common==1.28.2
- Requires-Dist: opentelemetry-exporter-otlp-proto-http==1.28.2
- Requires-Dist: opentelemetry-instrumentation-fastapi==0.49b2
  Requires-Dist: packaging>=24.1
  Requires-Dist: pydantic>=2.7.0
  Requires-Dist: pyjwt>=2.8.0
@@ -405,7 +405,9 @@ class ToolCatalog(BaseModel):
  # Hard requirement: tools must have descriptions
  tool_description = getattr(tool, "__tool_description__", None)
  if not tool_description:
- raise ToolDefinitionError(f"Tool '{raw_tool_name}' is missing a description")
+ raise ToolDefinitionError(
+ f"Tool '{raw_tool_name}' is missing a description. Tool descriptions are specified as docstrings for the tool function."
+ )

  # If the function returns a value, it must have a type annotation
  if does_function_return_value(tool) and tool.__annotations__.get("return") is None:
@@ -449,7 +451,9 @@ def create_input_definition(func: Callable) -> ToolInput:
  tool_context_param_name: str | None = None

  for _, param in inspect.signature(func, follow_wrapped=True).parameters.items():
- if param.annotation is ToolContext:
+ ann = param.annotation
+ if isinstance(ann, type) and issubclass(ann, ToolContext):
+ # Soft guidance for developers using legacy ToolContext
  if tool_context_param_name is not None:
  raise ToolInputSchemaError(
  f"Only one ToolContext parameter is supported, but tool {func.__name__} has multiple."
@@ -690,7 +694,9 @@ def extract_field_info(param: inspect.Parameter) -> ToolParamInfo:

  # Final reality check
  if param_info.description is None:
- raise ToolInputSchemaError(f"Parameter '{param_info.name}' is missing a description")
+ raise ToolInputSchemaError(
+ f"Parameter '{param_info.name}' is missing a description. Parameter descriptions are specified as string annotations using the typing.Annotated class."
+ )

  if wire_type_info.wire_type is None:
  raise ToolInputSchemaError(f"Unknown parameter type: {param_info.field_type}")
@@ -983,8 +989,9 @@ def create_func_models(func: Callable) -> tuple[type[BaseModel], type[BaseModel]
  if asyncio.iscoroutinefunction(func) and hasattr(func, "__wrapped__"):
  func = func.__wrapped__
  for name, param in inspect.signature(func, follow_wrapped=True).parameters.items():
- # Skip ToolContext parameters
- if param.annotation is ToolContext:
+ # Skip ToolContext parameters (including subclasses like arcade_mcp_server.Context)
+ ann = param.annotation
+ if isinstance(ann, type) and issubclass(ann, ToolContext):
  continue

  # TODO make this cleaner
@@ -1004,7 +1011,7 @@ def create_func_models(func: Callable) -> tuple[type[BaseModel], type[BaseModel]
  return input_model, output_model


- def determine_output_model(func: Callable) -> type[BaseModel]: # noqa: C901
+ def determine_output_model(func: Callable) -> type[BaseModel]:
  """
  Determine the output model for a function based on its return annotation.
  """
@@ -1149,9 +1156,13 @@ def create_model_from_typeddict(typeddict_class: type, model_name: str) -> type[
  def to_tool_secret_requirements(
  secrets_requirement: list[str],
  ) -> list[ToolSecretRequirement]:
- # Iterate through the list, de-dupe case-insensitively, and convert each string to a ToolSecretRequirement
- unique_secrets = {name.lower(): name.lower() for name in secrets_requirement}.values()
- return [ToolSecretRequirement(key=name) for name in unique_secrets]
+ # De-dupe case-insensitively but preserve the original casing for env var lookup
+ unique_map: dict[str, str] = {}
+ for name in secrets_requirement:
+ lowered = str(name).lower()
+ if lowered not in unique_map:
+ unique_map[lowered] = str(name)
+ return [ToolSecretRequirement(key=orig_name) for orig_name in unique_map.values()]


  def to_tool_metadata_requirements(
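The rewritten de-dupe above keeps the first casing it sees for each secret key, which matters when the key is later used for an environment-variable lookup. A minimal sketch of that behavior, assuming to_tool_secret_requirements is importable from arcade_core.catalog (the module this hunk modifies):

    from arcade_core.catalog import to_tool_secret_requirements

    secrets = ["GITHUB_TOKEN", "github_token", "SLACK_TOKEN"]
    requirements = to_tool_secret_requirements(secrets)
    # First casing wins for duplicates, so the env-var style spelling is preserved
    print([r.key for r in requirements])  # expected: ['GITHUB_TOKEN', 'SLACK_TOKEN']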
@@ -99,19 +99,24 @@ class Config(BaseConfig):
  config_file_path = cls.get_config_file_path()

  if not config_file_path.exists():
- # Create a file using the default configuration
- default_config = cls.model_construct(api=ApiConfig.model_construct())
- default_config.save_to_file()
+ raise FileNotFoundError(
+ f"Configuration file not found at {config_file_path}. "
+ "Please run 'arcade login' to create your configuration."
+ )

  config_data = yaml.safe_load(config_file_path.read_text())

  if config_data is None:
  raise ValueError(
- "Invalid credentials.yaml file. Please ensure it is a valid YAML file."
+ "Invalid credentials.yaml file. Please ensure it is a valid YAML file or"
+ "run `arcade logout`, then `arcade login` to start from a clean slate."
  )

  if "cloud" not in config_data:
- raise ValueError("Invalid credentials.yaml file. Expected a 'cloud' key.")
+ raise ValueError(
+ "Invalid credentials.yaml file. Expected a 'cloud' key."
+ "Run `arcade logout`, then `arcade login` to start from a clean slate."
+ )

  try:
  return cls(**config_data["cloud"])
@@ -0,0 +1,128 @@
+ """
+ Arcade Core Runtime Context Protocols
+
+ Defines the developer-facing, transport-agnostic runtime context interfaces
+ (namespaced APIs: logs, progress, resources, tools, prompts, sampling, UI,
+ notifications) and the top-level ModelContext Protocol that aggregates them.
+
+ Implementations live in runtime packages (e.g., arcade_mcp_server); tool authors should
+ use `arcade_mcp_server.Context` for concrete usage.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any, Protocol, runtime_checkable
+
+ from pydantic import BaseModel
+
+
+ class LogsContext(Protocol):
+ async def debug(self, message: str, **kwargs: dict[str, Any]) -> None: ...
+
+ async def info(self, message: str, **kwargs: dict[str, Any]) -> None: ...
+
+ async def warning(self, message: str, **kwargs: dict[str, Any]) -> None: ...
+
+ async def error(self, message: str, **kwargs: dict[str, Any]) -> None: ...
+
+
+ class ProgressContext(Protocol):
+ async def report(
+ self, progress: float, total: float | None = None, message: str | None = None
+ ) -> None: ...
+
+
+ class ResourcesContext(Protocol):
+ async def list_(self) -> list[Any]: ...
+
+ async def get(self, uri: str) -> Any: ...
+
+ async def read(self, uri: str) -> list[Any]: ...
+
+ async def list_roots(self) -> list[Any]: ...
+
+ async def list_templates(self) -> list[Any]: ...
+
+
+ class ToolsContext(Protocol):
+ async def list_(self) -> list[Any]: ...
+
+ async def call_raw(self, name: str, params: dict[str, Any]) -> BaseModel: ...
+
+
+ class PromptsContext(Protocol):
+ async def list_(self) -> list[Any]: ...
+
+ async def get(self, name: str, arguments: dict[str, str] | None = None) -> Any: ...
+
+
+ class SamplingContext(Protocol):
+ async def create_message(
+ self,
+ messages: str | list[str | Any],
+ system_prompt: str | None = None,
+ include_context: str | None = None,
+ temperature: float | None = None,
+ max_tokens: int | None = None,
+ model_preferences: Any | None = None,
+ ) -> Any: ...
+
+
+ class UIContext(Protocol):
+ async def elicit(self, message: str, schema: dict[str, Any] | None = None) -> Any: ...
+
+
+ class NotificationsToolsContext(Protocol):
+ async def list_changed(self) -> None: ...
+
+
+ class NotificationsResourcesContext(Protocol):
+ async def list_changed(self) -> None: ...
+
+
+ class NotificationsPromptsContext(Protocol):
+ async def list_changed(self) -> None: ...
+
+
+ class NotificationsContext(Protocol):
+ @property
+ def tools(self) -> NotificationsToolsContext: ...
+
+ @property
+ def resources(self) -> NotificationsResourcesContext: ...
+
+ @property
+ def prompts(self) -> NotificationsPromptsContext: ...
+
+
+ @runtime_checkable
+ class ModelContext(Protocol):
+ @property
+ def log(self) -> LogsContext: ...
+
+ @property
+ def progress(self) -> ProgressContext: ...
+
+ @property
+ def resources(self) -> ResourcesContext: ...
+
+ @property
+ def tools(self) -> ToolsContext: ...
+
+ @property
+ def prompts(self) -> PromptsContext: ...
+
+ @property
+ def sampling(self) -> SamplingContext: ...
+
+ @property
+ def ui(self) -> UIContext: ...
+
+ @property
+ def notifications(self) -> NotificationsContext: ...
+
+ @property
+ def request_id(self) -> str | None: ...
+
+ @property
+ def session_id(self) -> str | None: ...
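The protocols above only describe the runtime surface; concrete implementations live in runtime packages. A hedged sketch of a tool using the namespaced APIs, where the arcade_mcp_server.Context import follows the module docstring above and the arcade_tdk tool decorator is an assumption, not something this diff shows:

    from typing import Annotated

    from arcade_mcp_server import Context  # concrete ModelContext implementation (per docstring above)
    from arcade_tdk import tool            # assumed decorator location

    @tool
    async def summarize(
        context: Context,
        text: Annotated[str, "Text to summarize"],
    ) -> str:
        """Summarize the given text."""
        await context.log.info("starting summarization")
        await context.progress.report(progress=0.5, total=1.0, message="halfway there")
        return text[:100]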
@@ -0,0 +1,220 @@
+ """Converter for converting Arcade ToolDefinition to OpenAI tool schema."""
+
+ from typing import Any, Literal, TypedDict
+
+ from arcade_core.catalog import MaterializedTool
+ from arcade_core.schema import InputParameter, ValueSchema
+
+ # ----------------------------------------------------------------------------
+ # Type definitions for JSON tool schemas used by OpenAI APIs.
+ # Defines the proper types for tool schemas to ensure
+ # compatibility with OpenAI's Responses and Chat Completions APIs.
+ # ----------------------------------------------------------------------------
+
+
+ class OpenAIFunctionParameterProperty(TypedDict, total=False):
+ """Type definition for a property within OpenAI function parameters schema."""
+
+ type: str | list[str]
+ """The JSON Schema type(s) for this property. Can be a single type or list for unions (e.g., ["string", "null"])."""
+
+ description: str
+ """Description of the property."""
+
+ enum: list[Any]
+ """Allowed values for enum properties."""
+
+ items: dict[str, Any]
+ """Schema for array items when type is 'array'."""
+
+ properties: dict[str, "OpenAIFunctionParameterProperty"]
+ """Nested properties when type is 'object'."""
+
+ required: list[str]
+ """Required fields for nested objects."""
+
+ additionalProperties: Literal[False]
+ """Must be False for strict mode compliance."""
+
+
+ class OpenAIFunctionParameters(TypedDict, total=False):
+ """Type definition for OpenAI function parameters schema."""
+
+ type: Literal["object"]
+ """Must be 'object' for function parameters."""
+
+ properties: dict[str, OpenAIFunctionParameterProperty]
+ """The properties of the function parameters."""
+
+ required: list[str]
+ """List of required parameter names. In strict mode, all properties should be listed here."""
+
+ additionalProperties: Literal[False]
+ """Must be False for strict mode compliance."""
+
+
+ class OpenAIFunctionSchema(TypedDict, total=False):
+ """Type definition for a function tool parameter matching OpenAI's API."""
+
+ name: str
+ """The name of the function to call."""
+
+ parameters: OpenAIFunctionParameters | None
+ """A JSON schema object describing the parameters of the function."""
+
+ strict: Literal[True]
+ """Always enforce strict parameter validation. Default `true`."""
+
+ description: str | None
+ """A description of the function.
+ Used by the model to determine whether or not to call the function.
+ """
+
+
+ class OpenAIToolSchema(TypedDict):
+ """
+ Schema for a tool definition passed to OpenAI's `tools` parameter.
+ A tool wraps a callable function for function-calling. Each tool
+ includes a type (always 'function') and a `function` payload that
+ specifies the callable via `OpenAIFunctionSchema`.
+ """
+
+ type: Literal["function"]
+ """The type field, always 'function'."""
+
+ function: OpenAIFunctionSchema
+ """The function definition."""
+
+
+ # Type alias for a list of openai tool schemas
+ OpenAIToolList = list[OpenAIToolSchema]
+
+
+ # ----------------------------------------------------------------------------
+ # Converters
+ # ----------------------------------------------------------------------------
+ def to_openai(tool: MaterializedTool) -> OpenAIToolSchema:
+ """Convert a MaterializedTool to OpenAI JsonToolSchema format.
+
+ Args:
+ tool: The MaterializedTool to convert
+ Returns:
+ The OpenAI JsonToolSchema format (what is passed to the OpenAI API)
+ """
+ name = tool.definition.fully_qualified_name.replace(".", "_")
+ description = tool.description
+ parameters_schema = _convert_input_parameters_to_json_schema(tool.definition.input.parameters)
+ return _create_tool_schema(name, description, parameters_schema)
+
+
+ def _create_tool_schema(
+ name: str, description: str, parameters: OpenAIFunctionParameters
+ ) -> OpenAIToolSchema:
+ """Create a properly typed tool schema.
+ Args:
+ name: The name of the function
+ description: Description of what the function does
+ parameters: JSON schema for the function parameters
+ strict: Whether to enforce strict validation (default: True for reliable function calls)
+ Returns:
+ A properly typed OpenAIToolSchema
+ """
+
+ function: OpenAIFunctionSchema = {
+ "name": name,
+ "description": description,
+ "parameters": parameters,
+ "strict": True,
+ }
+
+ tool: OpenAIToolSchema = {
+ "type": "function",
+ "function": function,
+ }
+
+ return tool
+
+
+ def _convert_value_schema_to_json_schema(
+ value_schema: ValueSchema,
+ ) -> OpenAIFunctionParameterProperty:
+ """Convert Arcade ValueSchema to JSON Schema format."""
+ type_mapping = {
+ "string": "string",
+ "integer": "integer",
+ "number": "number",
+ "boolean": "boolean",
+ "json": "object",
+ "array": "array",
+ }
+
+ schema: OpenAIFunctionParameterProperty = {"type": type_mapping[value_schema.val_type]}
+
+ if value_schema.val_type == "array" and value_schema.inner_val_type:
+ items_schema: dict[str, Any] = {"type": type_mapping[value_schema.inner_val_type]}
+
+ # For arrays, enum should be applied to the items, not the array itself
+ if value_schema.enum:
+ items_schema["enum"] = value_schema.enum
+
+ schema["items"] = items_schema
+ else:
+ # Handle enum for non-array types
+ if value_schema.enum:
+ schema["enum"] = value_schema.enum
+
+ # Handle object properties
+ if value_schema.val_type == "json" and value_schema.properties:
+ schema["properties"] = {
+ name: _convert_value_schema_to_json_schema(nested_schema)
+ for name, nested_schema in value_schema.properties.items()
+ }
+
+ return schema
+
+
+ def _convert_input_parameters_to_json_schema(
+ parameters: list[InputParameter],
+ ) -> OpenAIFunctionParameters:
+ """Convert list of InputParameter to JSON schema parameters object."""
+ if not parameters:
+ # Minimal JSON schema for a tool with no input parameters
+ return {
+ "type": "object",
+ "properties": {},
+ "additionalProperties": False,
+ }
+
+ properties = {}
+ required = []
+
+ for parameter in parameters:
+ param_schema = _convert_value_schema_to_json_schema(parameter.value_schema)
+
+ # For optional parameters in strict mode, we need to add "null" as a type option
+ if not parameter.required:
+ param_type = param_schema.get("type")
+ if isinstance(param_type, str):
+ # Convert single type to union with null
+ param_schema["type"] = [param_type, "null"]
+ elif isinstance(param_type, list) and "null" not in param_type:
+ param_schema["type"] = [*param_type, "null"]
+
+ if parameter.description:
+ param_schema["description"] = parameter.description
+ properties[parameter.name] = param_schema
+
+ # In strict mode, all parameters (including optional ones) go in required array
+ # Optional parameters are handled by adding "null" to their type
+ required.append(parameter.name)
+
+ json_schema: OpenAIFunctionParameters = {
+ "type": "object",
+ "properties": properties,
+ "required": required,
+ "additionalProperties": False,
+ }
+ if not required:
+ del json_schema["required"]
+
+ return json_schema
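A hedged usage sketch for the converter above: building the tools payload for an OpenAI request from a populated catalog. That iterating a ToolCatalog yields MaterializedTool objects is an assumption; only to_openai and the type aliases are defined in this file:

    from arcade_core.catalog import ToolCatalog
    from arcade_core.converters.openai import OpenAIToolList, to_openai

    def openai_tools_from_catalog(catalog: ToolCatalog) -> OpenAIToolList:
        # Each tool becomes {"type": "function", "function": {...}} with strict=True
        return [to_openai(tool) for tool in catalog]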
@@ -0,0 +1,253 @@
+ """
+ Discovery utilities for Arcade Tools.
+
+ Provides modular, testable functions to discover toolkits and local tool files,
+ load modules, collect tools, and build a ToolCatalog.
+ """
+
+ from __future__ import annotations
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+ from typing import Any
+
+ from loguru import logger
+
+ from arcade_core.catalog import ToolCatalog
+ from arcade_core.parse import get_tools_from_file
+ from arcade_core.toolkit import Toolkit, ToolkitLoadError
+
+ DISCOVERY_PATTERNS = ["*.py", "tools/*.py", "arcade_tools/*.py", "tools/**/*.py"]
+ FILTER_PATTERNS = ["_test.py", "test_*.py", "__pycache__", "*.lock", "*.egg-info", "*.pyc"]
+
+
+ def normalize_package_name(package_name: str) -> str:
+ """Normalize a package name for import resolution."""
+ return package_name.lower().replace("-", "_")
+
+
+ def load_toolkit_from_package(package_name: str, show_packages: bool = False) -> Toolkit:
+ """Attempt to load a Toolkit from an installed package name."""
+ toolkit = Toolkit.from_package(package_name)
+ if show_packages:
+ logger.info(f"Loading package: {toolkit.name}")
+ return toolkit
+
+
+ def load_package(package_name: str, show_packages: bool = False) -> Toolkit:
+ """Load a toolkit for a specific package name.
+
+ Raises ToolkitLoadError if the package is not found.
+ """
+ normalized = normalize_package_name(package_name)
+ try:
+ return load_toolkit_from_package(normalized, show_packages)
+ except ToolkitLoadError:
+ return load_toolkit_from_package(f"arcade_{normalized}", show_packages)
+
+
+ def find_candidate_tool_files(root: Path | None = None) -> list[Path]:
+ """Find candidate Python files for auto-discovery in common locations."""
+ cwd = root or Path.cwd()
+
+ candidates: list[Path] = []
+ for pattern in DISCOVERY_PATTERNS:
+ candidates.extend(cwd.glob(pattern))
+ # Deduplicate candidates (same file might match multiple patterns)
+ unique_candidates = list(set(candidates))
+ # Filter out private, cache, and tests
+ return [
+ p for p in unique_candidates if not any(p.match(pattern) for pattern in FILTER_PATTERNS)
+ ]
+
+
+ def analyze_files_for_tools(files: list[Path]) -> list[tuple[Path, list[str]]]:
+ """Parse files with a fast AST pass to find declared @tool function names."""
+ results: list[tuple[Path, list[str]]] = []
+ for file_path in files:
+ try:
+ names = get_tools_from_file(file_path)
+ if names:
+ logger.info(f"Found {len(names)} tool(s) in {file_path.name}: {', '.join(names)}")
+ results.append((file_path, names))
+ except Exception:
+ logger.exception(f"Could not parse {file_path}")
+ return results
+
+
+ def load_module_from_path(file_path: Path) -> ModuleType:
+ """Dynamically import a Python module from a file path."""
+ import sys
+
+ # Add the directory containing the file to sys.path temporarily
+ # This allows local imports to work
+ file_dir = str(file_path.parent)
+ path_added = False
+ if file_dir not in sys.path:
+ sys.path.insert(0, file_dir)
+ path_added = True
+
+ try:
+ spec = importlib.util.spec_from_file_location(
+ f"_tools_{file_path.stem}",
+ file_path,
+ )
+ if not spec or not spec.loader:
+ raise ToolkitLoadError(f"Unable to create import spec for {file_path}")
+
+ module = importlib.util.module_from_spec(spec)
+ try:
+ spec.loader.exec_module(module)
+ except Exception:
+ logger.exception(f"Failed to load {file_path}")
+ raise ToolkitLoadError(f"Failed to load {file_path}")
+
+ return module
+ finally:
+ # Remove the path we added
+ if path_added and file_dir in sys.path:
+ sys.path.remove(file_dir)
+
+
+ def collect_tools_from_modules(
+ files_with_tools: list[tuple[Path, list[str]]],
+ ) -> list[tuple[Any, ModuleType]]:
+ """Load modules and collect the expected tool callables.
+
+ Returns a list of (callable, module) pairs.
+ """
+ discovered: list[tuple[Any, ModuleType]] = []
+
+ for file_path, expected_names in files_with_tools:
+ logger.debug(f"Loading tools from {file_path}...")
+ try:
+ module = load_module_from_path(file_path)
+ except ToolkitLoadError:
+ continue
+
+ for name in expected_names:
+ if hasattr(module, name):
+ attr = getattr(module, name)
+ if callable(attr) and hasattr(attr, "__tool_name__"):
+ discovered.append((attr, module))
+ else:
+ logger.warning(
+ f"Expected {name} to be a tool but it wasn't (missing __tool_name__)\n\n"
+ )
+ return discovered
+
+
+ def build_minimal_toolkit(
+ server_name: str | None,
+ server_version: str | None,
+ description: str | None = None,
+ ) -> Toolkit:
+ """Create a minimal Toolkit to host locally discovered tools."""
+ name = server_name or "ArcadeMCP"
+ version = server_version or "0.1.0dev"
+ pkg = f"{name}.{Path.cwd().name}"
+ desc = description or f"MCP Server for {name} version {version}"
+ return Toolkit(name=name, package_name=pkg, version=version, description=desc)
+
+
+ def build_catalog_from_toolkits(toolkits: list[Toolkit]) -> ToolCatalog:
+ """Create a ToolCatalog and add the provided toolkits."""
+ catalog = ToolCatalog()
+ for tk in toolkits:
+ catalog.add_toolkit(tk)
+ return catalog
+
+
+ def add_discovered_tools(
+ catalog: ToolCatalog,
+ toolkit: Toolkit,
+ tools: list[tuple[Any, ModuleType]],
+ ) -> None:
+ """Add discovered local tools to the catalog, preserving module context."""
+ for tool_func, module in tools:
+ if module.__name__ not in __import__("sys").modules:
+ __import__("sys").modules[module.__name__] = module
+ catalog.add_tool(tool_func, toolkit, module)
+
+
+ def load_toolkits_for_option(tool_package: str, show_packages: bool = False) -> list[Toolkit]:
+ """
+ Load toolkits for a given package option.
+
+ Args:
+ tool_package: Package name or comma-separated list of package names
+ show_packages: Whether to log loaded packages
+
+ Returns:
+ List of loaded toolkits
+ """
+ toolkits = []
+ packages = [p.strip() for p in tool_package.split(",")]
+
+ for package in packages:
+ try:
+ toolkit = load_package(package, show_packages)
+ toolkits.append(toolkit)
+ except ToolkitLoadError as e:
+ logger.warning(f"Failed to load package '{package}': {e}")
+
+ return toolkits
+
+
+ def load_all_installed_toolkits(show_packages: bool = False) -> list[Toolkit]:
+ """
+ Discover and load all installed arcade toolkits.
+
+ Args:
+ show_packages: Whether to log loaded packages
+
+ Returns:
+ List of all installed toolkits
+ """
+ toolkits = Toolkit.find_all_arcade_toolkits()
+
+ if show_packages:
+ for toolkit in toolkits:
+ logger.info(f"Loading package: {toolkit.name}")
+
+ return toolkits
+
+
+ def discover_tools(
+ tool_package: str | None = None,
+ show_packages: bool = False,
+ discover_installed: bool = False,
+ server_name: str | None = None,
+ server_version: str | None = None,
+ ) -> ToolCatalog:
+ """High-level discovery that returns a ToolCatalog.
+
+ This function is pure (does not sys.exit); callers should handle errors.
+ """
+ # 1) Package-based discovery
+ if tool_package:
+ toolkits = load_toolkits_for_option(tool_package, show_packages)
+ return build_catalog_from_toolkits(toolkits)
+
+ # 2) Discover all installed packages
+ if discover_installed:
+ toolkits = load_all_installed_toolkits(show_packages)
+ return build_catalog_from_toolkits(toolkits)
+
+ # 3) Local file discovery
+ logger.info("Auto-discovering tools from current directory")
+ files = find_candidate_tool_files()
+ if not files:
+ # Return empty catalog; caller can decide how to handle
+ return ToolCatalog()
+
+ files_with_tools = analyze_files_for_tools(files)
+ if not files_with_tools:
+ return ToolCatalog()
+
+ discovered = collect_tools_from_modules(files_with_tools)
+ catalog = ToolCatalog()
+ toolkit = build_minimal_toolkit(server_name, server_version)
+ add_discovered_tools(catalog, toolkit, discovered)
+ return catalog
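A hedged sketch of the high-level entry point above. The package names are hypothetical, and iterating the returned catalog to get materialized tools is an assumption based on how MaterializedTool is used elsewhere in this release:

    from arcade_core.discovery import discover_tools

    # Load specific installed toolkits by name (comma-separated, per load_toolkits_for_option)
    catalog = discover_tools(tool_package="github,slack", show_packages=True)

    # Or fall back to local-file discovery in the current directory
    local_catalog = discover_tools()

    for tool in catalog:
        print(tool.definition.fully_qualified_name)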
@@ -36,6 +36,18 @@ def get_function_name_if_decorated(
  and isinstance(decorator.func, ast.Name)
  and decorator.func.id in decorator_ids
  )
+ # Support MCPApp tools. e.g., @app.tool or @app.tool(...)
+ or (
+ isinstance(decorator, ast.Attribute)
+ and decorator.attr == "tool"
+ and isinstance(decorator.value, ast.Name)
+ )
+ or (
+ isinstance(decorator, ast.Call)
+ and isinstance(decorator.func, ast.Attribute)
+ and decorator.func.attr == "tool"
+ and isinstance(decorator.func.value, ast.Name)
+ )
  ):
  return node.name
  return None
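The new branches above make the AST pass recognize attribute-style decorators such as @app.tool and @app.tool(...) alongside the existing @tool forms. A hedged sketch that exercises this via get_tools_from_file; the app object and decorator arguments are illustrative, and this only checks the parser (nothing is imported or executed):

    import tempfile
    from textwrap import dedent

    from arcade_core.parse import get_tools_from_file

    source = dedent('''
        @app.tool
        def wave(): ...

        @app.tool(name="Cheer")
        def cheer(): ...
    ''')

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(source)

    print(get_tools_from_file(f.name))  # expected to include 'wave' and 'cheer'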
@@ -1,3 +1,21 @@
+ """
+ Arcade Core Schema
+
+ Defines transport-agnostic tool schemas and runtime context protocols used
+ across Arcade libraries. This includes:
+
+ - Tool and toolkit specifications (parameters, outputs, requirements)
+ - Transport-agnostic ToolContext carrying authorization, secrets, metadata
+ - Runtime ModelContext Protocol and its namespaced sub-protocols for logs,
+ progress, resources, tools, prompts, sampling, UI, and notifications
+
+ Note: ToolContext does not embed runtime capabilities; those are provided by
+ implementations of ModelContext (e.g., in arcade-mcp-server) that subclasses ToolContext
+ to expose the namespaced APIs to tools without changing function signatures.
+ """
+
+ from __future__ import annotations
+
  import os
  from dataclasses import dataclass
  from enum import Enum
@@ -23,10 +41,10 @@ class ValueSchema(BaseModel):
  enum: list[str] | None = None
  """The list of possible values for the value, if it is a closed list."""

- properties: dict[str, "ValueSchema"] | None = None
+ properties: dict[str, ValueSchema] | None = None
  """For object types (json), the schema of nested properties."""

- inner_properties: dict[str, "ValueSchema"] | None = None
+ inner_properties: dict[str, ValueSchema] | None = None
  """For array types with json items, the schema of properties for each array item."""

  description: str | None = None
@@ -100,7 +118,7 @@ class ToolAuthRequirement(BaseModel):
  # or
  # client.auth.authorize(provider=AuthProvider.google, scopes=["profile", "email"])
  #
- # The Arcade SDK translates these into the appropriate provider ID (Google) and type (OAuth2).
+ # The Arcade TDK translates these into the appropriate provider ID (Google) and type (OAuth2).
  # The only time the developer will set these is if they are using a custom auth provider.
  provider_id: str | None = None
  """The provider ID configured in Arcade that acts as an alias to well-known configuration."""
@@ -200,7 +218,7 @@ class FullyQualifiedName:
  (self.toolkit_version or "").lower(),
  ))

- def equals_ignoring_version(self, other: "FullyQualifiedName") -> bool:
+ def equals_ignoring_version(self, other: FullyQualifiedName) -> bool:
  """Check if two fully-qualified tool names are equal, ignoring the version."""
  return (
  self.name.lower() == other.name.lower()
@@ -208,7 +226,7 @@ class FullyQualifiedName:
  )

  @staticmethod
- def from_toolkit(tool_name: str, toolkit: ToolkitDefinition) -> "FullyQualifiedName":
+ def from_toolkit(tool_name: str, toolkit: ToolkitDefinition) -> FullyQualifiedName:
  """Creates a fully-qualified tool name from a tool name and a ToolkitDefinition."""
  return FullyQualifiedName(tool_name, toolkit.name, toolkit.version)

@@ -298,7 +316,16 @@ class ToolMetadataItem(BaseModel):


  class ToolContext(BaseModel):
- """The context for a tool invocation."""
+ """The context for a tool invocation.
+
+ This type is transport-agnostic and contains only authorization,
+ secret, and metadata information needed by the tool. Runtime-specific
+ capabilities (logging, resources, etc.) are provided by a separate
+ runtime context that wraps this object.
+
+ Recommendation: For new tools, annotate the parameter as
+ `arcade_mcp_server.Context` to access namespaced runtime APIs directly.
+ """

  authorization: ToolAuthorizationContext | None = None
  """The authorization context for the tool invocation that requires authorization."""
@@ -312,16 +339,35 @@ class ToolContext(BaseModel):
  user_id: str | None = None
  """The user ID for the tool invocation (if any)."""

+ model_config = {"arbitrary_types_allowed": True}
+
+ def set_secret(self, key: str, value: str) -> None:
+ """Add or update a secret to the tool context."""
+ if self.secrets is None:
+ self.secrets = []
+ # Update existing or add new
+ for secret in self.secrets:
+ if secret.key == key:
+ secret.value = value
+ return
+ self.secrets.append(ToolSecretItem(key=key, value=value))
+
  def get_auth_token_or_empty(self) -> str:
  """Retrieve the authorization token, or return an empty string if not available."""
  return self.authorization.token if self.authorization and self.authorization.token else ""

  def get_secret(self, key: str) -> str:
- """Retrieve the secret for the tool invocation."""
+ """Retrieve the secret for the tool invocation.
+
+ Raises a ValueError if the secret is not found.
+ """
  return self._get_item(key, self.secrets, "secret")

  def get_metadata(self, key: str) -> str:
- """Retrieve the metadata for the tool invocation."""
+ """Retrieve the metadata for the tool invocation.
+
+ Raises a ValueError if the metadata is not found.
+ """
  return self._get_item(key, self.metadata, "metadata")

  def _get_item(
@@ -335,21 +381,14 @@ class ToolContext(BaseModel):
  f"{item_name.capitalize()} key passed to get_{item_name} cannot be empty."
  )
  if not items:
- raise ValueError(f"{item_name.capitalize()}s not found in context.")
+ raise ValueError(f"{item_name.capitalize()} '{key}' not found in context.")

  normalized_key = key.lower()
  for item in items:
  if item.key.lower() == normalized_key:
  return item.value

- raise ValueError(f"{item_name.capitalize()} {key} not found in context.")
-
- def set_secret(self, key: str, value: str) -> None:
- """Set a secret for the tool invocation."""
- if not self.secrets:
- self.secrets = []
- secret = ToolSecretItem(key=str(key), value=str(value))
- self.secrets.append(secret)
+ raise ValueError(f"{item_name.capitalize()} '{key}' not found in context.")


  class ToolCallRequest(BaseModel):
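A hedged sketch of the revised ToolContext behavior shown above: set_secret now updates an existing key in place instead of appending a duplicate, and lookups raise a ValueError that names the missing key:

    from arcade_core.schema import ToolContext

    ctx = ToolContext()
    ctx.set_secret("API_KEY", "first")
    ctx.set_secret("API_KEY", "second")           # updates the existing entry
    assert ctx.get_secret("api_key") == "second"  # key matching is case-insensitive

    try:
        ctx.get_secret("MISSING")
    except ValueError as err:
        print(err)  # Secret 'MISSING' not found in context.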
@@ -6,6 +6,7 @@ import types
  from collections import defaultdict
  from pathlib import Path, PurePosixPath, PureWindowsPath

+ import toml
  from pydantic import BaseModel, ConfigDict, field_validator

  from arcade_core.errors import ToolkitLoadError
@@ -59,6 +60,71 @@ class Toolkit(BaseModel):
  """
  return cls.from_package(module.__name__)

+ @classmethod
+ def from_directory(cls, directory: Path) -> "Toolkit":
+ """
+ Load a Toolkit from a directory.
+ """
+ pyproject_path = directory / "pyproject.toml"
+ if not pyproject_path.exists():
+ raise ToolkitLoadError(f"pyproject.toml not found in {directory}")
+
+ try:
+ with open(pyproject_path) as f:
+ pyproject_data = toml.load(f)
+
+ project_data = pyproject_data.get("project", {})
+ name = project_data.get("name")
+ if not name:
+
+ def _missing_name_error() -> ToolkitLoadError:
+ return ToolkitLoadError("name not found in pyproject.toml")
+
+ raise _missing_name_error() # noqa: TRY301
+
+ package_name = name
+ version = project_data.get("version", "0.0.0")
+ description = project_data.get("description", "")
+ authors = project_data.get("authors", [])
+ author_names = [author.get("name", "") for author in authors]
+
+ # For homepage and repository, you might need to look under project.urls
+ urls = project_data.get("urls", {})
+ homepage = urls.get("Homepage")
+ repo = urls.get("Repository")
+
+ except Exception as e:
+ raise ToolkitLoadError(f"Failed to load metadata from {pyproject_path}: {e}")
+
+ # Determine the actual package directory (supports src/ layout and flat layout)
+ package_dir = directory
+ try:
+ src_candidate = directory / "src" / package_name
+ flat_candidate = directory / package_name
+ if src_candidate.is_dir():
+ package_dir = src_candidate
+ elif flat_candidate.is_dir():
+ package_dir = flat_candidate
+ else:
+ # Fallback to the provided directory; tools_from_directory will de-duplicate prefixes
+ package_dir = directory
+ except Exception:
+ package_dir = directory
+
+ toolkit = cls(
+ name=name,
+ package_name=package_name,
+ version=version,
+ description=description,
+ author=author_names,
+ homepage=homepage,
+ repository=repo,
+ )
+
+ toolkit.tools = cls.tools_from_directory(package_dir, package_name)
+
+ return toolkit
+
  @classmethod
  def from_package(cls, package: str) -> "Toolkit":
  """
@@ -232,9 +298,14 @@ class Toolkit(BaseModel):
  for module_path in modules:
  relative_path = module_path.relative_to(package_dir)
  cls.validate_file(module_path)
- import_path = ".".join(relative_path.with_suffix("").parts)
- import_path = f"{package_name}.{import_path}"
- tools[import_path] = get_tools_from_file(str(module_path))
+ # Build import path and avoid duplicating the package prefix if it already exists
+ relative_parts = relative_path.with_suffix("").parts
+ import_path = ".".join(relative_parts)
+ if relative_parts and relative_parts[0] == package_name:
+ full_import_path = import_path
+ else:
+ full_import_path = f"{package_name}.{import_path}" if import_path else package_name
+ tools[full_import_path] = get_tools_from_file(str(module_path))

  if not tools:
  raise ToolkitLoadError(f"No tools found in package {package_name}")
@@ -4,6 +4,7 @@ import ast
  import inspect
  import re
  from collections.abc import Callable, Iterable
+ from textwrap import dedent
  from types import UnionType
  from typing import Any, Literal, TypeVar, Union, get_args, get_origin

@@ -75,7 +76,9 @@ def does_function_return_value(func: Callable) -> bool:
  if source is None:
  raise ValueError("Source code not found")

- tree = ast.parse(source)
+ # dedent in case the function is an inner function
+ dedented_source = dedent(source)
+ tree = ast.parse(dedented_source)

  class ReturnVisitor(ast.NodeVisitor):
  def __init__(self) -> None:
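The dedent above matters because inspect.getsource returns indented text for nested functions, which ast.parse rejects. A small stdlib-only illustration of the failure mode being fixed:

    import ast
    import inspect
    from textwrap import dedent

    def outer():
        def inner() -> int:
            return 1
        return inner

    src = inspect.getsource(outer())
    ast.parse(dedent(src))   # parses cleanly
    # ast.parse(src) would raise IndentationError, which is what the change avoids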
@@ -1,6 +1,6 @@
  [project]
  name = "arcade-core"
- version = "2.4.0"
+ version = "2.5.0rc2"
  description = "Arcade Core - Core library for Arcade platform"
  readme = "README.md"
  license = {text = "MIT"}
@@ -28,9 +28,6 @@ dependencies = [
  "types-python-dateutil==2.9.0.20241003",
  "types-pytz==2024.2.0.20241003",
  "types-toml==0.10.8.20240310",
- "opentelemetry-instrumentation-fastapi==0.49b2",
- "opentelemetry-exporter-otlp-proto-http==1.28.2",
- "opentelemetry-exporter-otlp-proto-common==1.28.2",
  ]

  [project.optional-dependencies]
@@ -1,130 +0,0 @@
- import logging
- import os
- import urllib.parse
- from typing import Optional
-
- from fastapi import FastAPI
- from opentelemetry import _logs, trace
- from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
- from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
- from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
- from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
- from opentelemetry.metrics import Meter, get_meter_provider, set_meter_provider
- from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
- from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
- from opentelemetry.sdk.metrics import MeterProvider
- from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
- from opentelemetry.sdk.resources import SERVICE_NAME, Resource
- from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.sdk.trace.export import BatchSpanProcessor
-
-
- class ShutdownError(Exception):
- pass
-
-
- class OTELHandler:
- def __init__(self, enable: bool = True, log_level: int = logging.INFO):
- self.enable = enable
- self.log_level = log_level
- self._tracer_provider: Optional[TracerProvider] = None
- self._tracer_span_exporter: Optional[OTLPSpanExporter] = None
- self._meter_provider: Optional[MeterProvider] = None
- self._meter_reader: Optional[PeriodicExportingMetricReader] = None
- self._otlp_metric_exporter: Optional[OTLPMetricExporter] = None
- self._logger_provider: Optional[LoggerProvider] = None
- self._log_processor: Optional[BatchLogRecordProcessor] = None
- self.environment = os.environ.get("ARCADE_ENVIRONMENT", "local")
-
- def instrument_app(self, app: FastAPI) -> None:
- if self.enable:
- logging.info(
- "🔎 Initializing OpenTelemetry. Use environment variables to configure the connection"
- )
- self.resource = Resource(
- attributes={SERVICE_NAME: "arcade-worker", "environment": self.environment}
- )
-
- self._init_tracer()
- self._init_metrics()
- self._init_logging(self.log_level)
- FastAPIInstrumentor().instrument_app(app)
-
- def _init_tracer(self) -> None:
- self._tracer_provider = TracerProvider(resource=self.resource)
- trace.set_tracer_provider(self._tracer_provider)
-
- # Create an OTLP exporter
- self._tracer_span_exporter = OTLPSpanExporter()
-
- try:
- self._tracer_span_exporter.export([trace.get_tracer(__name__).start_span("ping")])
- except Exception as e:
- raise ConnectionError(
- f"Could not connect to OpenTelemetry Tracer endpoint. Check OpenTelemetry configuration or disable: {e}"
- )
-
- # Create a batch span processor and add the exporter
- span_processor = BatchSpanProcessor(self._tracer_span_exporter)
- self._tracer_provider.add_span_processor(span_processor)
-
- def _init_metrics(self) -> None:
- self._otlp_metric_exporter = OTLPMetricExporter()
-
- self._meter_reader = PeriodicExportingMetricReader(self._otlp_metric_exporter)
-
- self._meter_provider = MeterProvider(
- metric_readers=[self._meter_reader], resource=self.resource
- )
-
- set_meter_provider(self._meter_provider)
-
- def get_meter(self) -> Meter:
- return get_meter_provider().get_meter(__name__)
-
- def _init_logging(self, log_level: int) -> None:
- otlp_log_exporter = OTLPLogExporter()
-
- self._logger_provider = LoggerProvider(resource=self.resource)
- _logs.set_logger_provider(self._logger_provider)
-
- # Create a batch span processor and add the exporter
- self._log_processor = BatchLogRecordProcessor(otlp_log_exporter)
- self._logger_provider.add_log_record_processor(self._log_processor)
-
- handler = LoggingHandler(level=log_level, logger_provider=self._logger_provider)
- logging.getLogger().addHandler(handler)
-
- # Create a filter for urllib3 connection logs related to OpenTelemetry
- class OTELConnectionFilter(logging.Filter):
- def filter(self, record: logging.LogRecord) -> bool:
- # Filter out connection logs to OpenTelemetry endpoints
- parsed_url = urllib.parse.urlparse(
- os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT", "")
- )
- domain = parsed_url.netloc.split(":")[0]
- return not (domain and domain in str(getattr(record, "args", ())))
-
- # Apply the filter to the urllib3 logger
- urllib3_logger = logging.getLogger("urllib3.connectionpool")
- urllib3_logger.addFilter(OTELConnectionFilter())
-
- def _shutdown_tracer(self) -> None:
- if self._tracer_span_exporter is None:
- raise ShutdownError("Tracer provider not initialized. Failed to shutdown")
- self._tracer_span_exporter.shutdown()
-
- def _shutdown_metrics(self) -> None:
- if self._otlp_metric_exporter is None:
- raise ShutdownError("Meter provider not initialized. Failed to shutdown")
- self._otlp_metric_exporter.shutdown()
-
- def _shutdown_logging(self) -> None:
- if self._logger_provider is None:
- raise ShutdownError("Log provider not initialized. Failed to shutdown")
- self._logger_provider.shutdown()
-
- def shutdown(self) -> None:
- self._shutdown_tracer()
- self._shutdown_metrics()
- self._shutdown_logging()