gllm-core-binary 0.4.4__py3-none-manylinux_2_31_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. gllm_core/__init__.py +1 -0
  2. gllm_core/__init__.pyi +0 -0
  3. gllm_core/adapters/__init__.py +5 -0
  4. gllm_core/adapters/__init__.pyi +3 -0
  5. gllm_core/adapters/tool/__init__.py +6 -0
  6. gllm_core/adapters/tool/__init__.pyi +4 -0
  7. gllm_core/adapters/tool/google_adk.py +91 -0
  8. gllm_core/adapters/tool/google_adk.pyi +23 -0
  9. gllm_core/adapters/tool/langchain.py +130 -0
  10. gllm_core/adapters/tool/langchain.pyi +31 -0
  11. gllm_core/constants.py +55 -0
  12. gllm_core/constants.pyi +36 -0
  13. gllm_core/event/__init__.py +6 -0
  14. gllm_core/event/__init__.pyi +4 -0
  15. gllm_core/event/event_emitter.py +211 -0
  16. gllm_core/event/event_emitter.pyi +155 -0
  17. gllm_core/event/handler/__init__.py +7 -0
  18. gllm_core/event/handler/__init__.pyi +5 -0
  19. gllm_core/event/handler/console_event_handler.py +48 -0
  20. gllm_core/event/handler/console_event_handler.pyi +32 -0
  21. gllm_core/event/handler/event_handler.py +89 -0
  22. gllm_core/event/handler/event_handler.pyi +51 -0
  23. gllm_core/event/handler/print_event_handler.py +130 -0
  24. gllm_core/event/handler/print_event_handler.pyi +33 -0
  25. gllm_core/event/handler/stream_event_handler.py +85 -0
  26. gllm_core/event/handler/stream_event_handler.pyi +62 -0
  27. gllm_core/event/hook/__init__.py +5 -0
  28. gllm_core/event/hook/__init__.pyi +3 -0
  29. gllm_core/event/hook/event_hook.py +30 -0
  30. gllm_core/event/hook/event_hook.pyi +18 -0
  31. gllm_core/event/hook/json_stringify_event_hook.py +32 -0
  32. gllm_core/event/hook/json_stringify_event_hook.pyi +16 -0
  33. gllm_core/event/messenger.py +133 -0
  34. gllm_core/event/messenger.pyi +66 -0
  35. gllm_core/schema/__init__.py +8 -0
  36. gllm_core/schema/__init__.pyi +6 -0
  37. gllm_core/schema/chunk.py +148 -0
  38. gllm_core/schema/chunk.pyi +66 -0
  39. gllm_core/schema/component.py +546 -0
  40. gllm_core/schema/component.pyi +205 -0
  41. gllm_core/schema/event.py +50 -0
  42. gllm_core/schema/event.pyi +33 -0
  43. gllm_core/schema/schema_generator.py +150 -0
  44. gllm_core/schema/schema_generator.pyi +35 -0
  45. gllm_core/schema/tool.py +418 -0
  46. gllm_core/schema/tool.pyi +198 -0
  47. gllm_core/utils/__init__.py +32 -0
  48. gllm_core/utils/__init__.pyi +13 -0
  49. gllm_core/utils/analyzer.py +256 -0
  50. gllm_core/utils/analyzer.pyi +123 -0
  51. gllm_core/utils/binary_handler_factory.py +99 -0
  52. gllm_core/utils/binary_handler_factory.pyi +62 -0
  53. gllm_core/utils/chunk_metadata_merger.py +102 -0
  54. gllm_core/utils/chunk_metadata_merger.pyi +41 -0
  55. gllm_core/utils/concurrency.py +184 -0
  56. gllm_core/utils/concurrency.pyi +94 -0
  57. gllm_core/utils/event_formatter.py +69 -0
  58. gllm_core/utils/event_formatter.pyi +30 -0
  59. gllm_core/utils/google_sheets.py +115 -0
  60. gllm_core/utils/google_sheets.pyi +18 -0
  61. gllm_core/utils/imports.py +91 -0
  62. gllm_core/utils/imports.pyi +42 -0
  63. gllm_core/utils/logger_manager.py +339 -0
  64. gllm_core/utils/logger_manager.pyi +176 -0
  65. gllm_core/utils/main_method_resolver.py +185 -0
  66. gllm_core/utils/main_method_resolver.pyi +54 -0
  67. gllm_core/utils/merger_method.py +130 -0
  68. gllm_core/utils/merger_method.pyi +49 -0
  69. gllm_core/utils/retry.py +258 -0
  70. gllm_core/utils/retry.pyi +41 -0
  71. gllm_core/utils/similarity.py +29 -0
  72. gllm_core/utils/similarity.pyi +10 -0
  73. gllm_core/utils/validation.py +26 -0
  74. gllm_core/utils/validation.pyi +12 -0
  75. gllm_core_binary-0.4.4.dist-info/METADATA +177 -0
  76. gllm_core_binary-0.4.4.dist-info/RECORD +78 -0
  77. gllm_core_binary-0.4.4.dist-info/WHEEL +5 -0
  78. gllm_core_binary-0.4.4.dist-info/top_level.txt +1 -0
gllm_core/schema/tool.py
@@ -0,0 +1,418 @@
+ """Defines the Tool class for LM tools in the Model Context Protocol standard.
+
+ Authors:
+     Dimitrij Ray (dimitrij.ray@gdplabs.id)
+
+ References:
+     [1] https://modelcontextprotocol.io/
+ """
+ from __future__ import annotations
+
+ import asyncio
+ import inspect
+ import logging
+ from typing import Any, Callable, ParamSpec, TypeVar, get_type_hints
+
+ from pydantic import BaseModel, ConfigDict, Field, create_model, field_validator
+
+ P = ParamSpec("P")
+ R = TypeVar("R")
+
+ TYPE_HINT_RETURN = "return"
+ ARGUMENTS_FIELDS = ("args", "arguments", "parameters")
+ TERMINATING_FIELDS = ("returns", "yields", "raises", "attributes", "examples", "notes")
+
+
+ def _build_field_definitions(
+     params: dict[str, inspect.Parameter], type_hints: dict[str, Any], tool_description: str | None = None
+ ) -> dict[str, tuple[Any, Field]]:
+     """Build field definitions for creating a Pydantic model from function parameters.
+
+     The result is a dictionary mapping parameter names to tuples of (type_hint, Field).
+     For example, if the function has parameters `a: int` and `b: str`, the result would be:
+
+     ```python
+     {
+         "a": (int, Field(...)),
+         "b": (str, Field(...)),
+     }
+     ```
+
+     Args:
+         params (dict[str, inspect.Parameter]): Dictionary mapping parameter names to Parameter objects.
+         type_hints (dict[str, Any]): Dictionary mapping parameter names to their type hints.
+         tool_description (str | None, optional): Docstring that may contain parameter documentation.
+             Defaults to None.
+
+     Returns:
+         dict[str, tuple[Any, Field]]: Dictionary mapping parameter names to tuples of (type_hint, Field).
+     """
+     field_definitions = {}
+     for param_name, param in params.items():
+         if param.kind in {inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD}:
+             continue
+
+         type_hint = type_hints.get(param_name, Any)
+         default = ... if param.default is inspect.Parameter.empty else param.default
+
+         field_kwargs = {}
+         if tool_description:
+             param_docs = _extract_param_doc(tool_description, param_name)
+             if param_docs:
+                 field_kwargs["description"] = param_docs
+
+         field_definitions[param_name] = (type_hint, Field(default=default, **field_kwargs))
+
+     return field_definitions
+
+
+ def _extract_param_doc(docstring: str, param_name: str) -> str | None:
+     """Extracts parameter description from a Google-style docstring.
+
+     Args:
+         docstring (str): The function's docstring.
+         param_name (str): The parameter name to extract documentation for.
+
+     Returns:
+         str | None: The parameter description if found, None otherwise.
+     """
+     if not docstring:
+         return None
+
+     lines = docstring.split("\n")
+     in_args_section = False
+     param_doc = None
+
+     for line_text in lines:
+         current_line = line_text.strip()
+
+         if current_line.lower().startswith(ARGUMENTS_FIELDS):
+             in_args_section = True
+             continue
+
+         if not in_args_section:
+             continue
+
+         if current_line.lower().startswith(TERMINATING_FIELDS):
+             break
+
+         if current_line.startswith(f"{param_name}:"):
+             param_doc = current_line[len(param_name) + 1 :].strip()
+             break
+
+         if current_line.startswith(f"{param_name} ("):
+             param_doc = current_line[current_line.find(")") + 1 :].strip()
+             break
+
+     if param_doc is not None:
+         param_doc = param_doc.lstrip(":").strip()
+
+     return param_doc
+
+
+ def tool(
+     _func: Callable[P, R] | None = None,
+     *,
+     name: str | None = None,
+     description: str | None = None,
+     title: str | None = None,
+ ) -> Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
+     """Decorator to convert a function into a Tool.
+
+     This decorator analyzes the function signature and type hints to generate
+     the appropriate input_schema and output_schema for the tool.
+
+     Note that the output_schema is derived from the function's return type.
+     If the function is annotated with `-> None`, the output_schema will be empty.
+
+     Args:
+         name (str | None, optional): Optional name for the tool.
+             Defaults to None, in which case the function name is used.
+         description (str | None, optional): Optional description for the tool.
+             Defaults to None, in which case the function's docstring is used.
+         title (str | None, optional): Optional display title for the tool.
+             Defaults to None, in which case the function name is used.
+
+     Returns:
+         Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
+             If _func is provided, returns a decorated function.
+             Otherwise, returns a decorator that transforms a function into a Tool.
+
+     Examples:
+         ```python
+         @tool(description="Get weather information")
+         async def fetch_weather(location: str, units: str = "metric") -> dict:
+             '''Get weather information for a location.'''
+             # Implementation
+             return {"temperature": 22.5, "conditions": "sunny"}
+
+         # The function can be used normally
+         result = await fetch_weather("New York", "imperial")
+         ```
+
+     The decorator returns an instance of `Tool` that is callable and preserves key function metadata
+     (e.g., `__signature__`, `__doc__`). You can access the `Tool` attributes directly on the decorated
+     function name:
+
+     ```python
+     # After decoration, `fetch_weather` is a Tool instance
+     fetch_weather.name # str: tool identifier (defaults to function name)
+     fetch_weather.title # str | None: display title if provided
+     fetch_weather.description # str | None: description (defaults to function docstring)
+     fetch_weather.input_schema # BaseModel: Constructed Pydantic model for input (derived from type hints)
+     fetch_weather.output_schema # BaseModel | None: Constructed Pydantic model for output (derived from return type)
+     fetch_weather.is_async # bool: whether the underlying function is async
+
+     # You can call it directly (mirrors the original function semantics)
+     result = await fetch_weather(location="Tokyo", units="metric")
+
+     # Or use the unified invoke() helper (works for both sync and async implementations)
+     result = await fetch_weather.invoke(location="Tokyo", units="metric")
+     ```
+     """
+
+     def decorator(func: Callable[P, R]) -> Callable[P, R]:
+         tool_name = name or func.__name__
+         tool_description = description
+         if tool_description is None and func.__doc__:
+             tool_description = inspect.cleandoc(func.__doc__)
+
+         is_async = asyncio.iscoroutinefunction(func)
+         sig = inspect.signature(func)
+         params = sig.parameters
+         type_hints = get_type_hints(func)
+
+         field_definitions = _build_field_definitions(params, type_hints, tool_description)
+
+         tool_input_schema = create_model(f"{func.__name__}_input", **field_definitions)
+
+         tool_output_schema = None
+         if TYPE_HINT_RETURN in type_hints and type_hints[TYPE_HINT_RETURN] is not type(None):
+             return_type = type_hints[TYPE_HINT_RETURN]
+             tool_output_schema = create_model(f"{func.__name__}_output", result=(return_type, ...))
+
+         tool_instance = Tool(
+             name=tool_name,
+             description=tool_description,
+             title=title,
+             input_schema=tool_input_schema,
+             output_schema=tool_output_schema,
+             func=func,
+             is_async=is_async,
+         )
+
+         # Copy key function metadata for introspection and IDEs.
+         # This will allow `Tool` to behave like a function when used as a decorator.
+         for attr, default in (
+             ("__name__", tool_name),
+             ("__qualname__", tool_name),
+             ("__module__", __name__),
+             ("__doc__", tool_description),
+             ("__wrapped__", func),
+         ):
+             object.__setattr__(tool_instance, attr, getattr(func, attr, default))
+
+         return tool_instance
+
+     if _func is None:
+         return decorator
+
+     return decorator(_func)
+
+
+ class Tool(BaseModel):
+     """Model Context Protocol (MCP)-style Tool definition.
+
+     This class represents a tool that can be used by a language model to interact with the outside world,
+     following the Model Context Protocol (MCP) specification. Tools are defined by their name, description,
+     input and output schemas, and an optional function implementation.
+
+     The Tool class supports flexible schema handling, accepting either:
+     1. Dictionary-based JSON Schema objects
+     2. Pydantic BaseModel classes
+
+     When a Pydantic model is provided, it is automatically converted to a JSON Schema using
+     Pydantic's model_json_schema() method.
+
+     Supported use cases include:
+     1. Creating a tool with dictionary schemas for input/output
+     2. Creating a tool with Pydantic models for input/output
+     3. Using the @tool decorator to create a tool from a function's type hints
+
+     Attributes:
+         name (str): A string identifier for the tool, used for programmatic access.
+         description (str): A human-readable description of what the tool does.
+         input_schema (dict[str, Any] | type[BaseModel]): JSON Schema object or Pydantic model defining the expected
+             parameters.
+         title (str | None): Optional display name for the tool.
+         output_schema (dict[str, Any] | type[BaseModel] | None): Optional JSON Schema object or Pydantic model defining
+             the structure of the output.
+         annotations (dict[str, Any] | None): Optional additional tool information for enriching the tool definition.
+             According to MCP, display name precedence is: title, annotations.title, then name.
+         meta (dict[str, Any] | None): Optional additional metadata for internal use by the system.
+             Unlike annotations which provide additional information about the tool for clients,
+             meta is meant for private system-level metadata that shouldn't be exposed to end users.
+         func (Callable): The callable function that implements this tool's behavior.
+         is_async (bool): Whether the tool's function is asynchronous.
+     """
+
+     _log_level = logging.DEBUG
+
+     name: str
+     input_schema: dict[str, Any] | type[BaseModel] = Field(alias="inputSchema")
+     description: str | None = None
+     title: str | None = None
+     output_schema: dict[str, Any] | type[BaseModel] | None = Field(default=None, alias="outputSchema")
+     annotations: dict[str, Any] | None = None
+     meta: dict[str, Any] | None = Field(default=None, alias="_meta")
+
+     func: Callable | None = Field(default=None)
+     is_async: bool = Field(default=False)
+
+     model_config = ConfigDict(validate_by_name=True, arbitrary_types_allowed=True)
+
+     @classmethod
+     def from_langchain(cls, langchain_tool: Any) -> "Tool":
+         """Create a Tool from a LangChain tool instance.
+
+         Args:
+             langchain_tool (Any): LangChain tool implementation to convert.
+
+         Returns:
+             Tool: Tool instance derived from the LangChain representation.
+         """
+         from gllm_core.adapters.tool import from_langchain_tool # noqa: PLC0415
+
+         return from_langchain_tool(langchain_tool)
+
+     @classmethod
+     def from_google_adk(cls, function_declaration: Any, func: Callable | None = None) -> "Tool":
+         """Create a Tool from a Google ADK function declaration.
+
+         Args:
+             function_declaration (Any): Google ADK function declaration to convert.
+             func (Callable | None): Optional implementation callable for the tool.
+
+         Returns:
+             Tool: Tool instance derived from the Google ADK definition.
+         """
+         from gllm_core.adapters.tool import from_google_function # noqa: PLC0415
+
+         return from_google_function(function_declaration, func=func)
+
+     @field_validator("input_schema", mode="before")
+     @classmethod
+     def validate_input_schema(cls, v: Any):
+         """Validate and convert input_schema to JSON Schema dict if it's a Pydantic model.
+
+         Args:
+             v (Any): The input schema value (dict or Pydantic model).
+
+         Returns:
+             dict: A JSON Schema dict.
+
+         Raises:
+             ValueError: If the input schema is not a dict or Pydantic model.
+         """
+         if isinstance(v, dict):
+             return v
+         if isinstance(v, type) and issubclass(v, BaseModel):
+             return v.model_json_schema()
+         raise ValueError("input_schema must be a dict or Pydantic model")
+
+     @field_validator("output_schema", mode="before")
+     @classmethod
+     def validate_output_schema(cls, v: Any):
+         """Validate and convert output_schema to JSON Schema dict if it's a Pydantic model.
+
+         Args:
+             v (Any): The output schema value (dict, Pydantic model, or None).
+
+         Returns:
+             dict | None: A JSON Schema dict or None.
+
+         Raises:
+             ValueError: If the output schema is not None, dict, or Pydantic model.
+         """
+         if v is None:
+             return None
+         if isinstance(v, dict):
+             return v
+         if isinstance(v, type) and issubclass(v, BaseModel):
+             return v.model_json_schema()
+         raise ValueError("output_schema must be None, a dict, or Pydantic model")
+
+     @property
+     def _logger(self) -> logging.Logger:
+         """Returns a logger instance for this tool.
+
+         Returns:
+             logging.Logger: A logger configured with the class name and appropriate log level.
+         """
+         logger = logging.getLogger(f"{self.__class__.__module__}.{self.__class__.__name__}")
+         logger.setLevel(self._log_level)
+         return logger
+
+     def __call__(self, *args: Any, **kwargs: Any) -> Any:
+         """Call the underlying function.
+
+         Mirrors the original function's call semantics:
+         1. If the underlying function is synchronous, returns the result directly.
+         2. If asynchronous, returns a coroutine that must be awaited.
+
+         Args:
+             *args: Positional arguments for the function.
+             **kwargs: Keyword arguments for the function.
+
+         Returns:
+             Any: Result or coroutine depending on the underlying function.
+
+         Raises:
+             ValueError: If no implementation function is defined.
+         """
+         if self.func is None:
+             raise ValueError(f"Tool '{self.name}' has no implementation function defined.")
+         return self.func(*args, **kwargs)
+
+     @property
+     def __signature__(self) -> inspect.Signature: # type: ignore[override]
+         """Expose the underlying function's signature for introspection.
+
+         Returns:
+             inspect.Signature: Signature of the underlying function, or an empty signature if missing.
+         """
+         return inspect.signature(self.func) if self.func else inspect.Signature()
+
+     async def invoke(self, **kwargs: Any) -> Any:
+         """Executes the defined tool with the given parameters.
+
+         This method handles both synchronous and asynchronous underlying functions.
+
+         Args:
+             **kwargs: The parameters to pass to the tool function.
+                 These should match the input_schema definition.
+
+         Returns:
+             Any: The result of the tool execution.
+
+         Raises:
+             ValueError: If the tool function has not been defined.
+             TypeError: If the provided parameters don't match the expected schema.
+         """
+         if self.func is None:
+             raise ValueError(f"Tool '{self.name}' has no implementation function defined.")
+
+         self._logger.debug(f"Invoking tool '{self.name}' with params: {kwargs}")
+
+         try:
+             if self.is_async:
+                 result = await self.func(**kwargs)
+             else:
+                 loop = asyncio.get_running_loop()
+                 result = await loop.run_in_executor(None, lambda: self.func(**kwargs))
+
+             self._logger.debug(f"Tool '{self.name}' completed successfully with result: {result}")
+             return result
+         except Exception as e:
+             self._logger.error(f"Tool '{self.name}' failed with error: {str(e)}")
+             raise
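
The tool.py hunk above is the heart of the tool API: `@tool` builds input and output schemas from a function's signature and docstring, and the `Tool` model's `mode="before"` validators convert any Pydantic schema class into a plain JSON Schema dict. The sketch below is a minimal, unofficial illustration of that behavior, assuming only what the diff shows; `TempInput` and `to_fahrenheit` are hypothetical names invented for this example, and the import path used is the module path shown in this diff (the package may also re-export these names elsewhere).

```python
# Minimal sketch against gllm_core/schema/tool.py as shown above.
# `TempInput` and `to_fahrenheit` are hypothetical; only Tool/tool come from the package.
import asyncio

from pydantic import BaseModel

from gllm_core.schema.tool import Tool, tool


class TempInput(BaseModel):
    celsius: float


def to_fahrenheit(celsius: float) -> float:
    """Convert a Celsius temperature to Fahrenheit."""
    return celsius * 9 / 5 + 32


# Direct construction: the "before" validator converts the Pydantic class
# into a JSON Schema dict via model_json_schema().
manual_tool = Tool(name="to_fahrenheit", input_schema=TempInput, func=to_fahrenheit)
assert isinstance(manual_tool.input_schema, dict)

# Decorator form: schemas are derived from the signature and return annotation.
decorated_tool = tool(to_fahrenheit)


async def main() -> None:
    # invoke() awaits async implementations and runs sync ones in an executor.
    print(await manual_tool.invoke(celsius=100))      # 212.0
    print(await decorated_tool.invoke(celsius=37.5))  # 99.5


asyncio.run(main())
```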
gllm_core/schema/tool.pyi
@@ -0,0 +1,198 @@
+ import inspect
+ from _typeshed import Incomplete
+ from pydantic import BaseModel
+ from typing import Any, Callable, ParamSpec, TypeVar
+
+ P = ParamSpec('P')
+ R = TypeVar('R')
+ TYPE_HINT_RETURN: str
+ ARGUMENTS_FIELDS: Incomplete
+ TERMINATING_FIELDS: Incomplete
+
+ def tool(_func: Callable[P, R] | None = None, *, name: str | None = None, description: str | None = None, title: str | None = None) -> Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
+     '''Decorator to convert a function into a Tool.
+
+     This decorator analyzes the function signature and type hints to generate
+     the appropriate input_schema and output_schema for the tool.
+
+     Note that the output_schema is derived from the function\'s return type.
+     If the function is annotated with `-> None`, the output_schema will be empty.
+
+     Args:
+         name (str | None, optional): Optional name for the tool.
+             Defaults to None, in which case the function name is used.
+         description (str | None, optional): Optional description for the tool.
+             Defaults to None, in which case the function\'s docstring is used.
+         title (str | None, optional): Optional display title for the tool.
+             Defaults to None, in which case the function name is used.
+
+     Returns:
+         Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
+             If _func is provided, returns a decorated function.
+             Otherwise, returns a decorator that transforms a function into a Tool.
+
+     Examples:
+         ```python
+         @tool(description="Get weather information")
+         async def fetch_weather(location: str, units: str = "metric") -> dict:
+             \'\'\'Get weather information for a location.\'\'\'
+             # Implementation
+             return {"temperature": 22.5, "conditions": "sunny"}
+
+         # The function can be used normally
+         result = await fetch_weather("New York", "imperial")
+         ```
+
+     The decorator returns an instance of `Tool` that is callable and preserves key function metadata
+     (e.g., `__signature__`, `__doc__`). You can access the `Tool` attributes directly on the decorated
+     function name:
+
+     ```python
+     # After decoration, `fetch_weather` is a Tool instance
+     fetch_weather.name # str: tool identifier (defaults to function name)
+     fetch_weather.title # str | None: display title if provided
+     fetch_weather.description # str | None: description (defaults to function docstring)
+     fetch_weather.input_schema # BaseModel: Constructed Pydantic model for input (derived from type hints)
+     fetch_weather.output_schema # BaseModel | None: Constructed Pydantic model for output (derived from return type)
+     fetch_weather.is_async # bool: whether the underlying function is async
+
+     # You can call it directly (mirrors the original function semantics)
+     result = await fetch_weather(location="Tokyo", units="metric")
+
+     # Or use the unified invoke() helper (works for both sync and async implementations)
+     result = await fetch_weather.invoke(location="Tokyo", units="metric")
+     ```
+     '''
+
+ class Tool(BaseModel):
+     """Model Context Protocol (MCP)-style Tool definition.
+
+     This class represents a tool that can be used by a language model to interact with the outside world,
+     following the Model Context Protocol (MCP) specification. Tools are defined by their name, description,
+     input and output schemas, and an optional function implementation.
+
+     The Tool class supports flexible schema handling, accepting either:
+     1. Dictionary-based JSON Schema objects
+     2. Pydantic BaseModel classes
+
+     When a Pydantic model is provided, it is automatically converted to a JSON Schema using
+     Pydantic's model_json_schema() method.
+
+     Supported use cases include:
+     1. Creating a tool with dictionary schemas for input/output
+     2. Creating a tool with Pydantic models for input/output
+     3. Using the @tool decorator to create a tool from a function's type hints
+
+     Attributes:
+         name (str): A string identifier for the tool, used for programmatic access.
+         description (str): A human-readable description of what the tool does.
+         input_schema (dict[str, Any] | type[BaseModel]): JSON Schema object or Pydantic model defining the expected
+             parameters.
+         title (str | None): Optional display name for the tool.
+         output_schema (dict[str, Any] | type[BaseModel] | None): Optional JSON Schema object or Pydantic model defining
+             the structure of the output.
+         annotations (dict[str, Any] | None): Optional additional tool information for enriching the tool definition.
+             According to MCP, display name precedence is: title, annotations.title, then name.
+         meta (dict[str, Any] | None): Optional additional metadata for internal use by the system.
+             Unlike annotations which provide additional information about the tool for clients,
+             meta is meant for private system-level metadata that shouldn't be exposed to end users.
+         func (Callable): The callable function that implements this tool's behavior.
+         is_async (bool): Whether the tool's function is asynchronous.
+     """
+     name: str
+     input_schema: dict[str, Any] | type[BaseModel]
+     description: str | None
+     title: str | None
+     output_schema: dict[str, Any] | type[BaseModel] | None
+     annotations: dict[str, Any] | None
+     meta: dict[str, Any] | None
+     func: Callable | None
+     is_async: bool
+     model_config: Incomplete
+     @classmethod
+     def from_langchain(cls, langchain_tool: Any) -> Tool:
+         """Create a Tool from a LangChain tool instance.
+
+         Args:
+             langchain_tool (Any): LangChain tool implementation to convert.
+
+         Returns:
+             Tool: Tool instance derived from the LangChain representation.
+         """
+     @classmethod
+     def from_google_adk(cls, function_declaration: Any, func: Callable | None = None) -> Tool:
+         """Create a Tool from a Google ADK function declaration.
+
+         Args:
+             function_declaration (Any): Google ADK function declaration to convert.
+             func (Callable | None): Optional implementation callable for the tool.
+
+         Returns:
+             Tool: Tool instance derived from the Google ADK definition.
+         """
+     @classmethod
+     def validate_input_schema(cls, v: Any):
+         """Validate and convert input_schema to JSON Schema dict if it's a Pydantic model.
+
+         Args:
+             v (Any): The input schema value (dict or Pydantic model).
+
+         Returns:
+             dict: A JSON Schema dict.
+
+         Raises:
+             ValueError: If the input schema is not a dict or Pydantic model.
+         """
+     @classmethod
+     def validate_output_schema(cls, v: Any):
+         """Validate and convert output_schema to JSON Schema dict if it's a Pydantic model.
+
+         Args:
+             v (Any): The output schema value (dict, Pydantic model, or None).
+
+         Returns:
+             dict | None: A JSON Schema dict or None.
+
+         Raises:
+             ValueError: If the output schema is not None, dict, or Pydantic model.
+         """
+     def __call__(self, *args: Any, **kwargs: Any) -> Any:
+         """Call the underlying function.
+
+         Mirrors the original function's call semantics:
+         1. If the underlying function is synchronous, returns the result directly.
+         2. If asynchronous, returns a coroutine that must be awaited.
+
+         Args:
+             *args: Positional arguments for the function.
+             **kwargs: Keyword arguments for the function.
+
+         Returns:
+             Any: Result or coroutine depending on the underlying function.
+
+         Raises:
+             ValueError: If no implementation function is defined.
+         """
+     @property
+     def __signature__(self) -> inspect.Signature:
+         """Expose the underlying function's signature for introspection.
+
+         Returns:
+             inspect.Signature: Signature of the underlying function, or an empty signature if missing.
+         """
+     async def invoke(self, **kwargs: Any) -> Any:
+         """Executes the defined tool with the given parameters.
+
+         This method handles both synchronous and asynchronous underlying functions.
+
+         Args:
+             **kwargs: The parameters to pass to the tool function.
+                 These should match the input_schema definition.
+
+         Returns:
+             Any: The result of the tool execution.
+
+         Raises:
+             ValueError: If the tool function has not been defined.
+             TypeError: If the provided parameters don't match the expected schema.
+         """
gllm_core/utils/__init__.py
@@ -0,0 +1,32 @@
+ """Utility modules for use in the GLLM Core package."""
+
+ from gllm_core.utils.analyzer import RunAnalyzer
+ from gllm_core.utils.binary_handler_factory import BinaryHandlingStrategy, binary_handler_factory
+ from gllm_core.utils.chunk_metadata_merger import ChunkMetadataMerger
+ from gllm_core.utils.concurrency import asyncify, get_default_portal, syncify
+ from gllm_core.utils.event_formatter import format_chunk_message, get_placeholder_keys
+ from gllm_core.utils.google_sheets import load_gsheets
+ from gllm_core.utils.logger_manager import LoggerManager
+ from gllm_core.utils.main_method_resolver import MainMethodResolver
+ from gllm_core.utils.merger_method import MergerMethod
+ from gllm_core.utils.retry import RetryConfig, retry
+ from gllm_core.utils.validation import validate_string_enum
+
+ __all__ = [
+     "BinaryHandlingStrategy",
+     "ChunkMetadataMerger",
+     "LoggerManager",
+     "MainMethodResolver",
+     "MergerMethod",
+     "RunAnalyzer",
+     "RetryConfig",
+     "asyncify",
+     "get_default_portal",
+     "binary_handler_factory",
+     "format_chunk_message",
+     "get_placeholder_keys",
+     "load_gsheets",
+     "syncify",
+     "retry",
+     "validate_string_enum",
+ ]
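
The `__init__.py` above gives `gllm_core.utils` a flat public namespace. The signatures of the individual helpers are not part of this diff, so the sketch below only demonstrates the import surface that `__all__` establishes; nothing is called.

```python
# Import-surface illustration only; helper signatures are not shown in this diff.
from gllm_core.utils import LoggerManager, RetryConfig, asyncify, retry, syncify

# The flat names are the same objects as the deep submodule paths they re-export.
from gllm_core.utils.concurrency import asyncify as _asyncify
from gllm_core.utils.retry import RetryConfig as _RetryConfig

assert asyncify is _asyncify
assert RetryConfig is _RetryConfig
```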
gllm_core/utils/__init__.pyi
@@ -0,0 +1,13 @@
+ from gllm_core.utils.analyzer import RunAnalyzer as RunAnalyzer
+ from gllm_core.utils.binary_handler_factory import BinaryHandlingStrategy as BinaryHandlingStrategy, binary_handler_factory as binary_handler_factory
+ from gllm_core.utils.chunk_metadata_merger import ChunkMetadataMerger as ChunkMetadataMerger
+ from gllm_core.utils.concurrency import asyncify as asyncify, get_default_portal as get_default_portal, syncify as syncify
+ from gllm_core.utils.event_formatter import format_chunk_message as format_chunk_message, get_placeholder_keys as get_placeholder_keys
+ from gllm_core.utils.google_sheets import load_gsheets as load_gsheets
+ from gllm_core.utils.logger_manager import LoggerManager as LoggerManager
+ from gllm_core.utils.main_method_resolver import MainMethodResolver as MainMethodResolver
+ from gllm_core.utils.merger_method import MergerMethod as MergerMethod
+ from gllm_core.utils.retry import RetryConfig as RetryConfig, retry as retry
+ from gllm_core.utils.validation import validate_string_enum as validate_string_enum
+
+ __all__ = ['BinaryHandlingStrategy', 'ChunkMetadataMerger', 'LoggerManager', 'MainMethodResolver', 'MergerMethod', 'RunAnalyzer', 'RetryConfig', 'asyncify', 'get_default_portal', 'binary_handler_factory', 'format_chunk_message', 'get_placeholder_keys', 'load_gsheets', 'syncify', 'retry', 'validate_string_enum']