polos-sdk 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. polos/__init__.py +105 -0
  2. polos/agents/__init__.py +7 -0
  3. polos/agents/agent.py +746 -0
  4. polos/agents/conversation_history.py +121 -0
  5. polos/agents/stop_conditions.py +280 -0
  6. polos/agents/stream.py +635 -0
  7. polos/core/__init__.py +0 -0
  8. polos/core/context.py +143 -0
  9. polos/core/state.py +26 -0
  10. polos/core/step.py +1380 -0
  11. polos/core/workflow.py +1192 -0
  12. polos/features/__init__.py +0 -0
  13. polos/features/events.py +456 -0
  14. polos/features/schedules.py +110 -0
  15. polos/features/tracing.py +605 -0
  16. polos/features/wait.py +82 -0
  17. polos/llm/__init__.py +9 -0
  18. polos/llm/generate.py +152 -0
  19. polos/llm/providers/__init__.py +5 -0
  20. polos/llm/providers/anthropic.py +615 -0
  21. polos/llm/providers/azure.py +42 -0
  22. polos/llm/providers/base.py +196 -0
  23. polos/llm/providers/fireworks.py +41 -0
  24. polos/llm/providers/gemini.py +40 -0
  25. polos/llm/providers/groq.py +40 -0
  26. polos/llm/providers/openai.py +1021 -0
  27. polos/llm/providers/together.py +40 -0
  28. polos/llm/stream.py +183 -0
  29. polos/middleware/__init__.py +0 -0
  30. polos/middleware/guardrail.py +148 -0
  31. polos/middleware/guardrail_executor.py +253 -0
  32. polos/middleware/hook.py +164 -0
  33. polos/middleware/hook_executor.py +104 -0
  34. polos/runtime/__init__.py +0 -0
  35. polos/runtime/batch.py +87 -0
  36. polos/runtime/client.py +841 -0
  37. polos/runtime/queue.py +42 -0
  38. polos/runtime/worker.py +1365 -0
  39. polos/runtime/worker_server.py +249 -0
  40. polos/tools/__init__.py +0 -0
  41. polos/tools/tool.py +587 -0
  42. polos/types/__init__.py +23 -0
  43. polos/types/types.py +116 -0
  44. polos/utils/__init__.py +27 -0
  45. polos/utils/agent.py +27 -0
  46. polos/utils/client_context.py +41 -0
  47. polos/utils/config.py +12 -0
  48. polos/utils/output_schema.py +311 -0
  49. polos/utils/retry.py +47 -0
  50. polos/utils/serializer.py +167 -0
  51. polos/utils/tracing.py +27 -0
  52. polos/utils/worker_singleton.py +40 -0
  53. polos_sdk-0.1.0.dist-info/METADATA +650 -0
  54. polos_sdk-0.1.0.dist-info/RECORD +55 -0
  55. polos_sdk-0.1.0.dist-info/WHEEL +4 -0
polos/tools/tool.py ADDED
@@ -0,0 +1,587 @@
1
+ """Tool class and decorator for defining tools that can be called by LLM agents."""
2
+
3
import inspect
import types
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, Union

from pydantic import BaseModel

from ..core.context import WorkflowContext
from ..core.workflow import _WORKFLOW_REGISTRY, Workflow
from ..runtime.client import PolosClient
from ..runtime.queue import Queue

if TYPE_CHECKING:
    from ..middleware.hook import Hook
16
+
17
+
18
class Tool(Workflow):
    """
    Base class for tools that can be called by LLM agents.

    Tools are workflows with additional metadata for LLM function calling.
    They can be:
    - Invoked via .invoke() without waiting for completion -
      This is the recommended way to invoke the tool.
    - Executed directly via .run(): This is a convenience method to invoke
      the tool and wait for completion.
    - Used in Agent tool lists

    Subclass this to create built-in tools like CodeInterpreter, BrowserTool, etc.
    """

    def __init__(
        self,
        id: str,
        description: str | None = None,
        parameters: dict[str, Any] | None = None,
        func: Callable | None = None,
        queue: str | Queue | dict[str, Any] | None = None,
        on_start: Union[str, list[str], "Hook", list["Hook"]] | None = None,
        on_end: Union[str, list[str], "Hook", list["Hook"]] | None = None,
        **kwargs,
    ):
        """
        Initialize a tool.

        Args:
            id: Unique tool identifier
            description: Description for LLM (what this tool does)
            parameters: JSON schema for tool parameters
            func: Optional function to execute (for simple tools)
            queue: Optional queue configuration
            on_start: Optional lifecycle hook(s) to run before tool execution
            on_end: Optional lifecycle hook(s) to run after tool execution
            **kwargs: Additional workflow configuration
        """
        # Parse queue configuration (queue may be a name, a Queue object, or a dict)
        queue_name = self._parse_queue_name(queue)
        queue_concurrency_limit = self._parse_queue_concurrency(queue)

        # Initialize as Workflow
        # NOTE(review): **kwargs is accepted (and documented as "additional
        # workflow configuration") but is NOT forwarded to Workflow.__init__ —
        # extra keyword arguments are silently discarded. Confirm intent.
        super().__init__(
            id=id,
            func=func or self._default_execute,
            workflow_type="tool",
            queue_name=queue_name,
            queue_concurrency_limit=queue_concurrency_limit,
            on_start=on_start,
            on_end=on_end,
        )

        # Register tool in global registry (so it can be found by agents).
        # The @tool decorator registers again under the same key; the second
        # write is redundant but harmless.
        _WORKFLOW_REGISTRY[id] = self

        # Tool-specific metadata for LLM function calling.
        # NOTE(review): without an explicit description this falls back to
        # self.__doc__, which for undocumented subclasses is the Tool base
        # class docstring — verify that fallback is desired.
        self._tool_description = description or self.__doc__ or ""
        # If parameters not provided, use empty schema (subclasses should provide explicitly)
        if parameters is None:
            parameters = {"type": "object", "properties": {}}
        self._tool_parameters = parameters
        # Store input schema class for validation (set by decorator)
        self._input_schema_class: type[BaseModel] | None = None

    def _default_execute(self, ctx: WorkflowContext, payload: BaseModel | None):
        """
        Default execution function - subclasses should override.
        This is called when the tool is triggered as a workflow.

        Raises:
            NotImplementedError: Always, unless a subclass overrides this
                method or a ``func`` was supplied at construction time.
        """
        raise NotImplementedError(f"Tool {self.id} must implement _default_execute or provide func")

    async def run(
        self,
        client: PolosClient,
        payload: BaseModel | dict[str, Any] | None = None,
        queue: str | None = None,
        concurrency_key: str | None = None,
        session_id: str | None = None,
        user_id: str | None = None,
        timeout: float | None = 600.0,
    ) -> Any:
        """
        Run tool and return final result (wait for completion).

        This method cannot be called from within an execution context
        (e.g., from within a workflow).
        Use step.invoke_and_wait() to call tools from within workflows.

        Args:
            payload: Tool payload - Pydantic BaseModel instance if tool has
                input schema, dict, or None
            queue: Optional queue name (overrides tool-level queue)
            concurrency_key: Optional concurrency key for per-tenant queuing
            session_id: Optional session ID
            user_id: Optional user ID
            timeout: Optional timeout in seconds (default: 600 seconds / 10 minutes)

        Returns:
            Result from tool execution

        Raises:
            ValueError: If the payload does not match the tool's input schema
                (or a payload was given to a schema-less tool).
            WorkflowTimeoutError: If the execution exceeds the timeout

        Example:
            result = await my_tool.run(MyInputModel(param="value"))
        """
        # Handle Pydantic payload conversion if tool has input schema.
        # The payload is normalized to a plain JSON-compatible dict before
        # being handed to Workflow.run().
        prepared_payload = payload
        if self._input_schema_class is not None:
            if payload is None:
                raise ValueError(f"Tool '{self.id}' requires input schema but payload is None")
            # If payload is a Pydantic model, validate it's the correct type and convert to dict
            if isinstance(payload, BaseModel):
                if not isinstance(payload, self._input_schema_class):
                    raise ValueError(
                        f"Tool '{self.id}' expects payload of type "
                        f"{self._input_schema_class.__name__}, "
                        f"got {type(payload).__name__}"
                    )
                # Convert to dict for invoke
                prepared_payload = payload.model_dump(mode="json")
            elif isinstance(payload, dict):
                # Validate dict against Pydantic model
                try:
                    validated_model = self._input_schema_class.model_validate(payload)
                    prepared_payload = validated_model.model_dump(mode="json")
                except Exception as e:
                    raise ValueError(f"Invalid payload for tool '{self.id}': {e}") from e
            else:
                raise ValueError(
                    f"Tool '{self.id}' expects payload of type "
                    f"{self._input_schema_class.__name__} or dict, "
                    f"got {type(payload).__name__}"
                )
        else:
            # No input schema - payload should be None
            if payload is not None:
                raise ValueError(
                    f"Tool '{self.id}' does not accept a payload, but got {type(payload).__name__}"
                )
            prepared_payload = None

        # Call parent run() method
        return await super().run(
            client=client,
            payload=prepared_payload,
            queue=queue,
            concurrency_key=concurrency_key,
            session_id=session_id,
            user_id=user_id,
            timeout=timeout,
        )

    def get_tool_type(self) -> str:
        """
        Get the tool type identifier.

        Returns:
            Tool type string (e.g., "default", "browser_tool")
            Default implementation returns "default".
        """
        return "default"

    def get_tool_metadata(self) -> dict[str, Any] | None:
        """
        Get tool metadata (constructor properties and configuration).

        Returns:
            Dictionary containing tool-specific metadata, or None if no metadata.
            Default implementation returns None.
            Subclasses should override to return their constructor properties.
        """
        return None

    def to_llm_tool_definition(self) -> dict[str, Any]:
        """
        Convert tool to LLM function calling format.

        Returns format compatible with OpenAI/Anthropic function calling:
        {
            "type": "function",
            "function": {
                "name": "tool_id",
                "description": "...",
                "parameters": {...}
            }
        }
        """
        return {
            "type": "function",
            "function": {
                "name": self.id,
                "description": self._tool_description,
                "parameters": self._tool_parameters,
            },
        }

    @staticmethod
    def _parse_queue_name(queue: Any) -> str | None:
        """Extract queue name from queue configuration.

        Accepts a plain string, a dict with a "name" key, or any object
        exposing a ``name`` attribute (e.g. Queue); returns None otherwise.
        """
        if queue is None:
            return None
        if isinstance(queue, str):
            return queue
        if isinstance(queue, dict):
            return queue.get("name")
        if hasattr(queue, "name"):
            return queue.name
        return None

    @staticmethod
    def _parse_queue_concurrency(queue: Any) -> int | None:
        """Extract concurrency limit from queue configuration.

        A bare string queue name carries no limit, so only dicts (via the
        "concurrency_limit" key) and objects with a ``concurrency_limit``
        attribute yield a value.
        """
        if queue is None:
            return None
        if isinstance(queue, dict):
            return queue.get("concurrency_limit")
        if hasattr(queue, "concurrency_limit"):
            return queue.concurrency_limit
        return None
240
+
241
+
242
def tool(
    id: str | None = None,
    description: str | None = None,
    parameters: dict[str, Any] | None = None,
    queue: str | Queue | dict[str, Any] | None = None,
    on_start: Union[str, list[str], "Hook", list["Hook"]] | None = None,
    on_end: Union[str, list[str], "Hook", list["Hook"]] | None = None,
    **kwargs,
):
    """
    Decorator to mark a function as a tool callable by LLM agents.

    Creates a simple Tool instance from a function.
    For complex tools (CodeInterpreter, BrowserTool), use classes instead.

    Supports both ``@tool`` (bare) and ``@tool(...)`` (parameterized) usage;
    in the bare form the decorated function arrives as ``id``.

    Args:
        id: Optional tool ID (defaults to function name)
        description: Optional tool description (for LLM)
        parameters: Optional parameter schema (auto-inferred if not provided)
        queue: Optional queue configuration
        on_start: Optional lifecycle hook(s) to run before tool execution
        on_end: Optional lifecycle hook(s) to run after tool execution
        **kwargs: Additional workflow configuration

    Example:
        @tool(description="Search the knowledge base")
        async def search_kb(query: str) -> dict:
            results = await db.search(query)
            return {"results": results}

        # Use in agent
        agent = Agent(tools=[search_kb])
    """

    def decorator(func: Callable) -> Tool:
        # In the bare @tool form, ``id`` holds the function itself, so only a
        # real string counts as an explicit ID; otherwise fall back to the
        # function's name.
        tool_id = (id if isinstance(id, str) else None) or func.__name__

        # Validate function signature and discover the optional input schema.
        input_schema_class, _return_type = _validate_tool_signature(func)

        # Capture parameters from outer scope and infer if needed.
        tool_parameters = parameters
        if tool_parameters is None:
            if input_schema_class is not None:
                # Extract schema from Pydantic model
                tool_parameters = input_schema_class.model_json_schema()
            else:
                # No input schema - empty schema
                tool_parameters = {"type": "object", "properties": {}}

        # Wrap function to work as workflow function (dict payload -> model).
        wrapped_func = _wrap_function_for_tool(func, input_schema_class)

        # Create Tool instance from function.
        tool_obj = Tool(
            id=tool_id,
            description=description or func.__doc__ or "",
            parameters=tool_parameters,
            func=wrapped_func,
            queue=queue,
            on_start=on_start,
            on_end=on_end,
            **kwargs,
        )

        # Store input schema class for validation (used by inherited run() method).
        tool_obj._input_schema_class = input_schema_class

        # Register the tool so agents can resolve it by ID.
        _WORKFLOW_REGISTRY[tool_id] = tool_obj
        return tool_obj

    # Handle both @tool and @tool(...) syntax. In the bare form the decorated
    # function is passed as ``id``; delegate to the single decorator path so
    # both forms share one implementation (previously the bare path duplicated
    # the body and silently dropped on_start/on_end).
    if id is not None and callable(id):
        return decorator(id)

    return decorator
352
+
353
+
354
def is_json_serializable(annotation: Any) -> bool:
    """
    Report whether a type annotation names a directly JSON-serializable type.

    Args:
        annotation: Type annotation to check

    Returns:
        True if the type is JSON serializable, False otherwise
    """
    # str/int/float/bool/None map to JSON scalars; dict/list map to
    # objects/arrays. Anything else needs explicit serialization.
    allowed = (str, int, float, bool, type(None), dict, list)
    return annotation in allowed
366
+
367
+
368
def _validate_tool_signature(func: Callable) -> tuple[type[BaseModel] | None, Any | None]:
    """
    Validate tool function signature and return input schema class and return type.

    Args:
        func: Function to validate

    Returns:
        Tuple of (input_schema_class, return_type):
        - input_schema_class: Pydantic BaseModel class if second parameter exists, None otherwise
        - return_type: Return type annotation if exists, None otherwise

    Raises:
        TypeError: If signature is invalid
    """
    sig = inspect.signature(func)
    params = list(sig.parameters.values())

    # Tool function must have at least 1 parameter (WorkflowContext)
    if len(params) < 1:
        raise TypeError(
            f"Tool function '{func.__name__}' must have at least 1 parameter: "
            f"(ctx: WorkflowContext) or (ctx: WorkflowContext, input: BaseModel)"
        )

    # Tool function must have at most 2 parameters
    if len(params) > 2:
        raise TypeError(
            f"Tool function '{func.__name__}' must have at most 2 parameters: "
            f"(ctx: WorkflowContext) or (ctx: WorkflowContext, input: BaseModel)"
        )

    # Check first parameter (context)
    first_param = params[0]
    first_annotation = first_param.annotation

    # Allow untyped parameters or anything that ends with WorkflowContext
    first_type_valid = False
    if first_annotation == inspect.Parameter.empty:
        # Untyped is allowed
        first_type_valid = True
    elif isinstance(first_annotation, str):
        # String annotation (forward reference) - match by name
        if first_annotation.endswith("WorkflowContext") or "WorkflowContext" in first_annotation:
            first_type_valid = True
    else:
        # Type annotation - accept by class name, by identity with the real
        # WorkflowContext class, or as a generic whose args include it.
        try:
            type_name = getattr(first_annotation, "__name__", None) or str(first_annotation)
            if type_name.endswith("WorkflowContext") or "WorkflowContext" in type_name:
                first_type_valid = True
            from ..core.context import WorkflowContext

            if first_annotation is WorkflowContext or first_annotation == WorkflowContext:
                first_type_valid = True
            elif hasattr(first_annotation, "__origin__"):
                args = getattr(first_annotation, "__args__", ())
                if WorkflowContext in args:
                    first_type_valid = True
        except (ImportError, AttributeError):
            # If we can't check, allow it if the name suggests it's WorkflowContext
            type_name = getattr(first_annotation, "__name__", None) or str(first_annotation)
            if "WorkflowContext" in type_name:
                first_type_valid = True

    if not first_type_valid:
        raise TypeError(
            f"Tool function '{func.__name__}': first parameter "
            f"'{first_param.name}' must be typed as WorkflowContext "
            f"(or untyped), got {first_annotation}"
        )

    # Check second parameter (input schema) if it exists
    input_schema_class = None
    if len(params) >= 2:
        second_param = params[1]
        second_annotation = second_param.annotation
        if second_annotation == inspect.Parameter.empty:
            raise TypeError(
                f"Tool function '{func.__name__}': second parameter "
                f"'{second_param.name}' must be typed as a Pydantic BaseModel class"
            )

        # Check if second parameter is a Pydantic BaseModel
        second_type_valid = False
        if inspect.isclass(second_annotation) and issubclass(second_annotation, BaseModel):
            second_type_valid = True
            input_schema_class = second_annotation

        if not second_type_valid:
            raise TypeError(
                f"Tool function '{func.__name__}': second parameter "
                f"'{second_param.name}' must be typed as a Pydantic BaseModel "
                f"class, got {second_annotation}"
            )

    # Validate return type if specified
    return_type = sig.return_annotation
    if return_type != inspect.Signature.empty and return_type is not None:
        # Check if return type is valid (Pydantic BaseModel or JSON serializable)
        return_type_valid = False

        # Check if it's a Pydantic BaseModel or a plain JSON type
        if (
            inspect.isclass(return_type)
            and issubclass(return_type, BaseModel)
            or is_json_serializable(return_type)
        ):
            return_type_valid = True
        # Check if it's a Union/Optional with JSON serializable types or Pydantic models.
        # PEP 604 unions ("dict | None") are types.UnionType instances, which on
        # Python 3.10-3.13 have no __origin__ attribute, so they must be checked
        # explicitly or valid annotations would be rejected.
        elif isinstance(return_type, types.UnionType) or hasattr(return_type, "__origin__"):
            origin = getattr(return_type, "__origin__", None)
            args = getattr(return_type, "__args__", ())
            # Check for Union/Optional (both typing.Union and X | Y forms)
            if origin is Union or isinstance(return_type, types.UnionType):
                return_type_valid = all(
                    (inspect.isclass(arg) and issubclass(arg, BaseModel))
                    or arg is type(None)
                    or is_json_serializable(arg)
                    for arg in args
                )
            # Check for Dict[str, Any] or List[str] etc.
            elif origin in (dict, list):
                # Allow Dict and List with any args (runtime will validate)
                return_type_valid = True
        # Check if it's a string annotation (forward reference)
        elif isinstance(return_type, str) and any(
            keyword in return_type
            for keyword in [
                "dict",
                "Dict",
                "list",
                "List",
                "str",
                "int",
                "float",
                "bool",
                "None",
            ]
        ):
            return_type_valid = True

        if not return_type_valid:
            raise TypeError(
                f"Tool function '{func.__name__}': return type must be a "
                f"Pydantic BaseModel or JSON serializable type, "
                f"got {return_type}"
            )

    return input_schema_class, return_type if return_type != inspect.Signature.empty else None
520
+
521
+
522
def _wrap_function_for_tool(
    func: Callable, input_schema_class: type[BaseModel] | None = None
) -> Callable:
    """Wrap user function to work as workflow function.

    Tool functions must have one of these signatures:
    - (ctx: WorkflowContext) -> no input schema
    - (ctx: WorkflowContext, input: BaseModel) -> Pydantic input schema

    The wrapper receives payload as a dict (from workflow execution system) and converts it
    to a Pydantic model instance if input_schema_class is provided.
    """
    # Signature shape was already validated by the decorator; here we only
    # need to know whether a second (input) parameter exists, and whether the
    # target is a coroutine function. Both are invariant, so compute once.
    expects_input = len(inspect.signature(func).parameters) >= 2
    is_coroutine = inspect.iscoroutinefunction(func)

    def _coerce_payload(payload: Any) -> BaseModel:
        """Turn the incoming payload into an input_schema_class instance (may raise)."""
        if isinstance(payload, dict):
            # Normal case: the execution system delivers a plain dict.
            return input_schema_class.model_validate(payload)
        if isinstance(payload, input_schema_class):
            # Already the correct model type (shouldn't happen, but handle it).
            return payload
        if isinstance(payload, BaseModel):
            # A different Pydantic model - round-trip through JSON to validate.
            return input_schema_class.model_validate(payload.model_dump(mode="json"))
        raise ValueError(
            f"Payload must be a dict or Pydantic BaseModel "
            f"instance, got {type(payload)}"
        )

    async def wrapper(ctx: WorkflowContext, payload: dict[str, Any] | None):
        call_args = [ctx]

        if expects_input:
            if input_schema_class is None:
                # Should not happen - a second parameter exists but no schema class.
                raise ValueError(
                    f"Tool function '{func.__name__}' expects input schema but none provided"
                )
            if payload is None:
                raise ValueError(
                    f"Tool function '{func.__name__}' requires input schema but payload is None"
                )
            # Any failure while coercing (including an unsupported payload
            # type) is reported as one uniform "invalid payload" error.
            try:
                call_args.append(_coerce_payload(payload))
            except Exception as e:
                raise ValueError(
                    f"Invalid payload for tool '{func.__name__}': {e}. Payload: {payload}"
                ) from e

        # Dispatch to the original function, awaiting when it is async.
        if is_coroutine:
            return await func(*call_args)
        return func(*call_args)

    return wrapper
@@ -0,0 +1,23 @@
1
"""Public type exports for the ``polos.types`` package."""

from .types import (
    AgentConfig,
    AgentResult,
    BatchStepResult,
    BatchWorkflowInput,
    Step,
    ToolCall,
    ToolCallFunction,
    ToolResult,
    Usage,
)

# Explicit public API of this package (mirrors the imports above).
__all__ = [
    "AgentConfig",
    "AgentResult",
    "BatchStepResult",
    "BatchWorkflowInput",
    "Step",
    "ToolCall",
    "ToolCallFunction",
    "ToolResult",
    "Usage",
]