hammad-python 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/__init__.py +62 -14
- hammad/_main.py +226 -0
- hammad/cli/__init__.py +0 -2
- hammad/cli/plugins.py +3 -1
- hammad/data/__init__.py +4 -5
- hammad/data/types/__init__.py +37 -1
- hammad/data/types/file.py +74 -1
- hammad/data/types/multimodal/__init__.py +14 -2
- hammad/data/types/multimodal/audio.py +106 -2
- hammad/data/types/multimodal/image.py +104 -2
- hammad/data/types/text.py +242 -0
- hammad/genai/__init__.py +73 -0
- hammad/genai/a2a/__init__.py +32 -0
- hammad/genai/a2a/workers.py +552 -0
- hammad/genai/agents/__init__.py +8 -0
- hammad/genai/agents/agent.py +747 -214
- hammad/genai/agents/run.py +421 -12
- hammad/genai/agents/types/agent_response.py +2 -1
- hammad/genai/graphs/__init__.py +125 -0
- hammad/genai/graphs/base.py +1786 -0
- hammad/genai/graphs/plugins.py +316 -0
- hammad/genai/graphs/types.py +638 -0
- hammad/genai/models/language/__init__.py +6 -1
- hammad/genai/models/language/model.py +46 -0
- hammad/genai/models/language/run.py +330 -4
- hammad/genai/models/language/types/language_model_response.py +1 -1
- hammad/genai/types/tools.py +1 -1
- hammad/logging/logger.py +60 -5
- hammad/mcp/__init__.py +3 -0
- hammad/types.py +288 -0
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/METADATA +6 -1
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/RECORD +34 -32
- hammad/_main/__init__.py +0 -4
- hammad/_main/_fn.py +0 -20
- hammad/_main/_new.py +0 -52
- hammad/_main/_run.py +0 -50
- hammad/_main/_to.py +0 -19
- hammad/cli/_runner.py +0 -265
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/licenses/LICENSE +0 -0
hammad/genai/models/language/model.py CHANGED
@@ -100,6 +100,8 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         base_url: Optional[str] = None,
         api_key: Optional[str] = None,
         instructor_mode: LanguageModelInstructorMode = "tool_call",
+        verbose: bool = False,
+        debug: bool = False,
         **kwargs: Any,
     ):
         """Initialize the language model.
@@ -109,6 +111,8 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             base_url: Custom base URL for the API
             api_key: API key for authentication
             instructor_mode: Default instructor mode for structured outputs
+            verbose: If True, set logger to INFO level for detailed output
+            debug: If True, set logger to DEBUG level for maximum verbosity
             **kwargs: Additional arguments passed to BaseGenAIModel
         """
         # Initialize BaseGenAIModel via super()
@@ -116,6 +120,14 @@ class LanguageModel(BaseGenAIModel, Generic[T]):

         # Initialize LanguageModel-specific attributes
         self._instructor_client = None
+        self.verbose = verbose
+        self.debug = debug
+
+        # Set logger level based on verbose/debug flags
+        if debug:
+            logger.setLevel("DEBUG")
+        elif verbose:
+            logger.setLevel("INFO")

         logger.info(f"Initialized LanguageModel w/ model: {self.model}")
         logger.debug(f"LanguageModel settings: {self.settings}")
@@ -337,6 +349,8 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         messages: LanguageModelMessages,
         instructions: Optional[str] = None,
         mock_response: Optional[str] = None,
+        verbose: Optional[bool] = None,
+        debug: Optional[bool] = None,
         **kwargs: Any,
     ) -> Union[LanguageModelResponse[Any], LanguageModelStream[Any]]:
         """Run a language model request.
@@ -345,11 +359,20 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             messages: The input messages/content for the request
             instructions: Optional system instructions to prepend
             mock_response: Mock response string for testing (saves API costs)
+            verbose: If True, set logger to INFO level for this request
+            debug: If True, set logger to DEBUG level for this request
             **kwargs: Additional request parameters

         Returns:
             LanguageModelResponse or LanguageModelStream depending on parameters
         """
+        # Set logger level for this request if specified
+        original_level = logger.level
+        if debug or (debug is None and self.debug):
+            logger.setLevel("DEBUG")
+        elif verbose or (verbose is None and self.verbose):
+            logger.setLevel("INFO")
+
         logger.info(f"Running LanguageModel request with model: {self.model}")
         logger.debug(f"LanguageModel request kwargs: {kwargs}")

@@ -388,6 +411,10 @@ class LanguageModel(BaseGenAIModel, Generic[T]):

         except Exception as e:
             raise LanguageModelError(f"Error in language model request: {e}") from e
+        finally:
+            # Restore original logger level
+            if debug is not None or verbose is not None:
+                logger.setLevel(original_level)

     # Overloaded async_run methods for different return types

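To make the per-request override concrete, here is a minimal hedged sketch (not taken from the package docs) of how the flags compose, assuming `LanguageModel` is importable from the module path in the file list above and that a plain string is an accepted `LanguageModelMessages` value:

```python
# Hedged sketch: import path assumed from hammad/genai/models/language/model.py.
from hammad.genai.models.language.model import LanguageModel

lm = LanguageModel(model="openai/gpt-4o-mini", verbose=True)  # INFO logs by default

# debug=True overrides the instance default for this call only; the new
# `finally` block above restores the original logger level afterwards.
response = lm.run(
    "What is 2 + 2?",
    mock_response="4",  # mock_response skips the real API call
    debug=True,
)
```

Leaving `verbose`/`debug` as `None` falls back to the instance flags, which is why the checks above read `debug is None and self.debug`.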
@@ -574,6 +601,8 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         messages: LanguageModelMessages,
         instructions: Optional[str] = None,
         mock_response: Optional[str] = None,
+        verbose: Optional[bool] = None,
+        debug: Optional[bool] = None,
         **kwargs: Any,
     ) -> Union[LanguageModelResponse[Any], LanguageModelStream[Any]]:
         """Run an async language model request.
@@ -582,11 +611,20 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             messages: The input messages/content for the request
             instructions: Optional system instructions to prepend
             mock_response: Mock response string for testing (saves API costs)
+            verbose: If True, set logger to INFO level for this request
+            debug: If True, set logger to DEBUG level for this request
             **kwargs: Additional request parameters

         Returns:
             LanguageModelResponse or LanguageModelAsyncStream depending on parameters
         """
+        # Set logger level for this request if specified
+        original_level = logger.level
+        if debug or (debug is None and self.debug):
+            logger.setLevel("DEBUG")
+        elif verbose or (verbose is None and self.verbose):
+            logger.setLevel("INFO")
+
         logger.info(f"Running async LanguageModel request with model: {self.model}")
         logger.debug(f"LanguageModel request kwargs: {kwargs}")

@@ -631,6 +669,10 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             raise LanguageModelError(
                 f"Error in async language model request: {e}"
             ) from e
+        finally:
+            # Restore original logger level
+            if debug is not None or verbose is not None:
+                logger.setLevel(original_level)

     def _handle_completion_request(
         self, request: LanguageModelRequestBuilder, parsed_messages: List[Any]
@@ -1038,6 +1080,8 @@ def create_language_model(
     deployment_id: Optional[str] = None,
     model_list: Optional[List[Any]] = None,
     extra_headers: Optional[Dict[str, str]] = None,
+    verbose: bool = False,
+    debug: bool = False,
 ) -> LanguageModel:
     """Create a language model instance."""
     return LanguageModel(
@@ -1049,4 +1093,6 @@ def create_language_model(
         deployment_id=deployment_id,
         model_list=model_list,
         extra_headers=extra_headers,
+        verbose=verbose,
+        debug=debug,
     )
hammad/genai/models/language/run.py CHANGED
@@ -3,6 +3,8 @@
 Standalone functions for running language models with full parameter typing.
 """

+import inspect
+import functools
 from typing import (
     Any,
     List,
@@ -39,6 +41,7 @@ from .model import LanguageModel
 __all__ = [
     "run_language_model",
     "async_run_language_model",
+    "language_model_decorator",
 ]

@@ -275,6 +278,8 @@ def run_language_model(
     messages: "LanguageModelMessages",
     instructions: Optional[str] = None,
     mock_response: Optional[bool] = None,
+    verbose: bool = False,
+    debug: bool = False,
     **kwargs: Any,
 ) -> Union["LanguageModelResponse[Any]", "LanguageModelStream[Any]"]:
     """Run a language model request with full parameter support.
@@ -282,6 +287,8 @@ def run_language_model(
     Args:
         messages: The input messages/content for the request
         instructions: Optional system instructions to prepend
+        verbose: If True, set logger to INFO level for detailed output
+        debug: If True, set logger to DEBUG level for maximum verbosity
         **kwargs: All request parameters from LanguageModelRequest

     Returns:
@@ -291,11 +298,16 @@ def run_language_model(
     model = kwargs.pop("model", "openai/gpt-4o-mini")

     # Create language model instance
-    language_model = LanguageModel(model=model)
+    language_model = LanguageModel(model=model, verbose=verbose, debug=debug)

     # Forward to the instance method
     return language_model.run(
-        messages,
+        messages,
+        instructions,
+        mock_response=mock_response,
+        verbose=verbose,
+        debug=debug,
+        **kwargs,
     )

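Note that, judging by the removed lines, the old code forwarded only `messages` to `run()` and silently dropped `instructions`, `mock_response`, and `**kwargs`; this hunk fixes that as well as threading the new flags through. A hedged usage sketch of the standalone form:

```python
# Hedged sketch: import path taken from hammad/genai/models/language/run.py.
from hammad.genai.models.language.run import run_language_model

response = run_language_model(
    "Summarize: the quick brown fox jumps over the lazy dog.",
    instructions="Answer in one short sentence.",
    model="openai/gpt-4o-mini",  # popped from kwargs; this is also the default
    verbose=True,
)
print(response.output)
```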
@@ -528,6 +540,8 @@ async def async_run_language_model(
     messages: "LanguageModelMessages",
     instructions: Optional[str] = None,
     mock_response: Optional[bool] = None,
+    verbose: bool = False,
+    debug: bool = False,
     **kwargs: Any,
 ) -> Union["LanguageModelResponse[Any]", "LanguageModelStream[Any]"]:
     """Run an async language model request with full parameter support.
@@ -535,6 +549,8 @@ async def async_run_language_model(
     Args:
         messages: The input messages/content for the request
         instructions: Optional system instructions to prepend
+        verbose: If True, set logger to INFO level for detailed output
+        debug: If True, set logger to DEBUG level for maximum verbosity
         **kwargs: All request parameters from LanguageModelRequest

     Returns:
@@ -544,9 +560,319 @@ async def async_run_language_model(
     model = kwargs.pop("model", "openai/gpt-4o-mini")

     # Create language model instance
-    language_model = LanguageModel(model=model)
+    language_model = LanguageModel(model=model, verbose=verbose, debug=debug)

     # Forward to the instance method
     return await language_model.async_run(
-        messages,
+        messages,
+        instructions,
+        mock_response=mock_response,
+        verbose=verbose,
+        debug=debug,
+        **kwargs,
     )
+
+
+def language_model_decorator(
+    fn: Union[str, Callable, None] = None,
+    *,
+    # Model settings
+    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
+    instructions: Optional[str] = None,
+    mock_response: Optional[bool] = None,
+    # Request settings
+    output_type: Optional[Type] = None,
+    stream: Optional[bool] = None,
+    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    return_output: bool = True,
+    # LM settings
+    timeout: Optional[Union[float, str, "Timeout"]] = None,
+    temperature: Optional[float] = None,
+    top_p: Optional[float] = None,
+    max_tokens: Optional[int] = None,
+    presence_penalty: Optional[float] = None,
+    frequency_penalty: Optional[float] = None,
+    seed: Optional[int] = None,
+    user: Optional[str] = None,
+    # Advanced settings
+    response_format: Optional[Dict[str, Any]] = None,
+    stop: Optional[Union[str, List[str]]] = None,
+    logit_bias: Optional[Dict[int, float]] = None,
+    logprobs: Optional[bool] = None,
+    top_logprobs: Optional[int] = None,
+    thinking: Optional[Dict[str, Any]] = None,
+    web_search_options: Optional[Dict[str, Any]] = None,
+    # Tools settings
+    tools: Optional[List[Any]] = None,
+    tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+    parallel_tool_calls: Optional[bool] = None,
+    functions: Optional[List[Any]] = None,
+    function_call: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
+):
+    """Decorator that converts a function into a language model call.
+
+    The function's parameters become the input to the LLM (converted to a string),
+    the function's return type annotation becomes the language model's output type,
+    and the function's docstring becomes the language model's instructions.
+
+    Works with both sync and async functions.
+
+    Can be used in multiple ways:
+
+    1. As a decorator with parameters:
+        @language_model_decorator(model="gpt-4", temperature=0.7)
+        def my_lm():
+            pass
+
+    2. As a decorator without parameters:
+        @language_model_decorator
+        def my_lm():
+            pass
+
+    3. As an inline function with model as first argument:
+        lm = language_model_decorator("gpt-4")
+        # Then use: decorated_func = lm(my_function)
+
+    4. As an inline function with all parameters:
+        lm = language_model_decorator(model="gpt-4", temperature=0.7)
+        # Then use: decorated_func = lm(my_function)
+    """
+    # Handle different calling patterns
+    if callable(fn):
+        # Case: @language_model_decorator (no parentheses)
+        func = fn
+        actual_model = model or "openai/gpt-4o-mini"
+        return _create_language_model_wrapper(
+            func,
+            actual_model,
+            instructions,
+            mock_response,
+            output_type,
+            stream,
+            instructor_mode,
+            return_output,
+            timeout,
+            temperature,
+            top_p,
+            max_tokens,
+            presence_penalty,
+            frequency_penalty,
+            seed,
+            user,
+            response_format,
+            stop,
+            logit_bias,
+            logprobs,
+            top_logprobs,
+            thinking,
+            web_search_options,
+            tools,
+            tool_choice,
+            parallel_tool_calls,
+            functions,
+            function_call,
+            verbose,
+            debug,
+        )
+    elif isinstance(fn, str):
+        # Case: language_model_decorator("gpt-4") - first arg is model
+        actual_model = fn
+    else:
+        # Case: language_model_decorator() or language_model_decorator(model="gpt-4")
+        actual_model = model or "openai/gpt-4o-mini"
+
+    def decorator(func: Callable) -> Callable:
+        return _create_language_model_wrapper(
+            func,
+            actual_model,
+            instructions,
+            mock_response,
+            output_type,
+            stream,
+            instructor_mode,
+            return_output,
+            timeout,
+            temperature,
+            top_p,
+            max_tokens,
+            presence_penalty,
+            frequency_penalty,
+            seed,
+            user,
+            response_format,
+            stop,
+            logit_bias,
+            logprobs,
+            top_logprobs,
+            thinking,
+            web_search_options,
+            tools,
+            tool_choice,
+            parallel_tool_calls,
+            functions,
+            function_call,
+            verbose,
+            debug,
+        )
+
+    return decorator
+
+
+def _create_language_model_wrapper(
+    func: Callable,
+    model: Union["LanguageModel", "LanguageModelName"],
+    instructions: Optional[str],
+    mock_response: Optional[bool],
+    output_type: Optional[Type],
+    stream: Optional[bool],
+    instructor_mode: Optional["LanguageModelInstructorMode"],
+    return_output: bool,
+    timeout: Optional[Union[float, str, "Timeout"]],
+    temperature: Optional[float],
+    top_p: Optional[float],
+    max_tokens: Optional[int],
+    presence_penalty: Optional[float],
+    frequency_penalty: Optional[float],
+    seed: Optional[int],
+    user: Optional[str],
+    response_format: Optional[Dict[str, Any]],
+    stop: Optional[Union[str, List[str]]],
+    logit_bias: Optional[Dict[int, float]],
+    logprobs: Optional[bool],
+    top_logprobs: Optional[int],
+    thinking: Optional[Dict[str, Any]],
+    web_search_options: Optional[Dict[str, Any]],
+    tools: Optional[List[Any]],
+    tool_choice: Optional[Union[str, Dict[str, Any]]],
+    parallel_tool_calls: Optional[bool],
+    functions: Optional[List[Any]],
+    function_call: Optional[str],
+    verbose: bool,
+    debug: bool,
+) -> Callable:
+    """Helper function to create the actual language model wrapper."""
+    import inspect
+    import asyncio
+    from typing import get_type_hints
+
+    # Get function metadata
+    sig = inspect.signature(func)
+    type_hints = get_type_hints(func)
+    return_type = output_type or type_hints.get("return", str)
+    func_instructions = instructions or func.__doc__ or ""
+
+    # Check if function is async
+    is_async = asyncio.iscoroutinefunction(func)
+
+    if is_async:
+
+        @functools.wraps(func)
+        async def async_wrapper(*args, **kwargs):
+            # Convert function parameters to message string
+            bound_args = sig.bind(*args, **kwargs)
+            bound_args.apply_defaults()
+
+            # Create message from parameters
+            param_parts = []
+            for param_name, param_value in bound_args.arguments.items():
+                param_parts.append(f"{param_name}: {param_value}")
+            message = "\n".join(param_parts)
+
+            # Prepare parameters for language model call
+            lm_kwargs = {
+                "messages": message,
+                "instructions": func_instructions,
+                "model": model,
+                "mock_response": mock_response,
+                "stream": stream,
+                "instructor_mode": instructor_mode,
+                "timeout": timeout,
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "seed": seed,
+                "user": user,
+                "response_format": response_format,
+                "stop": stop,
+                "logit_bias": logit_bias,
+                "logprobs": logprobs,
+                "top_logprobs": top_logprobs,
+                "thinking": thinking,
+                "web_search_options": web_search_options,
+                "tools": tools,
+                "tool_choice": tool_choice,
+                "parallel_tool_calls": parallel_tool_calls,
+                "functions": functions,
+                "function_call": function_call,
+            }
+
+            # Only add type parameter if it's not str (for structured output)
+            if return_type is not str:
+                lm_kwargs["type"] = return_type
+
+            # Run language model with extracted parameters
+            return await async_run_language_model(**lm_kwargs)
+
+        return async_wrapper
+    else:
+
+        @functools.wraps(func)
+        def sync_wrapper(*args, **kwargs):
+            # Convert function parameters to message string
+            bound_args = sig.bind(*args, **kwargs)
+            bound_args.apply_defaults()
+
+            # Create message from parameters
+            param_parts = []
+            for param_name, param_value in bound_args.arguments.items():
+                param_parts.append(f"{param_name}: {param_value}")
+            message = "\n".join(param_parts)
+
+            # Prepare parameters for language model call
+            lm_kwargs = {
+                "messages": message,
+                "instructions": func_instructions,
+                "model": model,
+                "mock_response": mock_response,
+                "stream": stream,
+                "instructor_mode": instructor_mode,
+                "timeout": timeout,
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "seed": seed,
+                "user": user,
+                "response_format": response_format,
+                "stop": stop,
+                "logit_bias": logit_bias,
+                "logprobs": logprobs,
+                "top_logprobs": top_logprobs,
+                "thinking": thinking,
+                "web_search_options": web_search_options,
+                "tools": tools,
+                "tool_choice": tool_choice,
+                "parallel_tool_calls": parallel_tool_calls,
+                "functions": functions,
+                "function_call": function_call,
+            }
+
+            # Only add type parameter if it's not str (for structured output)
+            if return_type is not str:
+                lm_kwargs["type"] = return_type
+
+            # Run language model with extracted parameters
+            response = run_language_model(**lm_kwargs)
+
+            # Return just the output if return_output is True (default behavior)
+            if return_output:
+                return response.output
+            else:
+                return response
+
+        return sync_wrapper
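The decorator's mechanics are all in the wrapper above: bound arguments are flattened into `name: value` lines, the docstring becomes the instructions, and the return annotation becomes the structured output type. A hedged usage sketch:

```python
# Hedged sketch: assumes the decorator is importable from the run module
# listed in __all__ above.
from hammad.genai.models.language.run import language_model_decorator

@language_model_decorator(model="openai/gpt-4o-mini", temperature=0.2)
def classify_sentiment(text: str) -> str:
    """Classify the sentiment of `text` as positive, negative, or neutral."""

# The function body never executes; the wrapper sends "text: <value>" to the
# model with the docstring as instructions and returns response.output here.
label = classify_sentiment("I love this library!")
```

One asymmetry is visible in the diff: the sync wrapper honors `return_output` and returns `response.output` by default, while the async wrapper always returns the full response object.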
hammad/genai/models/language/types/language_model_response.py CHANGED
@@ -47,7 +47,7 @@ class LanguageModelResponse(BaseGenAIModelResponse[T]):
     In many cases with tool calling, message content is not present, in these cases
     this field will **NOT** represent tool calls, and will be returned as `None`."""

-    completion:
+    completion: Any
     """The raw Chat Completion (`litellm.ModelResponse`) object returned by the
     language model."""

hammad/genai/types/tools.py CHANGED
@@ -75,7 +75,7 @@ def extract_tool_calls_from_response(
 ) -> List[Any]:
     """Extract tool calls from various response types."""
     # ensure type is of agent or language model
-    if response.type not in ["language_model", "agent"]:
+    if response.type not in ["language_model", "agent", "graph"]:
         raise ValueError(f"Response type {response.type} is not supported")

     # Handle LanguageModelResponse
hammad/logging/logger.py CHANGED
@@ -534,6 +534,47 @@ class Logger:

         return JSONFormatter()

+    def setLevel(
+        self,
+        level: Union[LoggerLevelName, int],
+    ) -> None:
+        """Set the logging level."""
+        # Handle integer levels by converting to string names
+        if isinstance(level, int):
+            # Map standard logging levels to their names
+            int_to_name = {
+                _logging.DEBUG: "debug",
+                _logging.INFO: "info",
+                _logging.WARNING: "warning",
+                _logging.ERROR: "error",
+                _logging.CRITICAL: "critical",
+            }
+            level_str = int_to_name.get(level, "warning")
+        else:
+            level_str = level
+
+        self._user_level = level_str
+
+        # Standard level mapping
+        level_map = {
+            "debug": _logging.DEBUG,
+            "info": _logging.INFO,
+            "warning": _logging.WARNING,
+            "error": _logging.ERROR,
+            "critical": _logging.CRITICAL,
+        }
+
+        # Check custom levels first
+        if level_str.lower() in self._custom_levels:
+            log_level = self._custom_levels[level_str.lower()]
+        else:
+            log_level = level_map.get(level_str.lower(), _logging.WARNING)
+
+        # Set the integer level on the logger and handlers
+        self._logger.setLevel(log_level)
+        for handler in self._logger.handlers:
+            handler.setLevel(log_level)
+
     def add_level(
         self, name: str, value: int, style: Optional[LoggerLevelSettings] = None
     ) -> None:
@@ -570,9 +611,23 @@ class Logger:
         return self._user_level

     @level.setter
-    def level(self, value: str) -> None:
+    def level(self, value: Union[str, int]) -> None:
         """Set the logging level."""
-        self._user_level = value
+        # Handle integer levels by converting to string names
+        if isinstance(value, int):
+            # Map standard logging levels to their names
+            int_to_name = {
+                _logging.DEBUG: "debug",
+                _logging.INFO: "info",
+                _logging.WARNING: "warning",
+                _logging.ERROR: "error",
+                _logging.CRITICAL: "critical",
+            }
+            value_str = int_to_name.get(value, "warning")
+        else:
+            value_str = value
+
+        self._user_level = value_str

         # Standard level mapping
         level_map = {
@@ -584,10 +639,10 @@ class Logger:
         }

         # Check custom levels
-        if value.lower() in self._custom_levels:
-            log_level = self._custom_levels[value.lower()]
+        if value_str.lower() in self._custom_levels:
+            log_level = self._custom_levels[value_str.lower()]
         else:
-            log_level = level_map.get(value.lower(), _logging.WARNING)
+            log_level = level_map.get(value_str.lower(), _logging.WARNING)

         # Update logger level
         self._logger.setLevel(log_level)
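Together these changes let `Logger` accept both stdlib integer levels and string names, through either `setLevel()` or the `level` property. A hedged sketch, assuming `Logger` can be constructed without arguments (the constructor is not shown in this diff):

```python
# Hedged sketch: constructor usage is illustrative; only setLevel() and the
# level setter are grounded in the hunks above.
import logging
from hammad.logging.logger import Logger

log = Logger()

log.setLevel(logging.DEBUG)   # stdlib int, mapped to "debug" internally
log.setLevel("INFO")          # string names are lowercased before lookup
log.level = logging.WARNING   # the property setter now mirrors setLevel()
```

Unrecognized integers fall back to "warning", so custom numeric levels should be registered by name via `add_level()` and then set by that name.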
hammad/mcp/__init__.py CHANGED
@@ -6,6 +6,7 @@ from typing import TYPE_CHECKING
 from .._internal import create_getattr_importer

 if TYPE_CHECKING:
+    from mcp.server.fastmcp import FastMCP
     from .client.client import (
         convert_mcp_tool_to_openai_tool,
         MCPClient,
@@ -26,6 +27,8 @@ if TYPE_CHECKING:


 __all__ = (
+    # fastmcp
+    "FastMCP",
     # hammad.mcp.client
     "MCPClient",
     "MCPClientService",