hammad-python 0.0.24__py3-none-any.whl → 0.0.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -29,7 +29,7 @@ from ..types.history import History
 from ..types.base import BaseGenAIModelStream

 if TYPE_CHECKING:
-    from .base import BaseGraph
+    from .base import BaseGraph, SelectionStrategy

 __all__ = [
     "GraphState",
@@ -68,7 +68,7 @@ class ActionSettings:
     start: bool = False
     terminates: bool = False
     xml: Optional[str] = None
-    next: Optional[Union[str, List[str]]] = None
+    next: Optional[Union[str, List[str], "SelectionStrategy"]] = None
     read_history: bool = False
     persist_history: bool = False
     condition: Optional[str] = None
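
With this change, `next` can route to a `SelectionStrategy` instead of a hard-coded node name or list of names. A sketch only, since `SelectionStrategy`'s constructor is not shown anywhere in this diff:

# Sketch: the no-argument SelectionStrategy() call is an assumption,
# as is constructing ActionSettings by keyword.
from .base import SelectionStrategy

settings = ActionSettings(
    next=SelectionStrategy(),  # previously limited to a str or List[str]
)
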
@@ -9,7 +9,11 @@ if TYPE_CHECKING:
         LanguageModel,
         create_language_model,
     )
-    from .run import run_language_model, async_run_language_model
+    from .run import (
+        run_language_model,
+        async_run_language_model,
+        language_model_decorator,
+    )
     from .types.language_model_instructor_mode import LanguageModelInstructorMode
     from .types.language_model_messages import LanguageModelMessages
     from .types.language_model_name import LanguageModelName
@@ -26,6 +30,7 @@ __all__ = [
     # hammad.genai.models.language.run
     "run_language_model",
     "async_run_language_model",
+    "language_model_decorator",
     # hammad.genai.models.language.types.language_model_instructor_mode
     "LanguageModelInstructorMode",
     # hammad.genai.models.language.types.language_model_messages
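
Assuming this `__init__` follows the package's lazy `create_getattr_importer` pattern (seen in the `hammad.mcp` hunk below), the new `__all__` entry makes the decorator importable from the subpackage root:

# Assumes the package's usual lazy-export pattern applies to this module.
from hammad.genai.models.language import language_model_decorator
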
@@ -3,6 +3,8 @@
 Standalone functions for running language models with full parameter typing.
 """

+import inspect
+import functools
 from typing import (
     Any,
     List,
@@ -39,6 +41,7 @@ from .model import LanguageModel
 __all__ = [
     "run_language_model",
     "async_run_language_model",
+    "language_model_decorator",
 ]


@@ -568,3 +571,308 @@ async def async_run_language_model(
         debug=debug,
         **kwargs,
     )
+
+
+def language_model_decorator(
+    fn: Union[str, Callable, None] = None,
+    *,
+    # Model settings
+    model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
+    instructions: Optional[str] = None,
+    mock_response: Optional[bool] = None,
+    # Request settings
+    output_type: Optional[Type] = None,
+    stream: Optional[bool] = None,
+    instructor_mode: Optional["LanguageModelInstructorMode"] = None,
+    return_output: bool = True,
+    # LM settings
+    timeout: Optional[Union[float, str, "Timeout"]] = None,
+    temperature: Optional[float] = None,
+    top_p: Optional[float] = None,
+    max_tokens: Optional[int] = None,
+    presence_penalty: Optional[float] = None,
+    frequency_penalty: Optional[float] = None,
+    seed: Optional[int] = None,
+    user: Optional[str] = None,
+    # Advanced settings
+    response_format: Optional[Dict[str, Any]] = None,
+    stop: Optional[Union[str, List[str]]] = None,
+    logit_bias: Optional[Dict[int, float]] = None,
+    logprobs: Optional[bool] = None,
+    top_logprobs: Optional[int] = None,
+    thinking: Optional[Dict[str, Any]] = None,
+    web_search_options: Optional[Dict[str, Any]] = None,
+    # Tools settings
+    tools: Optional[List[Any]] = None,
+    tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+    parallel_tool_calls: Optional[bool] = None,
+    functions: Optional[List[Any]] = None,
+    function_call: Optional[str] = None,
+    verbose: bool = False,
+    debug: bool = False,
+):
+    """Decorator that converts a function into a language model call.
+
+    The function's parameters become the input to the LLM (converted to a string),
+    the function's return type annotation becomes the language model's output type,
+    and the function's docstring becomes the language model's instructions.
+
+    Works with both sync and async functions.
+
+    Can be used in multiple ways:
+
+    1. As a decorator with parameters:
+        @language_model_decorator(model="gpt-4", temperature=0.7)
+        def my_lm():
+            pass
+
+    2. As a decorator without parameters:
+        @language_model_decorator
+        def my_lm():
+            pass
+
+    3. As an inline function with model as first argument:
+        lm = language_model_decorator("gpt-4")
+        # Then use: decorated_func = lm(my_function)
+
+    4. As an inline function with all parameters:
+        lm = language_model_decorator(model="gpt-4", temperature=0.7)
+        # Then use: decorated_func = lm(my_function)
+    """
+    # Handle different calling patterns
+    if callable(fn):
+        # Case: @language_model_decorator (no parentheses)
+        func = fn
+        actual_model = model or "openai/gpt-4o-mini"
+        return _create_language_model_wrapper(
+            func,
+            actual_model,
+            instructions,
+            mock_response,
+            output_type,
+            stream,
+            instructor_mode,
+            return_output,
+            timeout,
+            temperature,
+            top_p,
+            max_tokens,
+            presence_penalty,
+            frequency_penalty,
+            seed,
+            user,
+            response_format,
+            stop,
+            logit_bias,
+            logprobs,
+            top_logprobs,
+            thinking,
+            web_search_options,
+            tools,
+            tool_choice,
+            parallel_tool_calls,
+            functions,
+            function_call,
+            verbose,
+            debug,
+        )
+    elif isinstance(fn, str):
+        # Case: language_model_decorator("gpt-4") - first arg is model
+        actual_model = fn
+    else:
+        # Case: language_model_decorator() or language_model_decorator(model="gpt-4")
+        actual_model = model or "openai/gpt-4o-mini"
+
+    def decorator(func: Callable) -> Callable:
+        return _create_language_model_wrapper(
+            func,
+            actual_model,
+            instructions,
+            mock_response,
+            output_type,
+            stream,
+            instructor_mode,
+            return_output,
+            timeout,
+            temperature,
+            top_p,
+            max_tokens,
+            presence_penalty,
+            frequency_penalty,
+            seed,
+            user,
+            response_format,
+            stop,
+            logit_bias,
+            logprobs,
+            top_logprobs,
+            thinking,
+            web_search_options,
+            tools,
+            tool_choice,
+            parallel_tool_calls,
+            functions,
+            function_call,
+            verbose,
+            debug,
+        )
+
+    return decorator
+
+
+def _create_language_model_wrapper(
+    func: Callable,
+    model: Union["LanguageModel", "LanguageModelName"],
+    instructions: Optional[str],
+    mock_response: Optional[bool],
+    output_type: Optional[Type],
+    stream: Optional[bool],
+    instructor_mode: Optional["LanguageModelInstructorMode"],
+    return_output: bool,
+    timeout: Optional[Union[float, str, "Timeout"]],
+    temperature: Optional[float],
+    top_p: Optional[float],
+    max_tokens: Optional[int],
+    presence_penalty: Optional[float],
+    frequency_penalty: Optional[float],
+    seed: Optional[int],
+    user: Optional[str],
+    response_format: Optional[Dict[str, Any]],
+    stop: Optional[Union[str, List[str]]],
+    logit_bias: Optional[Dict[int, float]],
+    logprobs: Optional[bool],
+    top_logprobs: Optional[int],
+    thinking: Optional[Dict[str, Any]],
+    web_search_options: Optional[Dict[str, Any]],
+    tools: Optional[List[Any]],
+    tool_choice: Optional[Union[str, Dict[str, Any]]],
+    parallel_tool_calls: Optional[bool],
+    functions: Optional[List[Any]],
+    function_call: Optional[str],
+    verbose: bool,
+    debug: bool,
+) -> Callable:
+    """Helper function to create the actual language model wrapper."""
+    import inspect
+    import asyncio
+    from typing import get_type_hints
+
+    # Get function metadata
+    sig = inspect.signature(func)
+    type_hints = get_type_hints(func)
+    return_type = output_type or type_hints.get("return", str)
+    func_instructions = instructions or func.__doc__ or ""
+
+    # Check if function is async
+    is_async = asyncio.iscoroutinefunction(func)
+
+    if is_async:
+
+        @functools.wraps(func)
+        async def async_wrapper(*args, **kwargs):
+            # Convert function parameters to message string
+            bound_args = sig.bind(*args, **kwargs)
+            bound_args.apply_defaults()
+
+            # Create message from parameters
+            param_parts = []
+            for param_name, param_value in bound_args.arguments.items():
+                param_parts.append(f"{param_name}: {param_value}")
+            message = "\n".join(param_parts)
+
+            # Prepare parameters for language model call
+            lm_kwargs = {
+                "messages": message,
+                "instructions": func_instructions,
+                "model": model,
+                "mock_response": mock_response,
+                "stream": stream,
+                "instructor_mode": instructor_mode,
+                "timeout": timeout,
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "seed": seed,
+                "user": user,
+                "response_format": response_format,
+                "stop": stop,
+                "logit_bias": logit_bias,
+                "logprobs": logprobs,
+                "top_logprobs": top_logprobs,
+                "thinking": thinking,
+                "web_search_options": web_search_options,
+                "tools": tools,
+                "tool_choice": tool_choice,
+                "parallel_tool_calls": parallel_tool_calls,
+                "functions": functions,
+                "function_call": function_call,
+            }
+
+            # Only add type parameter if it's not str (for structured output)
+            if return_type is not str:
+                lm_kwargs["type"] = return_type
+
+            # Run language model with extracted parameters
+            return await async_run_language_model(**lm_kwargs)
+
+        return async_wrapper
+    else:
+
+        @functools.wraps(func)
+        def sync_wrapper(*args, **kwargs):
+            # Convert function parameters to message string
+            bound_args = sig.bind(*args, **kwargs)
+            bound_args.apply_defaults()
+
+            # Create message from parameters
+            param_parts = []
+            for param_name, param_value in bound_args.arguments.items():
+                param_parts.append(f"{param_name}: {param_value}")
+            message = "\n".join(param_parts)
+
+            # Prepare parameters for language model call
+            lm_kwargs = {
+                "messages": message,
+                "instructions": func_instructions,
+                "model": model,
+                "mock_response": mock_response,
+                "stream": stream,
+                "instructor_mode": instructor_mode,
+                "timeout": timeout,
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens,
+                "presence_penalty": presence_penalty,
+                "frequency_penalty": frequency_penalty,
+                "seed": seed,
+                "user": user,
+                "response_format": response_format,
+                "stop": stop,
+                "logit_bias": logit_bias,
+                "logprobs": logprobs,
+                "top_logprobs": top_logprobs,
+                "thinking": thinking,
+                "web_search_options": web_search_options,
+                "tools": tools,
+                "tool_choice": tool_choice,
+                "parallel_tool_calls": parallel_tool_calls,
+                "functions": functions,
+                "function_call": function_call,
+            }
+
+            # Only add type parameter if it's not str (for structured output)
+            if return_type is not str:
+                lm_kwargs["type"] = return_type
+
+            # Run language model with extracted parameters
+            response = run_language_model(**lm_kwargs)
+
+            # Return just the output if return_output is True (default behavior)
+            if return_output:
+                return response.output
+            else:
+                return response
+
+        return sync_wrapper
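
Taken as a whole, this hunk adds a decorator-style entry point for one-off LLM calls. A minimal usage sketch based on the docstring and wrapper logic above (the model name, temperature, and function are illustrative, and the import path assumes the subpackage re-export shown in the earlier hunk):

from hammad.genai.models.language import language_model_decorator

@language_model_decorator(model="openai/gpt-4o-mini", temperature=0.2)
def summarize(text: str) -> str:
    """Summarize the given text in one sentence."""

# The sync wrapper binds the argument, renders it as "text: <value>",
# passes the docstring to the model as instructions, and, because
# return_output defaults to True, returns response.output (a plain str
# here, since the return annotation is str and no structured type is set).
summary = summarize("A long passage to condense ...")

Note that, as written in the diff, only the sync wrapper honors `return_output`; the async wrapper returns the `async_run_language_model` result directly.
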
@@ -13,6 +13,7 @@ from typing import (
 )

 from .....cache import cached
+from .....typing import get_type_description

 from ...model_provider import litellm
 from ....types.base import BaseGenAIModelResponse
@@ -210,6 +211,7 @@ class LanguageModelResponse(BaseGenAIModelResponse[T]):
         output += f"\n{self.completion}"

         output += f"\n\n>>> Model: {self.model}"
+        output += f"\n>>> Type: {get_type_description(type(self.output))}"
         output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"

         return output
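
The rendered response now reports the output's runtime type via `get_type_description`. Illustratively, the trailer of a printed response would read something like the following (the model name and values are assumed):

>>> Model: openai/gpt-4o-mini
>>> Type: str
>>> Tool Calls: 0
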
hammad/logging/logger.py CHANGED
@@ -539,10 +539,41 @@ class Logger:
         level: Union[LoggerLevelName, int],
     ) -> None:
         """Set the logging level."""
-        self._user_level = level
-        self._logger.setLevel(level)
+        # Handle integer levels by converting to string names
+        if isinstance(level, int):
+            # Map standard logging levels to their names
+            int_to_name = {
+                _logging.DEBUG: "debug",
+                _logging.INFO: "info",
+                _logging.WARNING: "warning",
+                _logging.ERROR: "error",
+                _logging.CRITICAL: "critical",
+            }
+            level_str = int_to_name.get(level, "warning")
+        else:
+            level_str = level
+
+        self._user_level = level_str
+
+        # Standard level mapping
+        level_map = {
+            "debug": _logging.DEBUG,
+            "info": _logging.INFO,
+            "warning": _logging.WARNING,
+            "error": _logging.ERROR,
+            "critical": _logging.CRITICAL,
+        }
+
+        # Check custom levels first
+        if level_str.lower() in self._custom_levels:
+            log_level = self._custom_levels[level_str.lower()]
+        else:
+            log_level = level_map.get(level_str.lower(), _logging.WARNING)
+
+        # Set the integer level on the logger and handlers
+        self._logger.setLevel(log_level)
         for handler in self._logger.handlers:
-            handler.setLevel(level)
+            handler.setLevel(log_level)

     def add_level(
         self, name: str, value: int, style: Optional[LoggerLevelSettings] = None
@@ -580,9 +611,23 @@
         return self._user_level

     @level.setter
-    def level(self, value: str) -> None:
+    def level(self, value: Union[str, int]) -> None:
         """Set the logging level."""
-        self._user_level = value
+        # Handle integer levels by converting to string names
+        if isinstance(value, int):
+            # Map standard logging levels to their names
+            int_to_name = {
+                _logging.DEBUG: "debug",
+                _logging.INFO: "info",
+                _logging.WARNING: "warning",
+                _logging.ERROR: "error",
+                _logging.CRITICAL: "critical",
+            }
+            value_str = int_to_name.get(value, "warning")
+        else:
+            value_str = value
+
+        self._user_level = value_str

         # Standard level mapping
         level_map = {
@@ -594,10 +639,10 @@
         }

         # Check custom levels
-        if value.lower() in self._custom_levels:
-            log_level = self._custom_levels[value.lower()]
+        if value_str.lower() in self._custom_levels:
+            log_level = self._custom_levels[value_str.lower()]
         else:
-            log_level = level_map.get(value.lower(), _logging.WARNING)
+            log_level = level_map.get(value_str.lower(), _logging.WARNING)

         # Update logger level
         self._logger.setLevel(log_level)
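
Both the setter method and the `level` property now accept the stdlib integer constants as well as level names, with unrecognized integers falling back to "warning". A short sketch of the new behavior (the `Logger` constructor call is an assumption, since its signature is not part of this diff):

import logging

from hammad.logging.logger import Logger

log = Logger()  # constructor arguments assumed; not shown in this diff
log.level = logging.DEBUG  # integer is mapped to "debug", then resolved back to logging.DEBUG
log.level = "error"        # string names behave exactly as before
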
hammad/mcp/__init__.py CHANGED
@@ -6,6 +6,7 @@ from typing import TYPE_CHECKING
 from .._internal import create_getattr_importer

 if TYPE_CHECKING:
+    from mcp.server.fastmcp import FastMCP
     from .client.client import (
         convert_mcp_tool_to_openai_tool,
         MCPClient,
@@ -26,6 +27,8 @@ if TYPE_CHECKING:


 __all__ = (
+    # fastmcp
+    "FastMCP",
     # hammad.mcp.client
     "MCPClient",
     "MCPClientService",