hammad-python 0.0.23__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. hammad/__init__.py +62 -14
  2. hammad/_main.py +226 -0
  3. hammad/cli/__init__.py +0 -2
  4. hammad/cli/plugins.py +3 -1
  5. hammad/data/__init__.py +4 -5
  6. hammad/data/types/__init__.py +37 -1
  7. hammad/data/types/file.py +74 -1
  8. hammad/data/types/multimodal/__init__.py +14 -2
  9. hammad/data/types/multimodal/audio.py +106 -2
  10. hammad/data/types/multimodal/image.py +104 -2
  11. hammad/data/types/text.py +242 -0
  12. hammad/genai/__init__.py +73 -0
  13. hammad/genai/a2a/__init__.py +32 -0
  14. hammad/genai/a2a/workers.py +552 -0
  15. hammad/genai/agents/__init__.py +8 -0
  16. hammad/genai/agents/agent.py +747 -214
  17. hammad/genai/agents/run.py +421 -12
  18. hammad/genai/agents/types/agent_response.py +2 -1
  19. hammad/genai/graphs/__init__.py +125 -0
  20. hammad/genai/graphs/base.py +1786 -0
  21. hammad/genai/graphs/plugins.py +316 -0
  22. hammad/genai/graphs/types.py +638 -0
  23. hammad/genai/models/language/__init__.py +6 -1
  24. hammad/genai/models/language/model.py +46 -0
  25. hammad/genai/models/language/run.py +330 -4
  26. hammad/genai/models/language/types/language_model_response.py +1 -1
  27. hammad/genai/types/tools.py +1 -1
  28. hammad/logging/logger.py +60 -5
  29. hammad/mcp/__init__.py +3 -0
  30. hammad/types.py +288 -0
  31. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/METADATA +6 -1
  32. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/RECORD +34 -32
  33. hammad/_main/__init__.py +0 -4
  34. hammad/_main/_fn.py +0 -20
  35. hammad/_main/_new.py +0 -52
  36. hammad/_main/_run.py +0 -50
  37. hammad/_main/_to.py +0 -19
  38. hammad/cli/_runner.py +0 -265
  39. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/WHEEL +0 -0
  40. {hammad_python-0.0.23.dist-info → hammad_python-0.0.25.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@
3
3
  Standalone functions for running agents with full parameter typing.
4
4
  """
5
5
 
6
+ import functools
6
7
  from typing import (
7
8
  Any,
8
9
  Callable,
@@ -40,6 +41,7 @@ __all__ = [
40
41
  "async_run_agent",
41
42
  "run_agent_iter",
42
43
  "async_run_agent_iter",
44
+ "agent_decorator",
43
45
  ]
44
46
 
45
47
  T = TypeVar("T")
@@ -72,6 +74,9 @@ def run_agent(
72
74
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
73
75
  max_steps: Optional[int] = None,
74
76
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
77
+ # End strategy
78
+ end_strategy: Optional[Literal["tool"]] = None,
79
+ end_tool: Optional[Callable] = None,
75
80
  # LM settings
76
81
  timeout: Optional[Union[float, str, "Timeout"]] = None,
77
82
  temperature: Optional[float] = None,
@@ -81,6 +86,8 @@ def run_agent(
81
86
  frequency_penalty: Optional[float] = None,
82
87
  seed: Optional[int] = None,
83
88
  user: Optional[str] = None,
89
+ verbose: bool = False,
90
+ debug: bool = False,
84
91
  ) -> "AgentResponse[str]": ...
85
92
 
86
93
 
@@ -111,6 +118,9 @@ def run_agent(
111
118
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
112
119
  max_steps: Optional[int] = None,
113
120
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
121
+ # End strategy
122
+ end_strategy: Optional[Literal["tool"]] = None,
123
+ end_tool: Optional[Callable] = None,
114
124
  # LM settings
115
125
  timeout: Optional[Union[float, str, "Timeout"]] = None,
116
126
  temperature: Optional[float] = None,
@@ -120,10 +130,14 @@ def run_agent(
120
130
  frequency_penalty: Optional[float] = None,
121
131
  seed: Optional[int] = None,
122
132
  user: Optional[str] = None,
133
+ verbose: bool = False,
134
+ debug: bool = False,
123
135
  ) -> "AgentResponse[T]": ...
124
136
 
125
137
 
126
- def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
138
+ def run_agent(
139
+ messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
140
+ ) -> "AgentResponse[Any]":
127
141
  """Runs this agent and returns a final agent response or stream.
128
142
 
129
143
  You can override defaults assigned to this agent from this function directly.
@@ -145,6 +159,8 @@ def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
145
159
  stream: Whether to return a stream instead of a final response.
146
160
  - If True, returns AgentStream for real-time processing
147
161
  - If False, returns complete AgentResponse
162
+ verbose: If True, set logger to INFO level for detailed output
163
+ debug: If True, set logger to DEBUG level for maximum verbosity
148
164
  **kwargs: Additional keyword arguments passed to the language model.
149
165
  - Examples: temperature=0.7, top_p=0.9, presence_penalty=0.1
150
166
 
@@ -195,8 +211,20 @@ def run_agent(messages: "AgentMessages", **kwargs: Any) -> "AgentResponse[Any]":
195
211
  ... context=context
196
212
  ... )
197
213
  """
198
- agent = Agent(**kwargs)
199
- return agent.run(messages, **kwargs)
214
+ # Separate agent constructor parameters from run parameters
215
+ agent_constructor_params = {
216
+ k: v
217
+ for k, v in kwargs.items()
218
+ if k in ["name", "instructions", "description", "tools", "settings", "model"]
219
+ }
220
+ agent_run_params = {
221
+ k: v
222
+ for k, v in kwargs.items()
223
+ if k not in ["name", "instructions", "description", "tools", "settings"]
224
+ }
225
+
226
+ agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
227
+ return agent.run(messages, verbose=verbose, debug=debug, **agent_run_params)
200
228
 
201
229
 
202
230
  # Overloads for async_run_agent
@@ -226,6 +254,9 @@ async def async_run_agent(
226
254
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
227
255
  max_steps: Optional[int] = None,
228
256
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
257
+ # End strategy
258
+ end_strategy: Optional[Literal["tool"]] = None,
259
+ end_tool: Optional[Callable] = None,
229
260
  # LM settings
230
261
  timeout: Optional[Union[float, str, "Timeout"]] = None,
231
262
  temperature: Optional[float] = None,
@@ -235,6 +266,8 @@ async def async_run_agent(
235
266
  frequency_penalty: Optional[float] = None,
236
267
  seed: Optional[int] = None,
237
268
  user: Optional[str] = None,
269
+ verbose: bool = False,
270
+ debug: bool = False,
238
271
  ) -> "AgentResponse[str]": ...
239
272
 
240
273
 
@@ -265,6 +298,9 @@ async def async_run_agent(
265
298
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
266
299
  max_steps: Optional[int] = None,
267
300
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
301
+ # End strategy
302
+ end_strategy: Optional[Literal["tool"]] = None,
303
+ end_tool: Optional[Callable] = None,
268
304
  # LM settings
269
305
  timeout: Optional[Union[float, str, "Timeout"]] = None,
270
306
  temperature: Optional[float] = None,
@@ -274,11 +310,13 @@ async def async_run_agent(
274
310
  frequency_penalty: Optional[float] = None,
275
311
  seed: Optional[int] = None,
276
312
  user: Optional[str] = None,
313
+ verbose: bool = False,
314
+ debug: bool = False,
277
315
  ) -> "AgentResponse[T]": ...
278
316
 
279
317
 
280
318
  async def async_run_agent(
281
- messages: "AgentMessages", **kwargs: Any
319
+ messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
282
320
  ) -> "AgentResponse[Any]":
283
321
  """Runs this agent asynchronously and returns a final agent response.
284
322
 
@@ -347,8 +385,22 @@ async def async_run_agent(
347
385
  ... )
348
386
  ... return response.output
349
387
  """
350
- agent = Agent(**kwargs)
351
- return await agent.async_run(messages, **kwargs)
388
+ # Separate agent constructor parameters from run parameters
389
+ agent_constructor_params = {
390
+ k: v
391
+ for k, v in kwargs.items()
392
+ if k in ["name", "instructions", "description", "tools", "settings", "model"]
393
+ }
394
+ agent_run_params = {
395
+ k: v
396
+ for k, v in kwargs.items()
397
+ if k not in ["name", "instructions", "description", "tools", "settings"]
398
+ }
399
+
400
+ agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
401
+ return await agent.async_run(
402
+ messages, verbose=verbose, debug=debug, **agent_run_params
403
+ )
352
404
 
353
405
 
354
406
  # Overloads for run_agent_iter
@@ -378,6 +430,9 @@ def run_agent_iter(
378
430
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
379
431
  max_steps: Optional[int] = None,
380
432
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
433
+ # End strategy
434
+ end_strategy: Optional[Literal["tool"]] = None,
435
+ end_tool: Optional[Callable] = None,
381
436
  # LM settings
382
437
  timeout: Optional[Union[float, str, "Timeout"]] = None,
383
438
  temperature: Optional[float] = None,
@@ -417,6 +472,9 @@ def run_agent_iter(
417
472
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
418
473
  max_steps: Optional[int] = None,
419
474
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
475
+ # End strategy
476
+ end_strategy: Optional[Literal["tool"]] = None,
477
+ end_tool: Optional[Callable] = None,
420
478
  # LM settings
421
479
  timeout: Optional[Union[float, str, "Timeout"]] = None,
422
480
  temperature: Optional[float] = None,
@@ -429,7 +487,9 @@ def run_agent_iter(
429
487
  ) -> "AgentStream[T]": ...
430
488
 
431
489
 
432
- def run_agent_iter(messages: "AgentMessages", **kwargs: Any) -> "AgentStream[Any]":
490
+ def run_agent_iter(
491
+ messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
492
+ ) -> "AgentStream[Any]":
433
493
  """Iterate over agent steps, yielding each step response.
434
494
 
435
495
  You can override defaults assigned to this agent from this function directly.
@@ -513,8 +573,22 @@ def run_agent_iter(messages: "AgentMessages", **kwargs: Any) -> "AgentStream[Any
513
573
  ... except Exception as e:
514
574
  ... print(f"Stream error: {e}")
515
575
  """
516
- agent = Agent(**kwargs)
517
- return agent.run(messages, stream=True, **kwargs)
576
+ # Separate agent constructor parameters from run parameters
577
+ agent_constructor_params = {
578
+ k: v
579
+ for k, v in kwargs.items()
580
+ if k in ["name", "instructions", "description", "tools", "settings", "model"]
581
+ }
582
+ agent_run_params = {
583
+ k: v
584
+ for k, v in kwargs.items()
585
+ if k not in ["name", "instructions", "description", "tools", "settings"]
586
+ }
587
+
588
+ agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
589
+ return agent.run(
590
+ messages, stream=True, verbose=verbose, debug=debug, **agent_run_params
591
+ )
518
592
 
519
593
 
520
594
  # Overloads for async_run_agent_iter
@@ -544,6 +618,9 @@ def async_run_agent_iter(
544
618
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
545
619
  max_steps: Optional[int] = None,
546
620
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
621
+ # End strategy
622
+ end_strategy: Optional[Literal["tool"]] = None,
623
+ end_tool: Optional[Callable] = None,
547
624
  # LM settings
548
625
  timeout: Optional[Union[float, str, "Timeout"]] = None,
549
626
  temperature: Optional[float] = None,
@@ -583,6 +660,9 @@ def async_run_agent_iter(
583
660
  model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
584
661
  max_steps: Optional[int] = None,
585
662
  instructor_mode: Optional["LanguageModelInstructorMode"] = None,
663
+ # End strategy
664
+ end_strategy: Optional[Literal["tool"]] = None,
665
+ end_tool: Optional[Callable] = None,
586
666
  # LM settings
587
667
  timeout: Optional[Union[float, str, "Timeout"]] = None,
588
668
  temperature: Optional[float] = None,
@@ -596,7 +676,7 @@ def async_run_agent_iter(
596
676
 
597
677
 
598
678
  def async_run_agent_iter(
599
- messages: "AgentMessages", **kwargs: Any
679
+ messages: "AgentMessages", verbose: bool = False, debug: bool = False, **kwargs: Any
600
680
  ) -> "AgentStream[Any]":
601
681
  """Async iterate over agent steps, yielding each step response.
602
682
 
@@ -611,5 +691,334 @@ def async_run_agent_iter(
611
691
  Returns:
612
692
  An AgentStream that can be iterated over asynchronously
613
693
  """
614
- agent = Agent(**kwargs)
615
- return agent.run(messages, stream=True, **kwargs)
694
+ # Separate agent constructor parameters from run parameters
695
+ agent_constructor_params = {
696
+ k: v
697
+ for k, v in kwargs.items()
698
+ if k in ["name", "instructions", "description", "tools", "settings", "model"]
699
+ }
700
+ agent_run_params = {
701
+ k: v
702
+ for k, v in kwargs.items()
703
+ if k not in ["name", "instructions", "description", "tools", "settings"]
704
+ }
705
+
706
+ agent = Agent(verbose=verbose, debug=debug, **agent_constructor_params)
707
+ return agent.run(
708
+ messages, stream=True, verbose=verbose, debug=debug, **agent_run_params
709
+ )
710
+
711
+
712
+ def agent_decorator(
713
+ fn: Union[str, Callable, None] = None,
714
+ *,
715
+ # Agent settings
716
+ name: Optional[str] = None,
717
+ instructions: Optional[str] = None,
718
+ description: Optional[str] = None,
719
+ tools: Union[List["Tool"], Callable, None] = None,
720
+ settings: Optional[AgentSettings] = None,
721
+ # Context management
722
+ context: Optional["AgentContext"] = None,
723
+ context_updates: Optional[
724
+ Union[List[Literal["before", "after"]], Literal["before", "after"]]
725
+ ] = None,
726
+ context_confirm: bool = False,
727
+ context_strategy: Literal["selective", "all"] = "all",
728
+ context_max_retries: int = 3,
729
+ context_confirm_instructions: Optional[str] = None,
730
+ context_selection_instructions: Optional[str] = None,
731
+ context_update_instructions: Optional[str] = None,
732
+ context_format: Literal["json", "python", "markdown"] = "json",
733
+ # Model settings
734
+ model: Optional[Union["LanguageModel", "LanguageModelName"]] = None,
735
+ max_steps: Optional[int] = None,
736
+ instructor_mode: Optional["LanguageModelInstructorMode"] = None,
737
+ return_output: bool = True,
738
+ # End strategy
739
+ end_strategy: Optional[Literal["tool"]] = None,
740
+ end_tool: Optional[Callable] = None,
741
+ # LM settings
742
+ timeout: Optional[Union[float, str, "Timeout"]] = None,
743
+ temperature: Optional[float] = None,
744
+ top_p: Optional[float] = None,
745
+ max_tokens: Optional[int] = None,
746
+ presence_penalty: Optional[float] = None,
747
+ frequency_penalty: Optional[float] = None,
748
+ seed: Optional[int] = None,
749
+ user: Optional[str] = None,
750
+ verbose: bool = False,
751
+ debug: bool = False,
752
+ ):
753
+ """Decorator that converts a function into an agent.
754
+
755
+ The function's parameters become the input to the LLM (converted to a string),
756
+ the function's return type annotation becomes the agent's output type,
757
+ and the function's docstring becomes the agent's instructions.
758
+
759
+ Works with both sync and async functions.
760
+
761
+ Can be used in multiple ways:
762
+
763
+ 1. As a decorator with parameters:
764
+ @agent_decorator(name="steve", temperature=0.7)
765
+ def my_agent():
766
+ pass
767
+
768
+ 2. As a decorator without parameters:
769
+ @agent_decorator
770
+ def my_agent():
771
+ pass
772
+
773
+ 3. As an inline function with name as first argument:
774
+ agent = agent_decorator("steve")
775
+ # Then use: decorated_func = agent(my_function)
776
+
777
+ 4. As an inline function with all parameters:
778
+ agent = agent_decorator(name="steve", temperature=0.7)
779
+ # Then use: decorated_func = agent(my_function)
780
+ """
781
+ # Handle different calling patterns
782
+ if callable(fn):
783
+ # Case: @agent_decorator (no parentheses)
784
+ func = fn
785
+ actual_name = name or "agent"
786
+ return _create_agent_wrapper(
787
+ func,
788
+ actual_name,
789
+ instructions,
790
+ description,
791
+ tools,
792
+ settings,
793
+ context,
794
+ context_updates,
795
+ context_confirm,
796
+ context_strategy,
797
+ context_max_retries,
798
+ context_confirm_instructions,
799
+ context_selection_instructions,
800
+ context_update_instructions,
801
+ context_format,
802
+ model,
803
+ max_steps,
804
+ instructor_mode,
805
+ return_output,
806
+ end_strategy,
807
+ end_tool,
808
+ timeout,
809
+ temperature,
810
+ top_p,
811
+ max_tokens,
812
+ presence_penalty,
813
+ frequency_penalty,
814
+ seed,
815
+ user,
816
+ verbose,
817
+ debug,
818
+ )
819
+ elif isinstance(fn, str):
820
+ # Case: agent_decorator("steve") - first arg is name
821
+ actual_name = fn
822
+ else:
823
+ # Case: agent_decorator() or agent_decorator(name="steve")
824
+ actual_name = name or "agent"
825
+
826
+ def decorator(func: Callable) -> Callable:
827
+ return _create_agent_wrapper(
828
+ func,
829
+ actual_name,
830
+ instructions,
831
+ description,
832
+ tools,
833
+ settings,
834
+ context,
835
+ context_updates,
836
+ context_confirm,
837
+ context_strategy,
838
+ context_max_retries,
839
+ context_confirm_instructions,
840
+ context_selection_instructions,
841
+ context_update_instructions,
842
+ context_format,
843
+ model,
844
+ max_steps,
845
+ instructor_mode,
846
+ return_output,
847
+ end_strategy,
848
+ end_tool,
849
+ timeout,
850
+ temperature,
851
+ top_p,
852
+ max_tokens,
853
+ presence_penalty,
854
+ frequency_penalty,
855
+ seed,
856
+ user,
857
+ verbose,
858
+ debug,
859
+ )
860
+
861
+ return decorator
862
+
863
+
864
+ def _create_agent_wrapper(
865
+ func: Callable,
866
+ name: str,
867
+ instructions: Optional[str],
868
+ description: Optional[str],
869
+ tools: Union[List["Tool"], Callable, None],
870
+ settings: Optional[AgentSettings],
871
+ context: Optional["AgentContext"],
872
+ context_updates: Optional[
873
+ Union[List[Literal["before", "after"]], Literal["before", "after"]]
874
+ ],
875
+ context_confirm: bool,
876
+ context_strategy: Literal["selective", "all"],
877
+ context_max_retries: int,
878
+ context_confirm_instructions: Optional[str],
879
+ context_selection_instructions: Optional[str],
880
+ context_update_instructions: Optional[str],
881
+ context_format: Literal["json", "python", "markdown"],
882
+ model: Optional[Union["LanguageModel", "LanguageModelName"]],
883
+ max_steps: Optional[int],
884
+ instructor_mode: Optional["LanguageModelInstructorMode"],
885
+ return_output: bool,
886
+ end_strategy: Optional[Literal["tool"]],
887
+ end_tool: Optional[Callable],
888
+ timeout: Optional[Union[float, str, "Timeout"]],
889
+ temperature: Optional[float],
890
+ top_p: Optional[float],
891
+ max_tokens: Optional[int],
892
+ presence_penalty: Optional[float],
893
+ frequency_penalty: Optional[float],
894
+ seed: Optional[int],
895
+ user: Optional[str],
896
+ verbose: bool,
897
+ debug: bool,
898
+ ) -> Callable:
899
+ """Helper function to create the actual agent wrapper."""
900
+ import inspect
901
+ import asyncio
902
+ from typing import get_type_hints
903
+
904
+ # Get function metadata
905
+ sig = inspect.signature(func)
906
+ type_hints = get_type_hints(func)
907
+ return_type = type_hints.get("return", str)
908
+ func_instructions = instructions or func.__doc__ or ""
909
+
910
+ # Check if function is async
911
+ is_async = asyncio.iscoroutinefunction(func)
912
+
913
+ if is_async:
914
+
915
+ @functools.wraps(func)
916
+ async def async_wrapper(*args, **kwargs):
917
+ # Convert function parameters to message string
918
+ bound_args = sig.bind(*args, **kwargs)
919
+ bound_args.apply_defaults()
920
+
921
+ # Create message from parameters
922
+ param_parts = []
923
+ for param_name, param_value in bound_args.arguments.items():
924
+ param_parts.append(f"{param_name}: {param_value}")
925
+ message = "\n".join(param_parts)
926
+
927
+ # Run agent with extracted parameters
928
+ response = await async_run_agent(
929
+ messages=message,
930
+ output_type=return_type,
931
+ name=name,
932
+ instructions=func_instructions,
933
+ description=description,
934
+ tools=tools,
935
+ settings=settings,
936
+ context=context,
937
+ context_updates=context_updates,
938
+ context_confirm=context_confirm,
939
+ context_strategy=context_strategy,
940
+ context_max_retries=context_max_retries,
941
+ context_confirm_instructions=context_confirm_instructions,
942
+ context_selection_instructions=context_selection_instructions,
943
+ context_update_instructions=context_update_instructions,
944
+ context_format=context_format,
945
+ model=model or "openai/gpt-4o-mini",
946
+ max_steps=max_steps,
947
+ instructor_mode=instructor_mode,
948
+ end_strategy=end_strategy,
949
+ end_tool=end_tool,
950
+ timeout=timeout,
951
+ temperature=temperature,
952
+ top_p=top_p,
953
+ max_tokens=max_tokens,
954
+ presence_penalty=presence_penalty,
955
+ frequency_penalty=frequency_penalty,
956
+ seed=seed,
957
+ user=user,
958
+ verbose=verbose,
959
+ debug=debug,
960
+ )
961
+
962
+ # Return just the output if return_output is True (default behavior)
963
+ if return_output:
964
+ return response.output
965
+ else:
966
+ return response
967
+
968
+ return async_wrapper
969
+ else:
970
+
971
+ @functools.wraps(func)
972
+ def sync_wrapper(*args, **kwargs):
973
+ # Convert function parameters to message string
974
+ bound_args = sig.bind(*args, **kwargs)
975
+ bound_args.apply_defaults()
976
+
977
+ # Create message from parameters
978
+ param_parts = []
979
+ for param_name, param_value in bound_args.arguments.items():
980
+ param_parts.append(f"{param_name}: {param_value}")
981
+ message = "\n".join(param_parts)
982
+
983
+ # Run agent with extracted parameters
984
+ response = run_agent(
985
+ messages=message,
986
+ output_type=return_type,
987
+ name=name,
988
+ instructions=func_instructions,
989
+ description=description,
990
+ tools=tools,
991
+ settings=settings,
992
+ context=context,
993
+ context_updates=context_updates,
994
+ context_confirm=context_confirm,
995
+ context_strategy=context_strategy,
996
+ context_max_retries=context_max_retries,
997
+ context_confirm_instructions=context_confirm_instructions,
998
+ context_selection_instructions=context_selection_instructions,
999
+ context_update_instructions=context_update_instructions,
1000
+ context_format=context_format,
1001
+ model=model or "openai/gpt-4o-mini",
1002
+ max_steps=max_steps,
1003
+ instructor_mode=instructor_mode,
1004
+ end_strategy=end_strategy,
1005
+ end_tool=end_tool,
1006
+ timeout=timeout,
1007
+ temperature=temperature,
1008
+ top_p=top_p,
1009
+ max_tokens=max_tokens,
1010
+ presence_penalty=presence_penalty,
1011
+ frequency_penalty=frequency_penalty,
1012
+ seed=seed,
1013
+ user=user,
1014
+ verbose=verbose,
1015
+ debug=debug,
1016
+ )
1017
+
1018
+ # Return just the output if return_output is True (default behavior)
1019
+ if return_output:
1020
+ return response.output
1021
+ else:
1022
+ return response
1023
+
1024
+ return sync_wrapper
@@ -1,6 +1,7 @@
1
1
  """hammad.genai.agents.types.agent_response"""
2
2
 
3
3
  from typing import List, Any, TypeVar, Literal, Generic
4
+ from pydantic import Field
4
5
 
5
6
  from ....cache import cached
6
7
  from ....typing import get_type_description
@@ -49,7 +50,7 @@ class AgentResponse(LanguageModelResponse[T], Generic[T, AgentContext]):
49
50
  type: Literal["agent"] = "agent"
50
51
  """The type of the response. Always `agent`."""
51
52
 
52
- steps: List[LanguageModelResponse[str]]
53
+ steps: List[LanguageModelResponse[str]] = Field(default_factory=list)
53
54
  """
54
55
  A list of steps taken by the agent **BEFORE** its final output.
55
56