hammad-python 0.0.20__py3-none-any.whl → 0.0.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/genai/agents/agent.py +386 -47
- hammad/genai/agents/types/agent_response.py +39 -5
- hammad/genai/agents/types/agent_stream.py +92 -7
- hammad/genai/models/language/model.py +17 -0
- hammad/genai/models/language/types/language_model_response.py +5 -2
- hammad/genai/types/tools.py +2 -0
- hammad/logging/logger.py +8 -0
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/METADATA +1 -1
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/RECORD +11 -11
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/licenses/LICENSE +0 -0
hammad/genai/agents/agent.py
CHANGED
@@ -19,6 +19,8 @@ from dataclasses import dataclass, field
 from enum import Enum
 import json
 
+from ...logging.logger import _get_internal_logger
+
 from ..types.base import BaseGenAIModel, BaseGenAIModelSettings
 from ..models.language.model import LanguageModel
 from ..models.language.types import (
@@ -53,6 +55,9 @@ if TYPE_CHECKING:
 T = TypeVar("T")
 
 
+logger = _get_internal_logger(__name__)
+
+
 @dataclass
 class AgentSettings:
     """Settings object that controls the default behavior of an agent's run."""
@@ -253,6 +258,43 @@ class Agent(BaseGenAIModel, Generic[T]):
         context_format: Literal["json", "python", "markdown"] = "json",
         **kwargs: Any,
     ):
+        """Create a new AI agent with specified capabilities and behavior.
+
+        An agent is an intelligent assistant that can use tools, follow instructions,
+        and maintain context across conversations. It combines a language model with
+        additional capabilities like tool execution and structured output generation.
+
+        Args:
+            name: A human-readable name for the agent (default: "agent")
+            instructions: System instructions that define the agent's behavior and personality
+            model: The language model to use - either a LanguageModel instance or model name string
+            description: Optional description of what the agent does
+            tools: List of tools/functions the agent can call, or a single callable
+            settings: AgentSettings object to customize default behavior
+            instructor_mode: Mode for structured output generation
+            context_updates: When to update context - "before", "after", or both
+            context_confirm: Whether to confirm context updates with the user
+            context_strategy: How to select context updates - "selective" or "all"
+            context_max_retries: Maximum attempts for context update operations
+            context_confirm_instructions: Custom instructions for context confirmation
+            context_selection_instructions: Custom instructions for context selection
+            context_update_instructions: Custom instructions for context updates
+            context_format: Format for context display - "json", "python", or "markdown"
+            **kwargs: Additional parameters passed to the underlying language model
+
+        Example:
+            Basic agent:
+            >>> agent = Agent(name="assistant", instructions="You are helpful")
+
+            Agent with tools:
+            >>> def calculator(x: int, y: int) -> int:
+            ...     return x + y
+            >>> agent = Agent(tools=[calculator])
+
+            Agent with custom settings:
+            >>> settings = AgentSettings(max_steps=5)
+            >>> agent = Agent(settings=settings, model="gpt-4")
+        """
         # Initialize BaseGenAIModel with basic parameters
         super().__init__(
             model=model if isinstance(model, str) else model.model, **kwargs
@@ -297,17 +339,65 @@ class Agent(BaseGenAIModel, Generic[T]):
         """Get the underlying language model."""
         return self._language_model
 
+    def _get_effective_context_settings(
+        self,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
+    ) -> dict:
+        """Get effective context settings, using provided parameters or defaults."""
+        return {
+            "context_updates": context_updates
+            if context_updates is not None
+            else self.context_updates,
+            "context_confirm": context_confirm
+            if context_confirm is not None
+            else self.context_confirm,
+            "context_strategy": context_strategy
+            if context_strategy is not None
+            else self.context_strategy,
+            "context_max_retries": context_max_retries
+            if context_max_retries is not None
+            else self.context_max_retries,
+            "context_confirm_instructions": context_confirm_instructions
+            if context_confirm_instructions is not None
+            else self.context_confirm_instructions,
+            "context_selection_instructions": context_selection_instructions
+            if context_selection_instructions is not None
+            else self.context_selection_instructions,
+            "context_update_instructions": context_update_instructions
+            if context_update_instructions is not None
+            else self.context_update_instructions,
+            "context_format": context_format
+            if context_format is not None
+            else self.context_format,
+        }
+
     def _should_update_context(
-        self,
+        self,
+        context: AgentContext,
+        timing: Literal["before", "after"],
+        context_updates=None,
     ) -> bool:
         """Determine if context should be updated based on timing and configuration."""
-        if not self.context_updates:
+        effective_context_updates = (
+            context_updates if context_updates is not None else self.context_updates
+        )
+
+        if not effective_context_updates:
             return False
 
-        if isinstance(self.context_updates, str):
-            return self.context_updates == timing
+        if isinstance(effective_context_updates, str):
+            return effective_context_updates == timing
         else:
-            return timing in self.context_updates
+            return timing in effective_context_updates
 
     def _create_context_confirm_model(self):
         """Create IsUpdateRequired model for context confirmation."""
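Note on the hunk above: _get_effective_context_settings is a per-call-override-with-instance-default helper, and the "is not None" checks matter because falsy overrides like context_confirm=False must still win over the instance default. A minimal standalone sketch of the same fallback logic (names here are illustrative, not part of the package):

from typing import Optional

class Example:
    def __init__(self) -> None:
        self.context_confirm = True
        self.context_max_retries = 3

    def effective(
        self,
        context_confirm: Optional[bool] = None,
        context_max_retries: Optional[int] = None,
    ) -> dict:
        # Fall back to the instance default only when the caller passed None.
        return {
            "context_confirm": context_confirm
            if context_confirm is not None
            else self.context_confirm,
            "context_max_retries": context_max_retries
            if context_max_retries is not None
            else self.context_max_retries,
        }

# False overrides the default True because the check is "is not None", not truthiness.
assert Example().effective(context_confirm=False) == {
    "context_confirm": False,
    "context_max_retries": 3,
}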
@@ -334,18 +424,60 @@ class Agent(BaseGenAIModel, Generic[T]):
         if field_name:
             # Single field update
             if isinstance(context, BaseModel):
-                field_type = context.model_fields[field_name].annotation
+                field_type = context.__class__.model_fields[field_name].annotation
+                field_info = context.__class__.model_fields[field_name]
+                description = getattr(
+                    field_info, "description", f"Update the {field_name} field"
+                )
             elif isinstance(context, dict):
                 field_type = type(context[field_name])
+                description = f"Update the {field_name} field"
             else:
                 field_type = Any
+                description = f"Update the {field_name} field"
 
             return create_model(
-                field_name.capitalize(),
+                f"Update{field_name.capitalize()}",
+                **{field_name: (field_type, Field(description=description))},
             )
         else:
-            # All fields update
-
+            # All fields update - create a model with the exact same fields as the context
+            if isinstance(context, BaseModel):
+                # Create a model with the same fields as the context
+                field_definitions = {}
+                for field_name, field_info in context.model_fields.items():
+                    field_type = field_info.annotation
+                    current_value = getattr(context, field_name)
+                    description = getattr(
+                        field_info, "description", f"Current value: {current_value}"
+                    )
+                    field_definitions[field_name] = (
+                        field_type,
+                        Field(description=description),
+                    )
+
+                return create_model("ContextUpdate", **field_definitions)
+            elif isinstance(context, dict):
+                # Create a model with the same keys as the dict
+                field_definitions = {}
+                for key, value in context.items():
+                    field_type = type(value)
+                    description = f"Current value: {value}"
+                    field_definitions[key] = (
+                        field_type,
+                        Field(description=description),
+                    )
+
+                return create_model("ContextUpdate", **field_definitions)
+            else:
+                # Fallback to generic updates
+                return create_model(
+                    "ContextUpdate",
+                    updates=(
+                        Dict[str, Any],
+                        Field(description="Dictionary of field updates"),
+                    ),
+                )
 
     def _perform_context_update(
         self,
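The rewritten _create_context_update_model mirrors the context's own fields onto a dynamically built Pydantic model, so the structured-output call can return typed values per field instead of a loose dict. A short sketch of the underlying pydantic.create_model mechanism (UserContext is a hypothetical context model, not from the package):

from pydantic import BaseModel, Field, create_model

class UserContext(BaseModel):
    name: str = Field(description="The user's name")
    age: int = Field(description="The user's age")

ctx = UserContext(name="Ada", age=36)

# Rebuild the same fields, embedding current values in the descriptions,
# as the "all fields" branch above does.
field_definitions = {
    field_name: (
        info.annotation,
        Field(description=f"Current value: {getattr(ctx, field_name)}"),
    )
    for field_name, info in UserContext.model_fields.items()
}
ContextUpdate = create_model("ContextUpdate", **field_definitions)

print(ContextUpdate(name="Ada Lovelace", age=36))
# ContextUpdate(name='Ada Lovelace', age=36)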
@@ -353,20 +485,42 @@ class Agent(BaseGenAIModel, Generic[T]):
         model: LanguageModel,
         current_messages: List[Dict[str, Any]],
         timing: Literal["before", "after"],
+        effective_settings: Optional[dict] = None,
    ) -> AgentContext:
         """Perform context update with retries and error handling."""
         updated_context = context
 
-        for attempt in range(self.context_max_retries):
+        # Use effective settings or defaults
+        if effective_settings is None:
+            effective_settings = {
+                "context_confirm": self.context_confirm,
+                "context_strategy": self.context_strategy,
+                "context_max_retries": self.context_max_retries,
+                "context_confirm_instructions": self.context_confirm_instructions,
+                "context_selection_instructions": self.context_selection_instructions,
+                "context_update_instructions": self.context_update_instructions,
+                "context_format": self.context_format,
+            }
+
+        for attempt in range(effective_settings["context_max_retries"]):
             try:
                 # Check if update is needed (if confirmation is enabled)
-                if self.context_confirm:
+                if effective_settings["context_confirm"]:
                     confirm_model = self._create_context_confirm_model()
-
-
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(
+                        updated_context, effective_settings["context_format"]
+                    )
+                    confirm_instructions = f"""Based on the conversation, determine if the context should be updated {timing} processing.
+
+Current context structure:
+{context_structure}
+
+Should the context be updated based on the new information provided in the conversation?"""
+
+                    if effective_settings["context_confirm_instructions"]:
+                        confirm_instructions += f"\n\nAdditional instructions: {effective_settings['context_confirm_instructions']}"
 
                     confirm_response = model.run(
                         messages=current_messages
@@ -379,16 +533,25 @@ class Agent(BaseGenAIModel, Generic[T]):
                         return updated_context
 
                 # Perform the update based on strategy
-                if self.context_strategy == "selective":
+                if effective_settings["context_strategy"] == "selective":
                     # Get fields to update
                     selection_model = self._create_context_selection_model(
                         updated_context
                     )
-
-
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(
+                        updated_context, effective_settings["context_format"]
+                    )
+                    selection_instructions = f"""Select which fields in the context should be updated {timing} processing based on the conversation.
+
+Current context structure:
+{context_structure}
+
+Choose only the fields that need to be updated based on the new information provided in the conversation."""
+
+                    if effective_settings["context_selection_instructions"]:
+                        selection_instructions += f"\n\nAdditional instructions: {effective_settings['context_selection_instructions']}"
 
                     selection_response = model.run(
                         messages=current_messages
@@ -403,13 +566,21 @@ class Agent(BaseGenAIModel, Generic[T]):
                         field_model = self._create_context_update_model(
                             updated_context, field_name
                         )
-
-
+                        # Get current field value for context
+                        current_value = (
+                            getattr(updated_context, field_name)
+                            if isinstance(updated_context, BaseModel)
+                            else updated_context.get(field_name)
                         )
-
-
-
-
+
+                        field_instructions = f"""Update the {field_name} field in the context based on the conversation.
+
+Current value of {field_name}: {current_value}
+
+Please provide the new value for {field_name} based on the information from the conversation."""
+
+                        if effective_settings["context_update_instructions"]:
+                            field_instructions += f"\n\nAdditional instructions: {effective_settings['context_update_instructions']}"
 
                         field_response = model.run(
                             messages=current_messages
@@ -429,9 +600,20 @@ class Agent(BaseGenAIModel, Generic[T]):
                 else:  # strategy == "all"
                     # Update all fields at once
                     update_model = self._create_context_update_model(updated_context)
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(
+                        updated_context, effective_settings["context_format"]
+                    )
+                    update_instructions = f"""Update the context {timing} processing based on the conversation.
+
+Current context structure:
+{context_structure}
+
+Please update the appropriate fields based on the conversation. Only update fields that need to be changed based on the new information provided."""
+
+                    if effective_settings["context_update_instructions"]:
+                        update_instructions += f"\n\nAdditional instructions: {effective_settings['context_update_instructions']}"
 
                     update_response = model.run(
                         messages=current_messages
@@ -441,9 +623,26 @@ class Agent(BaseGenAIModel, Generic[T]):
                     )
 
                 # Apply the updates
-
-
-
+                if hasattr(update_response.output, "updates"):
+                    # Legacy fallback for generic updates
+                    updated_context = _update_context_object(
+                        updated_context, update_response.output.updates
+                    )
+                else:
+                    # New approach - extract field values directly from the response
+                    updates_dict = {}
+                    for field_name in (
+                        context.model_fields.keys()
+                        if isinstance(context, BaseModel)
+                        else context.keys()
+                    ):
+                        if hasattr(update_response.output, field_name):
+                            updates_dict[field_name] = getattr(
+                                update_response.output, field_name
+                            )
+                    updated_context = _update_context_object(
+                        updated_context, updates_dict
+                    )
 
                 # Trigger context update hooks
                 self.hook_manager.trigger_hooks("context_update", updated_context)
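_update_context_object itself is not shown in this diff. Assuming it behaves like the usual BaseModel/dict merge, a sketch of the apply step (the helper below is a stand-in, not the package's implementation):

from pydantic import BaseModel

class UserContext(BaseModel):
    name: str
    age: int

def update_context_object(context, updates: dict):
    # Pydantic contexts get a copied-and-updated instance; dict contexts merge in place.
    if isinstance(context, BaseModel):
        return context.model_copy(update=updates)
    if isinstance(context, dict):
        context.update(updates)
        return context
    raise TypeError(f"Unsupported context type: {type(context)!r}")

ctx = UserContext(name="Ada", age=36)
print(update_context_object(ctx, {"age": 37}))  # name='Ada' age=37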
@@ -490,10 +689,20 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         *,
         stream: Literal[False] = False,
         **kwargs: Any,
-    ) -> AgentResponse[T]: ...
+    ) -> AgentResponse[T, AgentContext]: ...
 
     @overload
     def run(
@@ -503,10 +712,20 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         *,
         stream: Literal[True],
         **kwargs: Any,
-    ) -> AgentStream[T]: ...
+    ) -> AgentStream[T, AgentContext]: ...
 
     def run(
         self,
@@ -515,9 +734,19 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         stream: bool = False,
         **kwargs: Any,
-    ) -> Union[AgentResponse[T], AgentStream[T]]:
+    ) -> Union[AgentResponse[T, AgentContext], AgentStream[T, AgentContext]]:
         """Runs this agent and returns a final agent response or stream.
 
         You can override defaults assigned to this agent from this function directly.
@@ -614,6 +843,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         if max_steps is None:
             max_steps = self.settings.max_steps
 
+        # Get effective context settings
+        effective_context_settings = self._get_effective_context_settings(
+            context_updates=context_updates,
+            context_confirm=context_confirm,
+            context_strategy=context_strategy,
+            context_max_retries=context_max_retries,
+            context_confirm_instructions=context_confirm_instructions,
+            context_selection_instructions=context_selection_instructions,
+            context_update_instructions=context_update_instructions,
+            context_format=context_format,
+        )
+
         # Parse initial messages
         parsed_messages = parse_messages(messages)
         current_messages = parsed_messages.copy()
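With these parameters threaded through run(), context behavior is tunable per call rather than only at construction time. A hedged usage sketch (UserContext and the prompt are invented for illustration; the keyword arguments are the ones added in this diff):

from pydantic import BaseModel
from hammad.genai.agents.agent import Agent

class UserContext(BaseModel):
    name: str = "unknown"

agent = Agent(name="assistant", instructions="You are helpful")
response = agent.run(
    "Hi, my name is Ada.",
    context=UserContext(),
    context_updates="after",       # update only once a step finishes
    context_strategy="selective",  # let the model pick which fields changed
    context_max_retries=2,
)
print(response.context)  # e.g. UserContext(name='Ada'), model behavior permitting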
@@ -621,6 +862,18 @@ class Agent(BaseGenAIModel, Generic[T]):
 
         # RUN MAIN AGENTIC LOOP
         for step in range(max_steps):
+            # Update context before processing if configured
+            if context and self._should_update_context(
+                context, "before", effective_context_settings["context_updates"]
+            ):
+                context = self._perform_context_update(
+                    context=context,
+                    model=working_model,
+                    current_messages=current_messages,
+                    timing="before",
+                    effective_settings=effective_context_settings,
+                )
+
             # Format messages with instructions and context for first step only
             if step == 0:
                 formatted_messages = self._format_messages_with_context(
@@ -640,9 +893,7 @@ class Agent(BaseGenAIModel, Generic[T]):
             # Get language model response
             response = working_model.run(
                 messages=formatted_messages,
-                tools=[tool.
-                if self.tools
-                else None,
+                tools=[tool.to_dict() for tool in self.tools] if self.tools else None,
                 **model_kwargs,
             )
 
@@ -663,6 +914,17 @@ class Agent(BaseGenAIModel, Generic[T]):
                 steps.append(response)
             else:
                 # No tool calls - this is the final step
+                # Update context after processing if configured
+                if context and self._should_update_context(
+                    context, "after", effective_context_settings["context_updates"]
+                ):
+                    context = self._perform_context_update(
+                        context=context,
+                        model=working_model,
+                        current_messages=current_messages,
+                        timing="after",
+                        effective_settings=effective_context_settings,
+                    )
                 return _create_agent_response_from_language_model_response(
                     response=response, steps=steps, context=context
                 )
@@ -680,6 +942,18 @@ class Agent(BaseGenAIModel, Generic[T]):
             **model_kwargs,
         )
 
+        # Update context after processing if configured
+        if context and self._should_update_context(
+            context, "after", effective_context_settings["context_updates"]
+        ):
+            context = self._perform_context_update(
+                context=context,
+                model=working_model,
+                current_messages=current_messages,
+                timing="after",
+                effective_settings=effective_context_settings,
+            )
+
         return _create_agent_response_from_language_model_response(
             response=final_response, steps=steps, context=context
         )
@@ -691,8 +965,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         **kwargs: Any,
-    ) -> AgentResponse[T]:
+    ) -> AgentResponse[T, AgentContext]:
         """Runs this agent asynchronously and returns a final agent response.
 
         You can override defaults assigned to this agent from this function directly.
@@ -772,6 +1056,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         if max_steps is None:
             max_steps = self.settings.max_steps
 
+        # Get effective context settings
+        effective_context_settings = self._get_effective_context_settings(
+            context_updates=context_updates,
+            context_confirm=context_confirm,
+            context_strategy=context_strategy,
+            context_max_retries=context_max_retries,
+            context_confirm_instructions=context_confirm_instructions,
+            context_selection_instructions=context_selection_instructions,
+            context_update_instructions=context_update_instructions,
+            context_format=context_format,
+        )
+
         # Parse initial messages
         parsed_messages = parse_messages(messages)
         current_messages = parsed_messages.copy()
@@ -779,6 +1075,18 @@ class Agent(BaseGenAIModel, Generic[T]):
 
         # RUN MAIN AGENTIC LOOP
         for step in range(max_steps):
+            # Update context before processing if configured
+            if context and self._should_update_context(
+                context, "before", effective_context_settings["context_updates"]
+            ):
+                context = self._perform_context_update(
+                    context=context,
+                    model=working_model,
+                    current_messages=current_messages,
+                    timing="before",
+                    effective_settings=effective_context_settings,
+                )
+
             # Format messages with instructions and context for first step only
             if step == 0:
                 formatted_messages = self._format_messages_with_context(
@@ -798,9 +1106,7 @@ class Agent(BaseGenAIModel, Generic[T]):
             # Get language model response
             response = await working_model.async_run(
                 messages=formatted_messages,
-                tools=[tool.
-                if self.tools
-                else None,
+                tools=[tool.to_dict() for tool in self.tools] if self.tools else None,
                 **model_kwargs,
             )
 
@@ -821,6 +1127,17 @@ class Agent(BaseGenAIModel, Generic[T]):
                 steps.append(response)
             else:
                 # No tool calls - this is the final step
+                # Update context after processing if configured
+                if context and self._should_update_context(
+                    context, "after", effective_context_settings["context_updates"]
+                ):
+                    context = self._perform_context_update(
+                        context=context,
+                        model=working_model,
+                        current_messages=current_messages,
+                        timing="after",
+                        effective_settings=effective_context_settings,
+                    )
                 return _create_agent_response_from_language_model_response(
                     response=response, steps=steps, context=context
                 )
@@ -838,6 +1155,18 @@ class Agent(BaseGenAIModel, Generic[T]):
             **model_kwargs,
         )
 
+        # Update context after processing if configured
+        if context and self._should_update_context(
+            context, "after", effective_context_settings["context_updates"]
+        ):
+            context = self._perform_context_update(
+                context=context,
+                model=working_model,
+                current_messages=current_messages,
+                timing="after",
+                effective_settings=effective_context_settings,
+            )
+
         return _create_agent_response_from_language_model_response(
             response=final_response, steps=steps, context=context
         )
@@ -850,7 +1179,7 @@ class Agent(BaseGenAIModel, Generic[T]):
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
         """Create a stream that yields agent steps.
 
         Args:
@@ -882,8 +1211,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
         """Iterate over agent steps, yielding each step response.
 
         You can override defaults assigned to this agent from this function directly.
@@ -986,7 +1325,7 @@ class Agent(BaseGenAIModel, Generic[T]):
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
         """Async iterate over agent steps, yielding each step response.
 
         Args:
hammad/genai/agents/types/agent_response.py
CHANGED
@@ -1,12 +1,14 @@
 """hammad.genai.agents.types.agent_response"""
 
-from typing import List, Any, TypeVar, Literal
+from typing import List, Any, TypeVar, Literal, Generic
 
 from ....cache import cached
+from ....typing import get_type_description
 from ...models.language.types import (
     LanguageModelResponse,
 )
 
+from .agent_context import AgentContext
 
 __all__ = [
     "AgentResponse",
@@ -40,7 +42,7 @@ def _create_agent_response_from_language_model_response(
     ) from e
 
 
-class AgentResponse(LanguageModelResponse[T]):
+class AgentResponse(LanguageModelResponse[T], Generic[T, AgentContext]):
     """A response generated by an agent, that includes the steps and final
     output during the agent's execution."""
 
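For Generic[T, AgentContext] to type-check, AgentContext must itself be a TypeVar (it comes from agent_context.py, a 313-byte module per the RECORD below, consistent with little more than a TypeVar definition). A generic sketch of the same two-parameter shape with illustrative names:

from typing import Generic, Optional, TypeVar

T = TypeVar("T")
ContextT = TypeVar("ContextT")

class Response(Generic[T, ContextT]):
    def __init__(self, output: T, context: Optional[ContextT] = None) -> None:
        self.output = output
        self.context = context

r: "Response[str, dict]" = Response("done", {"name": "Ada"})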
@@ -55,7 +57,7 @@ class AgentResponse(LanguageModelResponse[T]):
     empty.
     """
 
-    context:
+    context: AgentContext | None = None
     """
     The final context object after agent execution.
 
@@ -66,7 +68,7 @@ class AgentResponse(LanguageModelResponse[T]):
     @cached
     def __str__(self) -> str:
         """Pretty prints the response object."""
-        output = "AgentResponse:"
+        output = ">>> AgentResponse:"
 
         if self.output or self.content:
             output += f"\n{self.output if self.output else self.content}"
@@ -77,7 +79,7 @@ class AgentResponse(LanguageModelResponse[T]):
         # NOTE:
         # added +1 to include final step in the output
         output += f"\n>>> Steps: {len(self.steps) + 1}"
-        output += f"\n>>>
+        output += f"\n>>> Output Type: {get_type_description(type(self.output))}"
 
         # Calculate total tool calls across all steps
         total_tool_calls = 0
@@ -87,4 +89,36 @@ class AgentResponse(LanguageModelResponse[T]):
 
         output += f"\n>>> Total Tool Calls: {total_tool_calls}"
 
+        # Show context if available
+        if self.context:
+            output += (
+                f"\n>>> Final Context: {self._format_context_display(self.context)}"
+            )
+
         return output
+
+    def _format_context_display(self, context: AgentContext) -> str:
+        """Format context for display in string representation."""
+        if context is None:
+            return "None"
+
+        try:
+            # For Pydantic models, show as dict
+            if hasattr(context, "model_dump"):
+                context_dict = context.model_dump()
+            elif isinstance(context, dict):
+                context_dict = context
+            else:
+                return str(context)
+
+            # Format as compact JSON-like string
+            items = []
+            for key, value in context_dict.items():
+                if isinstance(value, str):
+                    items.append(f"{key}='{value}'")
+                else:
+                    items.append(f"{key}={value}")
+
+            return "{" + ", ".join(items) + "}"
+        except Exception:
+            return str(context)
hammad/genai/agents/types/agent_stream.py
CHANGED
@@ -38,6 +38,7 @@ from .agent_response import (
     AgentResponse,
     _create_agent_response_from_language_model_response,
 )
+from .agent_context import AgentContext
 
 if TYPE_CHECKING:
     from ..agent import Agent
@@ -85,10 +86,21 @@ class AgentResponseChunk(LanguageModelResponseChunk[T], Generic[T]):
 
     def __str__(self) -> str:
         """String representation of the chunk."""
-
+        output = f"AgentResponseChunk(step={self.step_number}, final={self.is_final})"
 
+        # Show content if available
+        if self.output or self.content:
+            content_preview = str(self.output if self.output else self.content)
+            if len(content_preview) > 100:
+                content_preview = content_preview[:100] + "..."
+            output += f"\nContent: {content_preview}"
 
-class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
+        return output
+
+
+class AgentStream(
+    BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T, AgentContext]
+):
     """Stream of agent responses that can be used in sync and async contexts."""
 
     def __init__(
@@ -97,7 +109,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
         messages: LanguageModelMessages,
         model: Optional[Union[LanguageModel, str]] = None,
         max_steps: Optional[int] = None,
-        context: Optional[
+        context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         stream: bool = False,
         **kwargs: Any,
@@ -127,6 +139,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
 
         # Context handling
         self.current_context = context
+        self.initial_context = context
 
         # Model kwargs setup
         self.model_kwargs = kwargs.copy()
@@ -139,7 +152,19 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
 
     def _format_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
         if self.agent.instructions:
-
+            system_content = self.agent.instructions
+
+            # Add context if available
+            if self.current_context is not None:
+                from ..agent import _format_context_for_instructions
+
+                context_str = _format_context_for_instructions(
+                    self.current_context, self.agent.context_format
+                )
+                if context_str:
+                    system_content += f"\n\nContext:\n{context_str}"
+
+            system_message = {"role": "system", "content": system_content}
             messages = [system_message] + messages
         return consolidate_system_messages(messages)
 
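The stream now folds the current context into the system prompt on the first step. A stand-in sketch of the message shaping (consolidate_system_messages is the package's own helper; the function below only illustrates the intent):

def format_messages(messages, instructions, context_str=None):
    system_content = instructions
    if context_str:
        system_content += f"\n\nContext:\n{context_str}"
    # Prepend one system message; the real code then merges adjacent
    # system messages via consolidate_system_messages().
    return [{"role": "system", "content": system_content}] + messages

msgs = format_messages(
    [{"role": "user", "content": "hi"}], "You are helpful", "{name='Ada'}"
)
print(msgs[0]["content"])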
@@ -162,6 +187,18 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             else:
                 self.is_done = True
                 self._final_response = response
+
+                # Update context after processing if configured
+                if self.current_context and self.agent._should_update_context(
+                    self.current_context, "after"
+                ):
+                    self.current_context = self.agent._perform_context_update(
+                        context=self.current_context,
+                        model=self.model,
+                        current_messages=self.current_messages,
+                        timing="after",
+                    )
+
             return AgentResponseChunk(
                 step_number=self.current_step, response=response, is_final=True
             )
@@ -171,6 +208,17 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
         while not self.is_done and self.current_step < self.max_steps:
             self.current_step += 1
 
+            # Update context before processing if configured
+            if self.current_context and self.agent._should_update_context(
+                self.current_context, "before"
+            ):
+                self.current_context = self.agent._perform_context_update(
+                    context=self.current_context,
+                    model=self.model,
+                    current_messages=self.current_messages,
+                    timing="before",
+                )
+
             formatted_messages = self.current_messages
             if self.current_step == 1:
                 formatted_messages = self._format_messages(self.current_messages)
@@ -198,6 +246,17 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             # The context manager handling should be managed by the agent's run method
             self.current_step += 1
 
+            # Update context before processing if configured
+            if self.current_context and self.agent._should_update_context(
+                self.current_context, "before"
+            ):
+                self.current_context = self.agent._perform_context_update(
+                    context=self.current_context,
+                    model=self.model,
+                    current_messages=self.current_messages,
+                    timing="before",
+                )
+
             formatted_messages = self.current_messages
             if self.current_step == 1:
                 formatted_messages = self._format_messages(self.current_messages)
@@ -215,7 +274,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
                 self.is_done = True
             return chunk
 
-    def _build_response(self) -> AgentResponse[T]:
+    def _build_response(self) -> AgentResponse[T, AgentContext]:
         if self._final_response:
             final_response = self._final_response
         elif self.steps:
@@ -229,13 +288,39 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             context=self.current_context,
         )
 
-    def collect(self) -> AgentResponse[T]:
+    def _format_context_display(self, context: AgentContext) -> str:
+        """Format context for display in string representation."""
+        if context is None:
+            return "None"
+
+        try:
+            # For Pydantic models, show as dict
+            if hasattr(context, "model_dump"):
+                context_dict = context.model_dump()
+            elif isinstance(context, dict):
+                context_dict = context
+            else:
+                return str(context)
+
+            # Format as compact JSON-like string
+            items = []
+            for key, value in context_dict.items():
+                if isinstance(value, str):
+                    items.append(f"{key}='{value}'")
+                else:
+                    items.append(f"{key}={value}")
+
+            return "{" + ", ".join(items) + "}"
+        except Exception:
+            return str(context)
+
+    def collect(self) -> AgentResponse[T, AgentContext]:
         """Collect all steps and return final response."""
         for _ in self:
             pass
         return self._build_response()
 
-    async def async_collect(self) -> AgentResponse[T]:
+    async def async_collect(self) -> AgentResponse[T, AgentContext]:
         """Collect all steps and return final response."""
         async for _ in self:
             pass
hammad/genai/models/language/model.py
CHANGED
@@ -21,6 +21,7 @@ from typing_extensions import Literal
 if TYPE_CHECKING:
     from httpx import Timeout
 
+from ....logging.logger import _get_internal_logger
 from ..model_provider import litellm, instructor
 
 from ...types.base import BaseGenAIModel
@@ -50,6 +51,9 @@ __all__ = [
 T = TypeVar("T")
 
 
+logger = _get_internal_logger(__name__)
+
+
 class LanguageModelError(Exception):
     """Error raised when an error occurs during a language model operation."""
 
@@ -112,6 +116,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         # Initialize LanguageModel-specific attributes
         self._instructor_client = None
 
+        logger.info(f"Initialized LanguageModel w/ model: {self.model}")
+        logger.debug(f"LanguageModel settings: {self.settings}")
+
     def _get_instructor_client(
         self, mode: Optional[LanguageModelInstructorMode] = None
     ):
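These init/run log lines stay silent at the module's default "warning" level. The pattern is the standard stdlib one; an equivalent sketch using logging directly (not the package's Logger class):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("example.language_model")

logger.info("Initialized LanguageModel w/ model: %s", "gpt-4o-mini")
logger.debug("LanguageModel settings: %r", {"temperature": 0.7})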
@@ -123,6 +130,10 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             self._instructor_client is None
             or getattr(self._instructor_client, "_mode", None) != effective_mode
         ):
+            logger.debug(
+                f"Creating new instructor client for mode: {effective_mode} from old mode: {getattr(self._instructor_client, '_mode', None)}"
+            )
+
             self._instructor_client = instructor.from_litellm(
                 completion=litellm.completion, mode=instructor.Mode(effective_mode)
             )
@@ -338,6 +349,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         Returns:
             LanguageModelResponse or LanguageModelStream depending on parameters
         """
+        logger.info(f"Running LanguageModel request with model: {self.model}")
+        logger.debug(f"LanguageModel request kwargs: {kwargs}")
+
         try:
             # Extract model, base_url, api_key, and mock_response from kwargs, using instance defaults
             model = kwargs.pop("model", None) or self.model
@@ -572,6 +586,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         Returns:
             LanguageModelResponse or LanguageModelAsyncStream depending on parameters
         """
+        logger.info(f"Running async LanguageModel request with model: {self.model}")
+        logger.debug(f"LanguageModel request kwargs: {kwargs}")
+
         try:
             # Extract model, base_url, api_key, and mock_response from kwargs, using instance defaults
             model = kwargs.pop("model", None) or self.model
hammad/genai/models/language/types/language_model_response.py
CHANGED
@@ -12,6 +12,8 @@ from typing import (
     Literal,
 )
 
+from .....cache import cached
+
 from ...model_provider import litellm
 from ....types.base import BaseGenAIModelResponse
 from openai.types.chat import (
@@ -53,7 +55,7 @@ class LanguageModelResponse(BaseGenAIModelResponse[T]):
     """The actual response content of the completion. This is the string that
     was generated by the model."""
 
-    tool_calls: Optional[List["ChatCompletionMessageToolCall"]] = None
+    tool_calls: Optional[List["litellm.ChatCompletionMessageToolCall"]] = None
     """The tool calls that were made by the model. This is a list of tool calls
     that were made by the model."""
 
@@ -197,9 +199,10 @@ class LanguageModelResponse(BaseGenAIModelResponse[T]):
         content part object."""
         return {"type": "text", "text": self.content}
 
+    @cached
     def __str__(self) -> str:
         """Pretty prints the response object."""
-        output = "LanguageModelResponse:"
+        output = ">>> LanguageModelResponse:"
 
         if self.output or self.content:
             output += f"\n{self.output if self.output else self.content}"
hammad/genai/types/tools.py
CHANGED
@@ -5,6 +5,7 @@ Tool system for agent function calling with JSON schema generation.
 
 import asyncio
 import concurrent.futures
+from dataclasses import dataclass
 import inspect
 import json
 from typing import (
@@ -44,6 +45,7 @@ __all__ = (
 )
 
 
+@dataclass
 class ToolResponseMessage:
     """Represents a tool response message for chat completion."""
 
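The @dataclass decorator is what makes ToolResponseMessage constructible from its annotated fields; without it, a class body of bare annotations gets no generated __init__. Only the decorator and class name come from the diff - the fields below are hypothetical:

from dataclasses import dataclass

@dataclass
class ToolResponseMessage:
    """Represents a tool response message for chat completion."""

    tool_call_id: str   # hypothetical field
    content: str        # hypothetical field
    role: str = "tool"  # hypothetical field

msg = ToolResponseMessage(tool_call_id="call_123", content="42")
print(msg)  # ToolResponseMessage(tool_call_id='call_123', content='42', role='tool')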
hammad/logging/logger.py
CHANGED
@@ -953,3 +953,11 @@ def create_logger(
         console=console,
         handlers=handlers,
     )
+
+
+# internal logger and helper
+_logger = Logger("hammad", level="warning")
+
+
+def _get_internal_logger(name: str) -> Logger:
+    return Logger(name=name, level="warning")
{hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/RECORD
CHANGED
@@ -55,15 +55,15 @@ hammad/formatting/yaml/__init__.py,sha256=4dBeXPi0jx7ELT2_sC2fUYaiY8b8wFiUScLODc
 hammad/formatting/yaml/converters.py,sha256=zvSB8QGb56uvwO0KjXllfTj9g1FmNINOKR06DTjvXw8,153
 hammad/genai/__init__.py,sha256=16L9z0U73uUhBB7JHSL0tHWie2-rI7GAUtQSY94IeZk,3579
 hammad/genai/agents/__init__.py,sha256=R_wW_fbZqMXZZYSErAb81UDRMTaNDlAFzNKfTOm4XYg,1235
-hammad/genai/agents/agent.py,sha256=
+hammad/genai/agents/agent.py,sha256=xtb-xexCA7XzoHCR20zCTAEsgjTo0C-i701WQGzIXgA,55884
 hammad/genai/agents/run.py,sha256=G3NLJgg8nXFHfOrh_XR1NpVjGzAgjnA_Ojc_rrMHz9E,23278
 hammad/genai/agents/types/__init__.py,sha256=6X6_P82qe15dyqs-vAcXUk4na4tB-7oMdMf484v87io,1119
 hammad/genai/agents/types/agent_context.py,sha256=u4evwx9B-UKEHMtNcsNlN9q8i12bsW9HhtyvmU0NNTw,313
 hammad/genai/agents/types/agent_event.py,sha256=zNKXXPKKOsIO9MAhE-YNCOxeNg00O7j1mE0R1pA_Xr8,3925
 hammad/genai/agents/types/agent_hooks.py,sha256=wgys4ixiHjX5oux4zVSr9OPXyAZ-iJGk_MhaOKEgMxo,7853
 hammad/genai/agents/types/agent_messages.py,sha256=csjEq42bElaTZYZW2dE6nlFZc142-HgT3bB6h1KMg_w,846
-hammad/genai/agents/types/agent_response.py,sha256=
-hammad/genai/agents/types/agent_stream.py,sha256=
+hammad/genai/agents/types/agent_response.py,sha256=vRR9bWwzSA6Y6a_cpf7KrrmJqoAwh5OuuNThTERNVwY,3806
+hammad/genai/agents/types/agent_stream.py,sha256=VIfqZp55wq6jQh_OtPzZjiL_K4cEoVCkeEZS3s_MIXI,11069
 hammad/genai/models/__init__.py,sha256=e4TbEsiKIoXENOEsdIdQcWWt0RnFdTEqCz0nICHQHtM,26
 hammad/genai/models/model_provider.py,sha256=2RdOeqr7KpjyrMqq4YH4OYy1pk6sjzf2CPu1ZHa1Pdk,75
 hammad/genai/models/multimodal.py,sha256=KXUyLXqM1eBgBGZFEbMw3dYbakZFAXoko2xYprronxY,1276
@@ -77,14 +77,14 @@ hammad/genai/models/embeddings/types/embedding_model_response.py,sha256=V2H_VTl1
 hammad/genai/models/embeddings/types/embedding_model_run_params.py,sha256=ZGhCXrEEzMF5y-V8neF2a73Gh1emzrYUHVxWkybg5uE,1570
 hammad/genai/models/embeddings/types/embedding_model_settings.py,sha256=KEwvoElXhPMSVCKW2uKwqqT2lSAAthQXmGXaV7Qk5cU,1268
 hammad/genai/models/language/__init__.py,sha256=B92q9f5UIQBMIFoYUja9V61bn5Lzdrk12_bf3DHw6Is,1838
-hammad/genai/models/language/model.py,sha256=
+hammad/genai/models/language/model.py,sha256=h3V-Z_AUlVRn_pFbC0wchMajk7tyJjK0dIpZxq3lYy8,39408
 hammad/genai/models/language/run.py,sha256=nqqQYi3iBpkNxW3_JHyyZBNpn79LVWLpnebCBYOaEbA,21468
 hammad/genai/models/language/types/__init__.py,sha256=cdLnoCiVmK6T86-5CZrUJg2rxXKoSk-svyCSviUdgao,1534
 hammad/genai/models/language/types/language_model_instructor_mode.py,sha256=7ywBaY24m-UKRynnX6XsfVf_hsQrM2xHAHugTgV0Vho,1008
 hammad/genai/models/language/types/language_model_messages.py,sha256=e-HZ_YKXq17gwmMlpOmYUYUpBFm7Mu3aRawtjSslWXs,504
 hammad/genai/models/language/types/language_model_name.py,sha256=2V70cZ47L9yIcug6LCcMHcvEJaee7gRN6DUPhLUBlsE,8056
 hammad/genai/models/language/types/language_model_request.py,sha256=ZtzhCx8o6zkEBS3uTFXFLf_poDD7MnIp1y7MbKckOmI,3911
-hammad/genai/models/language/types/language_model_response.py,sha256=
+hammad/genai/models/language/types/language_model_response.py,sha256=YHDEDJuhQ_ULs9qse2b-h5cx1ELWgfPc2BHk34OPVxE,7540
 hammad/genai/models/language/types/language_model_response_chunk.py,sha256=wIzGZw732KsI-a1-uASjATA6qvBuq-7rupWoFjsAgQo,1796
 hammad/genai/models/language/types/language_model_settings.py,sha256=C0EvLXZoOLgPZ4bX7mVFs_CWP-jam27qkseJRGsBAfQ,2794
 hammad/genai/models/language/types/language_model_stream.py,sha256=XgJ83JSbtTdf7jeLQMrDhMfI7zp0pRrdY7JWYbZV_h0,22043
|
|
94
94
|
hammad/genai/types/__init__.py,sha256=W0fzUnKhDynt4TkwZX8LCRYfgRTAVomSuWqPmhGu8sg,25
|
95
95
|
hammad/genai/types/base.py,sha256=VnGL45w8oR-6rWl2GfGgWX4SjMC-23RGWuN0_H2bH_I,5437
|
96
96
|
hammad/genai/types/history.py,sha256=zsfBvGMoFTHZCT7Igae-5_jszu409dVJ_wEmNw7alCk,10208
|
97
|
-
hammad/genai/types/tools.py,sha256=
|
97
|
+
hammad/genai/types/tools.py,sha256=3p7qhZcilP_NOCOnufCkubTeYN0yC7Ij5bqrUy-FYow,16554
|
98
98
|
hammad/logging/__init__.py,sha256=VtskZx0bKEAJ9FHTMflhB1CzeFUxLpDT5HPgcecAXUo,701
|
99
99
|
hammad/logging/decorators.py,sha256=VbI1x3P4ft0-0BGjXq7nQgiuNqcXAA51CGmoSn47iSw,30122
|
100
|
-
hammad/logging/logger.py,sha256=
|
100
|
+
hammad/logging/logger.py,sha256=5Y41gCtH7APxNjIXtsZg1E9nwpi2xTgRAoC2l-QKil4,31706
|
101
101
|
hammad/mcp/__init__.py,sha256=5oTU-BLYjfz6fBHDH9cyWg3DpQ6Qar-jodbCR05SuWo,1123
|
102
102
|
hammad/mcp/client/__init__.py,sha256=_SfnKvd5Za-FfFoE5GcXkBY9WcwprZND9SyZ6RY--no,795
|
103
103
|
hammad/mcp/client/client.py,sha256=auKCiIJfcZkuVFRapTpqYP4PxoyIfx40gVbMYLBdTzI,20565
|
@@ -121,7 +121,7 @@ hammad/web/openapi/__init__.py,sha256=JhJQ6_laBmB2djIYFc0vgGha2GsdUe4FP1LDdZCQ5J
 hammad/web/openapi/client.py,sha256=1pXz7KAO_0pN4kQZoWKWskXDYGiJ535TsPO1GGCiC0E,26816
 hammad/web/search/__init__.py,sha256=e9A6znPIiZCz-4secyHbUs0uUGf5yAqW6wGacgx961U,24
 hammad/web/search/client.py,sha256=LIx2MsHhn6cRTuq5i1mWowRTdIhPobY4GQV3S3bk9lk,36694
-hammad_python-0.0.20.dist-info/METADATA,sha256=
-hammad_python-0.0.20.dist-info/WHEEL,sha256=
-hammad_python-0.0.20.dist-info/licenses/LICENSE,sha256=
-hammad_python-0.0.20.dist-info/RECORD,,
+hammad_python-0.0.22.dist-info/METADATA,sha256=OFmPfD3CcZq0Et0hWl2dBUjRNm4IcYZKL1EX8aTJPsU,6570
+hammad_python-0.0.22.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hammad_python-0.0.22.dist-info/licenses/LICENSE,sha256=h74yFUWjbBaodcWG5wNmm30npjl8obVcxD-1nQfUp2I,1069
+hammad_python-0.0.22.dist-info/RECORD,,
{hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/WHEEL
File without changes
{hammad_python-0.0.20.dist-info → hammad_python-0.0.22.dist-info}/licenses/LICENSE
File without changes