hammad-python 0.0.20__py3-none-any.whl → 0.0.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/genai/agents/agent.py +321 -45
- hammad/genai/agents/types/agent_response.py +36 -4
- hammad/genai/agents/types/agent_stream.py +85 -9
- hammad/genai/models/language/model.py +15 -0
- hammad/genai/models/language/types/language_model_response.py +1 -1
- hammad/genai/types/tools.py +2 -0
- hammad/logging/logger.py +8 -0
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/METADATA +1 -1
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/RECORD +11 -11
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/licenses/LICENSE +0 -0
hammad/genai/agents/agent.py
CHANGED
@@ -19,6 +19,8 @@ from dataclasses import dataclass, field
 from enum import Enum
 import json
 
+from ...logging.logger import _get_internal_logger
+
 from ..types.base import BaseGenAIModel, BaseGenAIModelSettings
 from ..models.language.model import LanguageModel
 from ..models.language.types import (
@@ -53,6 +55,9 @@ if TYPE_CHECKING:
 T = TypeVar("T")
 
 
+logger = _get_internal_logger(__name__)
+
+
 @dataclass
 class AgentSettings:
     """Settings object that controls the default behavior of an agent's run."""
@@ -253,6 +258,43 @@ class Agent(BaseGenAIModel, Generic[T]):
         context_format: Literal["json", "python", "markdown"] = "json",
         **kwargs: Any,
     ):
+        """Create a new AI agent with specified capabilities and behavior.
+
+        An agent is an intelligent assistant that can use tools, follow instructions,
+        and maintain context across conversations. It combines a language model with
+        additional capabilities like tool execution and structured output generation.
+
+        Args:
+            name: A human-readable name for the agent (default: "agent")
+            instructions: System instructions that define the agent's behavior and personality
+            model: The language model to use - either a LanguageModel instance or model name string
+            description: Optional description of what the agent does
+            tools: List of tools/functions the agent can call, or a single callable
+            settings: AgentSettings object to customize default behavior
+            instructor_mode: Mode for structured output generation
+            context_updates: When to update context - "before", "after", or both
+            context_confirm: Whether to confirm context updates with the user
+            context_strategy: How to select context updates - "selective" or "all"
+            context_max_retries: Maximum attempts for context update operations
+            context_confirm_instructions: Custom instructions for context confirmation
+            context_selection_instructions: Custom instructions for context selection
+            context_update_instructions: Custom instructions for context updates
+            context_format: Format for context display - "json", "python", or "markdown"
+            **kwargs: Additional parameters passed to the underlying language model
+
+        Example:
+            Basic agent:
+            >>> agent = Agent(name="assistant", instructions="You are helpful")
+
+            Agent with tools:
+            >>> def calculator(x: int, y: int) -> int:
+            ...     return x + y
+            >>> agent = Agent(tools=[calculator])
+
+            Agent with custom settings:
+            >>> settings = AgentSettings(max_steps=5)
+            >>> agent = Agent(settings=settings, model="gpt-4")
+        """
         # Initialize BaseGenAIModel with basic parameters
         super().__init__(
             model=model if isinstance(model, str) else model.model, **kwargs
@@ -297,17 +339,44 @@ class Agent(BaseGenAIModel, Generic[T]):
         """Get the underlying language model."""
         return self._language_model
 
+    def _get_effective_context_settings(
+        self,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
+    ) -> dict:
+        """Get effective context settings, using provided parameters or defaults."""
+        return {
+            "context_updates": context_updates if context_updates is not None else self.context_updates,
+            "context_confirm": context_confirm if context_confirm is not None else self.context_confirm,
+            "context_strategy": context_strategy if context_strategy is not None else self.context_strategy,
+            "context_max_retries": context_max_retries if context_max_retries is not None else self.context_max_retries,
+            "context_confirm_instructions": context_confirm_instructions if context_confirm_instructions is not None else self.context_confirm_instructions,
+            "context_selection_instructions": context_selection_instructions if context_selection_instructions is not None else self.context_selection_instructions,
+            "context_update_instructions": context_update_instructions if context_update_instructions is not None else self.context_update_instructions,
+            "context_format": context_format if context_format is not None else self.context_format,
+        }
+
     def _should_update_context(
-        self, context: AgentContext, timing: Literal["before", "after"]
+        self, context: AgentContext, timing: Literal["before", "after"], context_updates=None
     ) -> bool:
         """Determine if context should be updated based on timing and configuration."""
-        if not self.context_updates:
+        effective_context_updates = context_updates if context_updates is not None else self.context_updates
+
+        if not effective_context_updates:
             return False
 
-        if isinstance(self.context_updates, str):
-            return self.context_updates == timing
+        if isinstance(effective_context_updates, str):
+            return effective_context_updates == timing
         else:
-            return timing in self.context_updates
+            return timing in effective_context_updates
 
     def _create_context_confirm_model(self):
         """Create IsUpdateRequired model for context confirmation."""
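Note: every `context_*` option now resolves per call: an argument passed to the run methods wins, otherwise the value the agent was constructed with applies. A minimal sketch of the resolution rule, with hypothetical values:

```python
# "Per-call argument wins, else instance default" - the rule applied by
# _get_effective_context_settings to each context_* option.
agent_default = "selective"   # value set on the Agent at construction
per_call = None               # caller did not override context_strategy

effective = per_call if per_call is not None else agent_default
assert effective == "selective"

per_call = "all"              # caller overrides for this one run
effective = per_call if per_call is not None else agent_default
assert effective == "all"
```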
@@ -334,18 +403,44 @@ class Agent(BaseGenAIModel, Generic[T]):
         if field_name:
             # Single field update
             if isinstance(context, BaseModel):
-                field_type = context.model_fields[field_name].annotation
+                field_type = context.__class__.model_fields[field_name].annotation
+                field_info = context.__class__.model_fields[field_name]
+                description = getattr(field_info, 'description', f"Update the {field_name} field")
             elif isinstance(context, dict):
                 field_type = type(context[field_name])
+                description = f"Update the {field_name} field"
             else:
                 field_type = Any
+                description = f"Update the {field_name} field"
 
             return create_model(
-                field_name.capitalize(),
+                f"Update{field_name.capitalize()}",
+                **{field_name: (field_type, Field(description=description))}
             )
         else:
-            # All fields update
-
+            # All fields update - create a model with the exact same fields as the context
+            if isinstance(context, BaseModel):
+                # Create a model with the same fields as the context
+                field_definitions = {}
+                for field_name, field_info in context.model_fields.items():
+                    field_type = field_info.annotation
+                    current_value = getattr(context, field_name)
+                    description = getattr(field_info, 'description', f"Current value: {current_value}")
+                    field_definitions[field_name] = (field_type, Field(description=description))
+
+                return create_model("ContextUpdate", **field_definitions)
+            elif isinstance(context, dict):
+                # Create a model with the same keys as the dict
+                field_definitions = {}
+                for key, value in context.items():
+                    field_type = type(value)
+                    description = f"Current value: {value}"
+                    field_definitions[key] = (field_type, Field(description=description))
+
+                return create_model("ContextUpdate", **field_definitions)
+            else:
+                # Fallback to generic updates
+                return create_model("ContextUpdate", updates=(Dict[str, Any], Field(description="Dictionary of field updates")))
 
     def _perform_context_update(
         self,
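The update models lean on Pydantic's `create_model` to mirror the live context, so the language model must answer in the context's own shape. A small self-contained illustration of the technique; the `UserContext` model here is hypothetical:

```python
from pydantic import BaseModel, Field, create_model

class UserContext(BaseModel):  # hypothetical context for illustration
    name: str = "unknown"
    visits: int = 0

ctx = UserContext(name="ada", visits=3)

# Mirror each field of the live context into a new model, embedding the
# current value in the field description (the same idea as "ContextUpdate").
field_definitions = {
    name: (info.annotation, Field(description=f"Current value: {getattr(ctx, name)}"))
    for name, info in UserContext.model_fields.items()
}
ContextUpdate = create_model("ContextUpdate", **field_definitions)

update = ContextUpdate(name="ada lovelace", visits=4)
print(update.model_dump())  # {'name': 'ada lovelace', 'visits': 4}
```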
@@ -353,20 +448,40 @@ class Agent(BaseGenAIModel, Generic[T]):
         model: LanguageModel,
         current_messages: List[Dict[str, Any]],
         timing: Literal["before", "after"],
+        effective_settings: Optional[dict] = None,
     ) -> AgentContext:
         """Perform context update with retries and error handling."""
         updated_context = context
-
-        for attempt in range(self.context_max_retries):
+
+        # Use effective settings or defaults
+        if effective_settings is None:
+            effective_settings = {
+                "context_confirm": self.context_confirm,
+                "context_strategy": self.context_strategy,
+                "context_max_retries": self.context_max_retries,
+                "context_confirm_instructions": self.context_confirm_instructions,
+                "context_selection_instructions": self.context_selection_instructions,
+                "context_update_instructions": self.context_update_instructions,
+                "context_format": self.context_format,
+            }
+
+        for attempt in range(effective_settings["context_max_retries"]):
             try:
                 # Check if update is needed (if confirmation is enabled)
-                if self.context_confirm:
+                if effective_settings["context_confirm"]:
                     confirm_model = self._create_context_confirm_model()
-
-
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(updated_context, effective_settings["context_format"])
+                    confirm_instructions = f"""Based on the conversation, determine if the context should be updated {timing} processing.
+
+Current context structure:
+{context_structure}
+
+Should the context be updated based on the new information provided in the conversation?"""
+
+                    if effective_settings["context_confirm_instructions"]:
+                        confirm_instructions += f"\n\nAdditional instructions: {effective_settings['context_confirm_instructions']}"
 
                     confirm_response = model.run(
                         messages=current_messages
@@ -379,16 +494,23 @@ class Agent(BaseGenAIModel, Generic[T]):
                         return updated_context
 
                 # Perform the update based on strategy
-                if self.context_strategy == "selective":
+                if effective_settings["context_strategy"] == "selective":
                     # Get fields to update
                     selection_model = self._create_context_selection_model(
                         updated_context
                     )
-
-
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(updated_context, effective_settings["context_format"])
+                    selection_instructions = f"""Select which fields in the context should be updated {timing} processing based on the conversation.
+
+Current context structure:
+{context_structure}
+
+Choose only the fields that need to be updated based on the new information provided in the conversation."""
+
+                    if effective_settings["context_selection_instructions"]:
+                        selection_instructions += f"\n\nAdditional instructions: {effective_settings['context_selection_instructions']}"
 
                     selection_response = model.run(
                         messages=current_messages
@@ -403,13 +525,17 @@ class Agent(BaseGenAIModel, Generic[T]):
                         field_model = self._create_context_update_model(
                             updated_context, field_name
                         )
-
-
-
-
-
-
-
+                        # Get current field value for context
+                        current_value = getattr(updated_context, field_name) if isinstance(updated_context, BaseModel) else updated_context.get(field_name)
+
+                        field_instructions = f"""Update the {field_name} field in the context based on the conversation.
+
+Current value of {field_name}: {current_value}
+
+Please provide the new value for {field_name} based on the information from the conversation."""
+
+                        if effective_settings["context_update_instructions"]:
+                            field_instructions += f"\n\nAdditional instructions: {effective_settings['context_update_instructions']}"
 
                         field_response = model.run(
                             messages=current_messages
@@ -429,9 +555,18 @@ class Agent(BaseGenAIModel, Generic[T]):
                 else:  # strategy == "all"
                     # Update all fields at once
                     update_model = self._create_context_update_model(updated_context)
-
-
-
+
+                    # Create detailed instructions with context structure
+                    context_structure = _format_context_for_instructions(updated_context, effective_settings["context_format"])
+                    update_instructions = f"""Update the context {timing} processing based on the conversation.
+
+Current context structure:
+{context_structure}
+
+Please update the appropriate fields based on the conversation. Only update fields that need to be changed based on the new information provided."""
+
+                    if effective_settings["context_update_instructions"]:
+                        update_instructions += f"\n\nAdditional instructions: {effective_settings['context_update_instructions']}"
 
                     update_response = model.run(
                         messages=current_messages
@@ -441,9 +576,18 @@ class Agent(BaseGenAIModel, Generic[T]):
                     )
 
                     # Apply the updates
-
-
-
+                    if hasattr(update_response.output, 'updates'):
+                        # Legacy fallback for generic updates
+                        updated_context = _update_context_object(
+                            updated_context, update_response.output.updates
+                        )
+                    else:
+                        # New approach - extract field values directly from the response
+                        updates_dict = {}
+                        for field_name in (context.model_fields.keys() if isinstance(context, BaseModel) else context.keys()):
+                            if hasattr(update_response.output, field_name):
+                                updates_dict[field_name] = getattr(update_response.output, field_name)
+                        updated_context = _update_context_object(updated_context, updates_dict)
 
                 # Trigger context update hooks
                 self.hook_manager.trigger_hooks("context_update", updated_context)
@@ -490,10 +634,20 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         *,
         stream: Literal[False] = False,
         **kwargs: Any,
-    ) -> AgentResponse[T]: ...
+    ) -> AgentResponse[T, AgentContext]: ...
 
     @overload
     def run(
@@ -503,10 +657,20 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         *,
         stream: Literal[True],
         **kwargs: Any,
-    ) -> AgentStream[T]: ...
+    ) -> AgentStream[T, AgentContext]: ...
 
     def run(
         self,
@@ -515,9 +679,19 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         stream: bool = False,
         **kwargs: Any,
-    ) -> Union[AgentResponse[T], AgentStream[T]]:
+    ) -> Union[AgentResponse[T, AgentContext], AgentStream[T, AgentContext]]:
        """Runs this agent and returns a final agent response or stream.

        You can override defaults assigned to this agent from this function directly.
@@ -614,6 +788,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         if max_steps is None:
             max_steps = self.settings.max_steps
 
+        # Get effective context settings
+        effective_context_settings = self._get_effective_context_settings(
+            context_updates=context_updates,
+            context_confirm=context_confirm,
+            context_strategy=context_strategy,
+            context_max_retries=context_max_retries,
+            context_confirm_instructions=context_confirm_instructions,
+            context_selection_instructions=context_selection_instructions,
+            context_update_instructions=context_update_instructions,
+            context_format=context_format,
+        )
+
         # Parse initial messages
         parsed_messages = parse_messages(messages)
         current_messages = parsed_messages.copy()
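With the settings resolved up front, context behavior can be tuned for a single run without touching the agent's defaults. A hedged usage sketch; the `UserContext` class is a placeholder, and the message argument's exact accepted forms follow the `parse_messages` helper above:

```python
# Hypothetical per-call override: this run updates context only "after"
# processing and rewrites all fields, regardless of the agent's defaults.
agent = Agent(name="assistant", instructions="You are helpful")

response = agent.run(
    "Remember that my favorite color is blue.",
    context=UserContext(),       # a Pydantic model or dict context object
    context_updates="after",
    context_strategy="all",
    context_format="json",
)
print(response.context)          # the updated context object
```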
@@ -621,6 +807,16 @@ class Agent(BaseGenAIModel, Generic[T]):
 
         # RUN MAIN AGENTIC LOOP
         for step in range(max_steps):
+            # Update context before processing if configured
+            if context and self._should_update_context(context, "before", effective_context_settings["context_updates"]):
+                context = self._perform_context_update(
+                    context=context,
+                    model=working_model,
+                    current_messages=current_messages,
+                    timing="before",
+                    effective_settings=effective_context_settings,
+                )
+
             # Format messages with instructions and context for first step only
             if step == 0:
                 formatted_messages = self._format_messages_with_context(
@@ -640,7 +836,7 @@ class Agent(BaseGenAIModel, Generic[T]):
             # Get language model response
             response = working_model.run(
                 messages=formatted_messages,
-                tools=[tool.
+                tools=[tool.to_dict() for tool in self.tools]
                 if self.tools
                 else None,
                 **model_kwargs,
@@ -663,6 +859,15 @@ class Agent(BaseGenAIModel, Generic[T]):
                     steps.append(response)
                 else:
                     # No tool calls - this is the final step
+                    # Update context after processing if configured
+                    if context and self._should_update_context(context, "after", effective_context_settings["context_updates"]):
+                        context = self._perform_context_update(
+                            context=context,
+                            model=working_model,
+                            current_messages=current_messages,
+                            timing="after",
+                            effective_settings=effective_context_settings,
+                        )
                     return _create_agent_response_from_language_model_response(
                         response=response, steps=steps, context=context
                     )
@@ -680,6 +885,16 @@ class Agent(BaseGenAIModel, Generic[T]):
             **model_kwargs,
         )
 
+        # Update context after processing if configured
+        if context and self._should_update_context(context, "after", effective_context_settings["context_updates"]):
+            context = self._perform_context_update(
+                context=context,
+                model=working_model,
+                current_messages=current_messages,
+                timing="after",
+                effective_settings=effective_context_settings,
+            )
+
         return _create_agent_response_from_language_model_response(
             response=final_response, steps=steps, context=context
         )
@@ -691,8 +906,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         **kwargs: Any,
-    ) -> AgentResponse[T]:
+    ) -> AgentResponse[T, AgentContext]:
        """Runs this agent asynchronously and returns a final agent response.

        You can override defaults assigned to this agent from this function directly.
@@ -772,6 +997,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         if max_steps is None:
             max_steps = self.settings.max_steps
 
+        # Get effective context settings
+        effective_context_settings = self._get_effective_context_settings(
+            context_updates=context_updates,
+            context_confirm=context_confirm,
+            context_strategy=context_strategy,
+            context_max_retries=context_max_retries,
+            context_confirm_instructions=context_confirm_instructions,
+            context_selection_instructions=context_selection_instructions,
+            context_update_instructions=context_update_instructions,
+            context_format=context_format,
+        )
+
         # Parse initial messages
         parsed_messages = parse_messages(messages)
         current_messages = parsed_messages.copy()
@@ -779,6 +1016,16 @@ class Agent(BaseGenAIModel, Generic[T]):
 
         # RUN MAIN AGENTIC LOOP
         for step in range(max_steps):
+            # Update context before processing if configured
+            if context and self._should_update_context(context, "before", effective_context_settings["context_updates"]):
+                context = self._perform_context_update(
+                    context=context,
+                    model=working_model,
+                    current_messages=current_messages,
+                    timing="before",
+                    effective_settings=effective_context_settings,
+                )
+
             # Format messages with instructions and context for first step only
             if step == 0:
                 formatted_messages = self._format_messages_with_context(
@@ -798,7 +1045,7 @@ class Agent(BaseGenAIModel, Generic[T]):
             # Get language model response
             response = await working_model.async_run(
                 messages=formatted_messages,
-                tools=[tool.
+                tools=[tool.to_dict() for tool in self.tools]
                 if self.tools
                 else None,
                 **model_kwargs,
@@ -821,6 +1068,15 @@ class Agent(BaseGenAIModel, Generic[T]):
                     steps.append(response)
                 else:
                     # No tool calls - this is the final step
+                    # Update context after processing if configured
+                    if context and self._should_update_context(context, "after", effective_context_settings["context_updates"]):
+                        context = self._perform_context_update(
+                            context=context,
+                            model=working_model,
+                            current_messages=current_messages,
+                            timing="after",
+                            effective_settings=effective_context_settings,
+                        )
                     return _create_agent_response_from_language_model_response(
                         response=response, steps=steps, context=context
                     )
@@ -838,6 +1094,16 @@ class Agent(BaseGenAIModel, Generic[T]):
             **model_kwargs,
         )
 
+        # Update context after processing if configured
+        if context and self._should_update_context(context, "after", effective_context_settings["context_updates"]):
+            context = self._perform_context_update(
+                context=context,
+                model=working_model,
+                current_messages=current_messages,
+                timing="after",
+                effective_settings=effective_context_settings,
+            )
+
         return _create_agent_response_from_language_model_response(
             response=final_response, steps=steps, context=context
         )
@@ -850,7 +1116,7 @@ class Agent(BaseGenAIModel, Generic[T]):
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
        """Create a stream that yields agent steps.

        Args:
@@ -882,8 +1148,18 @@ class Agent(BaseGenAIModel, Generic[T]):
         max_steps: Optional[int] = None,
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
+        context_updates: Optional[
+            Union[List[Literal["before", "after"]], Literal["before", "after"]]
+        ] = None,
+        context_confirm: Optional[bool] = None,
+        context_strategy: Optional[Literal["selective", "all"]] = None,
+        context_max_retries: Optional[int] = None,
+        context_confirm_instructions: Optional[str] = None,
+        context_selection_instructions: Optional[str] = None,
+        context_update_instructions: Optional[str] = None,
+        context_format: Optional[Literal["json", "python", "markdown"]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
        """Iterate over agent steps, yielding each step response.

        You can override defaults assigned to this agent from this function directly.
@@ -986,7 +1262,7 @@ class Agent(BaseGenAIModel, Generic[T]):
         context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         **kwargs: Any,
-    ) -> AgentStream[T]:
+    ) -> AgentStream[T, AgentContext]:
        """Async iterate over agent steps, yielding each step response.

        Args:
hammad/genai/agents/types/agent_response.py
CHANGED
@@ -1,12 +1,14 @@
 """hammad.genai.agents.types.agent_response"""
 
-from typing import List, Any, TypeVar, Literal
+from typing import List, Any, TypeVar, Literal, Generic
 
 from ....cache import cached
+from ....typing import get_type_description
 from ...models.language.types import (
     LanguageModelResponse,
 )
 
+from .agent_context import AgentContext
 
 __all__ = [
     "AgentResponse",
@@ -40,7 +42,7 @@ def _create_agent_response_from_language_model_response(
     ) from e
 
 
-class AgentResponse(LanguageModelResponse[T]):
+class AgentResponse(LanguageModelResponse[T], Generic[T, AgentContext]):
     """A response generated by an agent, that includes the steps and final
     output during the agent's execution."""
 
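For `Generic[T, AgentContext]` to be a valid declaration, `AgentContext` (imported from `agent_context.py`) must be a `TypeVar`; the tiny size of that module in the RECORD (313 bytes) is consistent with that. A toy two-parameter generic showing how the new annotations read:

```python
from typing import Generic, TypeVar

T = TypeVar("T")
AgentContext = TypeVar("AgentContext")  # assumed definition in agent_context.py

class Response(Generic[T, AgentContext]):
    """Toy mirror of AgentResponse[T, AgentContext]."""
    def __init__(self, output: T, context: AgentContext) -> None:
        self.output = output
        self.context = context

# "A response whose output is a str and whose context is a dict":
r: Response[str, dict] = Response("done", {"visits": 4})
```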
@@ -55,7 +57,7 @@ class AgentResponse(LanguageModelResponse[T]):
    empty.
    """
 
-    context:
+    context: AgentContext = None
    """
    The final context object after agent execution.
 
@@ -77,7 +79,7 @@ class AgentResponse(LanguageModelResponse[T]):
         # NOTE:
         # added +1 to include final step in the output
         output += f"\n>>> Steps: {len(self.steps) + 1}"
-        output += f"\n>>>
+        output += f"\n>>> Output Type: {get_type_description(type(self.output))}"
 
         # Calculate total tool calls across all steps
         total_tool_calls = 0
@@ -87,4 +89,34 @@ class AgentResponse(LanguageModelResponse[T]):
 
         output += f"\n>>> Total Tool Calls: {total_tool_calls}"
 
+        # Show context if available
+        if self.context:
+            output += f"\n>>> Final Context: {self._format_context_display(self.context)}"
+
         return output
+
+    def _format_context_display(self, context: AgentContext) -> str:
+        """Format context for display in string representation."""
+        if context is None:
+            return "None"
+
+        try:
+            # For Pydantic models, show as dict
+            if hasattr(context, 'model_dump'):
+                context_dict = context.model_dump()
+            elif isinstance(context, dict):
+                context_dict = context
+            else:
+                return str(context)
+
+            # Format as compact JSON-like string
+            items = []
+            for key, value in context_dict.items():
+                if isinstance(value, str):
+                    items.append(f"{key}='{value}'")
+                else:
+                    items.append(f"{key}={value}")
+
+            return "{" + ", ".join(items) + "}"
+        except Exception:
+            return str(context)
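The compact rendering produced by `_format_context_display` can be previewed in isolation; for a small dict context (hypothetical values):

```python
# Stand-alone preview of the compact context rendering above.
context = {"name": "ada", "visits": 4}
items = [
    f"{k}='{v}'" if isinstance(v, str) else f"{k}={v}"
    for k, v in context.items()
]
print("{" + ", ".join(items) + "}")  # {name='ada', visits=4}
```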
hammad/genai/agents/types/agent_stream.py
CHANGED
@@ -38,6 +38,7 @@ from .agent_response import (
     AgentResponse,
     _create_agent_response_from_language_model_response,
 )
+from .agent_context import AgentContext
 
 if TYPE_CHECKING:
     from ..agent import Agent
@@ -85,10 +86,19 @@ class AgentResponseChunk(LanguageModelResponseChunk[T], Generic[T]):
 
     def __str__(self) -> str:
         """String representation of the chunk."""
-
-
-
-class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
+        output = f"AgentResponseChunk(step={self.step_number}, final={self.is_final})"
+
+        # Show content if available
+        if self.output or self.content:
+            content_preview = str(self.output if self.output else self.content)
+            if len(content_preview) > 100:
+                content_preview = content_preview[:100] + "..."
+            output += f"\nContent: {content_preview}"
+
+        return output
+
+
+class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T, AgentContext]):
     """Stream of agent responses that can be used in sync and async contexts."""
 
     def __init__(
@@ -97,7 +107,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
         messages: LanguageModelMessages,
         model: Optional[Union[LanguageModel, str]] = None,
         max_steps: Optional[int] = None,
-        context: Optional[
+        context: Optional[AgentContext] = None,
         output_type: Optional[Type[T]] = None,
         stream: bool = False,
         **kwargs: Any,
@@ -127,6 +137,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
 
         # Context handling
         self.current_context = context
+        self.initial_context = context
 
         # Model kwargs setup
         self.model_kwargs = kwargs.copy()
@@ -139,7 +150,18 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
 
     def _format_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
         if self.agent.instructions:
-            system_message = {"role": "system", "content": self.agent.instructions}
+            system_content = self.agent.instructions
+
+            # Add context if available
+            if self.current_context is not None:
+                from ..agent import _format_context_for_instructions
+                context_str = _format_context_for_instructions(
+                    self.current_context, self.agent.context_format
+                )
+                if context_str:
+                    system_content += f"\n\nContext:\n{context_str}"
+
+            system_message = {"role": "system", "content": system_content}
             messages = [system_message] + messages
         return consolidate_system_messages(messages)
 
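On the first step the stream now folds the serialized context into the system prompt rather than sending the instructions alone. A sketch of the resulting first message; the instruction text and context values here are hypothetical:

```python
# Approximate shape of the messages produced by _format_messages on step 1,
# before consolidate_system_messages merges any duplicate system entries.
system_message = {
    "role": "system",
    "content": 'You are helpful\n\nContext:\n{"name": "ada", "visits": 4}',
}
messages = [system_message, {"role": "user", "content": "Hi!"}]
```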
@@ -162,6 +184,16 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
         else:
             self.is_done = True
             self._final_response = response
+
+            # Update context after processing if configured
+            if self.current_context and self.agent._should_update_context(self.current_context, "after"):
+                self.current_context = self.agent._perform_context_update(
+                    context=self.current_context,
+                    model=self.model,
+                    current_messages=self.current_messages,
+                    timing="after",
+                )
+
         return AgentResponseChunk(
             step_number=self.current_step, response=response, is_final=True
         )
@@ -171,6 +203,15 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
         while not self.is_done and self.current_step < self.max_steps:
             self.current_step += 1
 
+            # Update context before processing if configured
+            if self.current_context and self.agent._should_update_context(self.current_context, "before"):
+                self.current_context = self.agent._perform_context_update(
+                    context=self.current_context,
+                    model=self.model,
+                    current_messages=self.current_messages,
+                    timing="before",
+                )
+
             formatted_messages = self.current_messages
             if self.current_step == 1:
                 formatted_messages = self._format_messages(self.current_messages)
@@ -198,6 +239,15 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             # The context manager handling should be managed by the agent's run method
             self.current_step += 1
 
+            # Update context before processing if configured
+            if self.current_context and self.agent._should_update_context(self.current_context, "before"):
+                self.current_context = self.agent._perform_context_update(
+                    context=self.current_context,
+                    model=self.model,
+                    current_messages=self.current_messages,
+                    timing="before",
+                )
+
             formatted_messages = self.current_messages
             if self.current_step == 1:
                 formatted_messages = self._format_messages(self.current_messages)
@@ -215,7 +265,7 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             self.is_done = True
             return chunk
 
-    def _build_response(self) -> AgentResponse[T]:
+    def _build_response(self) -> AgentResponse[T, AgentContext]:
         if self._final_response:
             final_response = self._final_response
         elif self.steps:
@@ -229,13 +279,39 @@ class AgentStream(BaseGenAIModelStream[AgentResponseChunk[T]], Generic[T]):
             context=self.current_context,
         )
 
-    def collect(self) -> AgentResponse[T]:
+    def _format_context_display(self, context: AgentContext) -> str:
+        """Format context for display in string representation."""
+        if context is None:
+            return "None"
+
+        try:
+            # For Pydantic models, show as dict
+            if hasattr(context, 'model_dump'):
+                context_dict = context.model_dump()
+            elif isinstance(context, dict):
+                context_dict = context
+            else:
+                return str(context)
+
+            # Format as compact JSON-like string
+            items = []
+            for key, value in context_dict.items():
+                if isinstance(value, str):
+                    items.append(f"{key}='{value}'")
+                else:
+                    items.append(f"{key}={value}")
+
+            return "{" + ", ".join(items) + "}"
+        except Exception:
+            return str(context)
+
+    def collect(self) -> AgentResponse[T, AgentContext]:
         """Collect all steps and return final response."""
         for _ in self:
             pass
         return self._build_response()
 
-    async def async_collect(self) -> AgentResponse[T]:
+    async def async_collect(self) -> AgentResponse[T, AgentContext]:
         """Collect all steps and return final response."""
         async for _ in self:
             pass
hammad/genai/models/language/model.py
CHANGED
@@ -21,6 +21,7 @@ from typing_extensions import Literal
 if TYPE_CHECKING:
     from httpx import Timeout
 
+from ....logging.logger import _get_internal_logger
 from ..model_provider import litellm, instructor
 
 from ...types.base import BaseGenAIModel
@@ -50,6 +51,9 @@ __all__ = [
 T = TypeVar("T")
 
 
+logger = _get_internal_logger(__name__)
+
+
 class LanguageModelError(Exception):
     """Error raised when an error occurs during a language model operation."""
 
@@ -112,6 +116,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         # Initialize LanguageModel-specific attributes
         self._instructor_client = None
 
+        logger.info(f"Initialized LanguageModel w/ model: {self.model}")
+        logger.debug(f"LanguageModel settings: {self.settings}")
+
     def _get_instructor_client(
         self, mode: Optional[LanguageModelInstructorMode] = None
     ):
@@ -123,6 +130,8 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
             self._instructor_client is None
             or getattr(self._instructor_client, "_mode", None) != effective_mode
         ):
+            logger.debug(f"Creating new instructor client for mode: {effective_mode} from old mode: {getattr(self._instructor_client, '_mode', None)}")
+
             self._instructor_client = instructor.from_litellm(
                 completion=litellm.completion, mode=instructor.Mode(effective_mode)
             )
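The instructor client is rebuilt only when the requested mode differs from the one the cached client was created with: a lazy, mode-keyed cache. A standalone sketch of the same pattern; the class and method names here are illustrative, not the library's API:

```python
from typing import Optional

class ModeClient:
    """Illustrative stand-in for an expensive, mode-specific client."""
    def __init__(self, mode: str) -> None:
        self._mode = mode  # remember which mode this client was built for

class Holder:
    def __init__(self) -> None:
        self._client: Optional[ModeClient] = None

    def get_client(self, mode: str) -> ModeClient:
        # Rebuild only if no client exists yet or the cached mode differs.
        if self._client is None or getattr(self._client, "_mode", None) != mode:
            self._client = ModeClient(mode)
        return self._client

holder = Holder()
a = holder.get_client("tools")
b = holder.get_client("tools")   # same object, mode unchanged
c = holder.get_client("json")    # new client, mode switched
assert a is b and a is not c
```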
@@ -338,6 +347,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         Returns:
             LanguageModelResponse or LanguageModelStream depending on parameters
         """
+        logger.info(f"Running LanguageModel request with model: {self.model}")
+        logger.debug(f"LanguageModel request kwargs: {kwargs}")
+
         try:
             # Extract model, base_url, api_key, and mock_response from kwargs, using instance defaults
             model = kwargs.pop("model", None) or self.model
@@ -572,6 +584,9 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         Returns:
             LanguageModelResponse or LanguageModelAsyncStream depending on parameters
         """
+        logger.info(f"Running async LanguageModel request with model: {self.model}")
+        logger.debug(f"LanguageModel request kwargs: {kwargs}")
+
         try:
             # Extract model, base_url, api_key, and mock_response from kwargs, using instance defaults
             model = kwargs.pop("model", None) or self.model
hammad/genai/models/language/types/language_model_response.py
CHANGED
@@ -53,7 +53,7 @@ class LanguageModelResponse(BaseGenAIModelResponse[T]):
     """The actual response content of the completion. This is the string that
     was generated by the model."""
 
-    tool_calls: Optional[List["ChatCompletionMessageToolCall"]] = None
+    tool_calls: Optional[List["litellm.ChatCompletionMessageToolCall"]] = None
     """The tool calls that were made by the model. This is a list of tool calls
     that were made by the model."""
 
hammad/genai/types/tools.py
CHANGED
@@ -5,6 +5,7 @@ Tool system for agent function calling with JSON schema generation.
 
 import asyncio
 import concurrent.futures
+from dataclasses import dataclass
 import inspect
 import json
 from typing import (
@@ -44,6 +45,7 @@ __all__ = (
 )
 
 
+@dataclass
 class ToolResponseMessage:
     """Represents a tool response message for chat completion."""
 
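Before this fix the class presumably declared annotated fields but, lacking the decorator, never got a generated `__init__`; `@dataclass` restores keyword construction. A hedged sketch, where the field names below are assumptions rather than values taken from the diff:

```python
from dataclasses import dataclass

@dataclass
class ToolResponseMessage:
    """Represents a tool response message for chat completion."""
    tool_call_id: str  # hypothetical fields; the real class defines its own
    name: str
    content: str

# @dataclass generates __init__, __repr__, and __eq__ from the annotations:
msg = ToolResponseMessage(tool_call_id="call_1", name="calculator", content="4")
```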
hammad/logging/logger.py
CHANGED
@@ -953,3 +953,11 @@ def create_logger(
         console=console,
         handlers=handlers,
     )
+
+
+# internal logger and helper
+_logger = Logger("hammad", level="warning")
+
+
+def _get_internal_logger(name: str) -> Logger:
+    return Logger(name=name, level="warning")
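The helper hands each hammad module its own logger fixed at the `warning` level, so the new `info`/`debug` calls in `model.py` stay silent unless that level is lowered. Usage mirrors what `agent.py` and `model.py` now do:

```python
# How the new helper is consumed elsewhere in the package; .info and .debug
# match the calls added in this diff.
from hammad.logging.logger import _get_internal_logger

logger = _get_internal_logger(__name__)
logger.info("hidden by default: the internal level is 'warning'")
logger.debug("also hidden unless the level is lowered")
```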
{hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/RECORD
CHANGED
@@ -55,15 +55,15 @@ hammad/formatting/yaml/__init__.py,sha256=4dBeXPi0jx7ELT2_sC2fUYaiY8b8wFiUScLODc
 hammad/formatting/yaml/converters.py,sha256=zvSB8QGb56uvwO0KjXllfTj9g1FmNINOKR06DTjvXw8,153
 hammad/genai/__init__.py,sha256=16L9z0U73uUhBB7JHSL0tHWie2-rI7GAUtQSY94IeZk,3579
 hammad/genai/agents/__init__.py,sha256=R_wW_fbZqMXZZYSErAb81UDRMTaNDlAFzNKfTOm4XYg,1235
-hammad/genai/agents/agent.py,sha256=
+hammad/genai/agents/agent.py,sha256=9dA1GGVR9bQo2KAnmpvyBYNtOhhbxq7bXuKcb9NpwD0,54902
 hammad/genai/agents/run.py,sha256=G3NLJgg8nXFHfOrh_XR1NpVjGzAgjnA_Ojc_rrMHz9E,23278
 hammad/genai/agents/types/__init__.py,sha256=6X6_P82qe15dyqs-vAcXUk4na4tB-7oMdMf484v87io,1119
 hammad/genai/agents/types/agent_context.py,sha256=u4evwx9B-UKEHMtNcsNlN9q8i12bsW9HhtyvmU0NNTw,313
 hammad/genai/agents/types/agent_event.py,sha256=zNKXXPKKOsIO9MAhE-YNCOxeNg00O7j1mE0R1pA_Xr8,3925
 hammad/genai/agents/types/agent_hooks.py,sha256=wgys4ixiHjX5oux4zVSr9OPXyAZ-iJGk_MhaOKEgMxo,7853
 hammad/genai/agents/types/agent_messages.py,sha256=csjEq42bElaTZYZW2dE6nlFZc142-HgT3bB6h1KMg_w,846
-hammad/genai/agents/types/agent_response.py,sha256=
-hammad/genai/agents/types/agent_stream.py,sha256=
+hammad/genai/agents/types/agent_response.py,sha256=3-6zMMKQXbFXN4VSf_J-y_GeO9sxQTNjC_uW8x6E0m0,3795
+hammad/genai/agents/types/agent_stream.py,sha256=57Pd4lyDRQnkjeM0x3KDkqBXEDjP8lCyK836kX8wlJM,11076
 hammad/genai/models/__init__.py,sha256=e4TbEsiKIoXENOEsdIdQcWWt0RnFdTEqCz0nICHQHtM,26
 hammad/genai/models/model_provider.py,sha256=2RdOeqr7KpjyrMqq4YH4OYy1pk6sjzf2CPu1ZHa1Pdk,75
 hammad/genai/models/multimodal.py,sha256=KXUyLXqM1eBgBGZFEbMw3dYbakZFAXoko2xYprronxY,1276
@@ -77,14 +77,14 @@ hammad/genai/models/embeddings/types/embedding_model_response.py,sha256=V2H_VTl1
 hammad/genai/models/embeddings/types/embedding_model_run_params.py,sha256=ZGhCXrEEzMF5y-V8neF2a73Gh1emzrYUHVxWkybg5uE,1570
 hammad/genai/models/embeddings/types/embedding_model_settings.py,sha256=KEwvoElXhPMSVCKW2uKwqqT2lSAAthQXmGXaV7Qk5cU,1268
 hammad/genai/models/language/__init__.py,sha256=B92q9f5UIQBMIFoYUja9V61bn5Lzdrk12_bf3DHw6Is,1838
-hammad/genai/models/language/model.py,sha256=
+hammad/genai/models/language/model.py,sha256=zdQHI_vC3QXsCKnAtxqOtnzKqSTSa5rkgv86GnKrmm8,39378
 hammad/genai/models/language/run.py,sha256=nqqQYi3iBpkNxW3_JHyyZBNpn79LVWLpnebCBYOaEbA,21468
 hammad/genai/models/language/types/__init__.py,sha256=cdLnoCiVmK6T86-5CZrUJg2rxXKoSk-svyCSviUdgao,1534
 hammad/genai/models/language/types/language_model_instructor_mode.py,sha256=7ywBaY24m-UKRynnX6XsfVf_hsQrM2xHAHugTgV0Vho,1008
 hammad/genai/models/language/types/language_model_messages.py,sha256=e-HZ_YKXq17gwmMlpOmYUYUpBFm7Mu3aRawtjSslWXs,504
 hammad/genai/models/language/types/language_model_name.py,sha256=2V70cZ47L9yIcug6LCcMHcvEJaee7gRN6DUPhLUBlsE,8056
 hammad/genai/models/language/types/language_model_request.py,sha256=ZtzhCx8o6zkEBS3uTFXFLf_poDD7MnIp1y7MbKckOmI,3911
-hammad/genai/models/language/types/language_model_response.py,sha256=
+hammad/genai/models/language/types/language_model_response.py,sha256=vxNunQ8ZXds-9hawMkXfNoTkWlEeyRy7kgWhlNt6xoc,7493
 hammad/genai/models/language/types/language_model_response_chunk.py,sha256=wIzGZw732KsI-a1-uASjATA6qvBuq-7rupWoFjsAgQo,1796
 hammad/genai/models/language/types/language_model_settings.py,sha256=C0EvLXZoOLgPZ4bX7mVFs_CWP-jam27qkseJRGsBAfQ,2794
 hammad/genai/models/language/types/language_model_stream.py,sha256=XgJ83JSbtTdf7jeLQMrDhMfI7zp0pRrdY7JWYbZV_h0,22043
@@ -94,10 +94,10 @@ hammad/genai/models/language/utils/structured_outputs.py,sha256=Va7pie9AOvLbJOaD
 hammad/genai/types/__init__.py,sha256=W0fzUnKhDynt4TkwZX8LCRYfgRTAVomSuWqPmhGu8sg,25
 hammad/genai/types/base.py,sha256=VnGL45w8oR-6rWl2GfGgWX4SjMC-23RGWuN0_H2bH_I,5437
 hammad/genai/types/history.py,sha256=zsfBvGMoFTHZCT7Igae-5_jszu409dVJ_wEmNw7alCk,10208
-hammad/genai/types/tools.py,sha256=
+hammad/genai/types/tools.py,sha256=3p7qhZcilP_NOCOnufCkubTeYN0yC7Ij5bqrUy-FYow,16554
 hammad/logging/__init__.py,sha256=VtskZx0bKEAJ9FHTMflhB1CzeFUxLpDT5HPgcecAXUo,701
 hammad/logging/decorators.py,sha256=VbI1x3P4ft0-0BGjXq7nQgiuNqcXAA51CGmoSn47iSw,30122
-hammad/logging/logger.py,sha256=
+hammad/logging/logger.py,sha256=vqinFHYUiCdHxJntsvXRjNextwOfBkXBOlLXduEk5VM,31705
 hammad/mcp/__init__.py,sha256=5oTU-BLYjfz6fBHDH9cyWg3DpQ6Qar-jodbCR05SuWo,1123
 hammad/mcp/client/__init__.py,sha256=_SfnKvd5Za-FfFoE5GcXkBY9WcwprZND9SyZ6RY--no,795
 hammad/mcp/client/client.py,sha256=auKCiIJfcZkuVFRapTpqYP4PxoyIfx40gVbMYLBdTzI,20565
@@ -121,7 +121,7 @@ hammad/web/openapi/__init__.py,sha256=JhJQ6_laBmB2djIYFc0vgGha2GsdUe4FP1LDdZCQ5J
 hammad/web/openapi/client.py,sha256=1pXz7KAO_0pN4kQZoWKWskXDYGiJ535TsPO1GGCiC0E,26816
 hammad/web/search/__init__.py,sha256=e9A6znPIiZCz-4secyHbUs0uUGf5yAqW6wGacgx961U,24
 hammad/web/search/client.py,sha256=LIx2MsHhn6cRTuq5i1mWowRTdIhPobY4GQV3S3bk9lk,36694
-hammad_python-0.0.
-hammad_python-0.0.
-hammad_python-0.0.
-hammad_python-0.0.
+hammad_python-0.0.21.dist-info/METADATA,sha256=rqHKqJ3Ipe4XisXufat4OwBws3-MgShyRN0PFrUM58Q,6570
+hammad_python-0.0.21.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hammad_python-0.0.21.dist-info/licenses/LICENSE,sha256=h74yFUWjbBaodcWG5wNmm30npjl8obVcxD-1nQfUp2I,1069
+hammad_python-0.0.21.dist-info/RECORD,,
{hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/WHEEL
File without changes
{hammad_python-0.0.20.dist-info → hammad_python-0.0.21.dist-info}/licenses/LICENSE
File without changes