unique_toolkit 1.28.8__py3-none-any.whl → 1.33.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. unique_toolkit/__init__.py +12 -6
  2. unique_toolkit/_common/docx_generator/service.py +8 -32
  3. unique_toolkit/_common/utils/jinja/helpers.py +10 -0
  4. unique_toolkit/_common/utils/jinja/render.py +18 -0
  5. unique_toolkit/_common/utils/jinja/schema.py +65 -0
  6. unique_toolkit/_common/utils/jinja/utils.py +80 -0
  7. unique_toolkit/agentic/message_log_manager/service.py +9 -0
  8. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +58 -3
  9. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +11 -0
  10. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +33 -0
  11. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +99 -15
  12. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
  13. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +768 -0
  14. unique_toolkit/agentic/tools/a2a/tool/config.py +77 -1
  15. unique_toolkit/agentic/tools/a2a/tool/service.py +67 -3
  16. unique_toolkit/agentic/tools/config.py +5 -45
  17. unique_toolkit/agentic/tools/openai_builtin/base.py +4 -0
  18. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +4 -0
  19. unique_toolkit/agentic/tools/tool_manager.py +16 -19
  20. unique_toolkit/app/__init__.py +3 -0
  21. unique_toolkit/app/fast_api_factory.py +131 -0
  22. unique_toolkit/app/webhook.py +77 -0
  23. unique_toolkit/chat/functions.py +1 -1
  24. unique_toolkit/content/functions.py +4 -4
  25. unique_toolkit/content/service.py +1 -1
  26. unique_toolkit/data_extraction/README.md +96 -0
  27. unique_toolkit/data_extraction/__init__.py +11 -0
  28. unique_toolkit/data_extraction/augmented/__init__.py +5 -0
  29. unique_toolkit/data_extraction/augmented/service.py +93 -0
  30. unique_toolkit/data_extraction/base.py +25 -0
  31. unique_toolkit/data_extraction/basic/__init__.py +11 -0
  32. unique_toolkit/data_extraction/basic/config.py +18 -0
  33. unique_toolkit/data_extraction/basic/prompt.py +13 -0
  34. unique_toolkit/data_extraction/basic/service.py +55 -0
  35. unique_toolkit/embedding/service.py +1 -1
  36. unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
  37. unique_toolkit/framework_utilities/openai/client.py +2 -1
  38. unique_toolkit/language_model/infos.py +22 -1
  39. unique_toolkit/services/knowledge_base.py +4 -6
  40. {unique_toolkit-1.28.8.dist-info → unique_toolkit-1.33.3.dist-info}/METADATA +51 -2
  41. {unique_toolkit-1.28.8.dist-info → unique_toolkit-1.33.3.dist-info}/RECORD +43 -27
  42. unique_toolkit/agentic/tools/test/test_tool_manager.py +0 -1686
  43. {unique_toolkit-1.28.8.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
  44. {unique_toolkit-1.28.8.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
@@ -0,0 +1,96 @@
+ # Data Extraction Module
+
+ This module provides a flexible framework for extracting structured data from text using language models. It supports both basic and augmented data extraction capabilities.
+
+ ## Overview
+
+ The module consists of two main components:
+
+ 1. **Basic Data Extraction**: Uses language models to extract structured data from text based on a provided schema.
+ 2. **Augmented Data Extraction**: Extends basic extraction by adding extra fields to the output schema while maintaining the original data structure.
+
+ ## Components
+
+ ### Base Classes
+
+ - `BaseDataExtractor`: Abstract base class that defines the interface for data extraction
+ - `BaseDataExtractionResult`: Generic base class for extraction results
+
+ ### Basic Extraction
+
+ - `StructuredOutputDataExtractor`: Implements basic data extraction using language models
+ - `StructuredOutputDataExtractorConfig`: Configuration for the basic extractor
+
+ ### Augmented Extraction
+
+ - `AugmentedDataExtractor`: Extends basic extraction with additional fields
+ - `AugmentedDataExtractionResult`: Result type for augmented extraction
+
+ ## Usage Examples
+
+ ### Basic Data Extraction
+
+ ```python
+ from pydantic import BaseModel
+ from unique_toolkit.data_extraction import StructuredOutputDataExtractor, StructuredOutputDataExtractorConfig
+ from unique_toolkit import LanguageModelService
+
+ # Define your schema
+ class PersonInfo(BaseModel):
+     name: str
+     age: int
+     occupation: str
+
+ # Create the extractor
+ config = StructuredOutputDataExtractorConfig()
+ lm_service = LanguageModelService()  # Configure as needed
+ extractor = StructuredOutputDataExtractor(config, lm_service)
+
+ # Extract data
+ text = "John is 30 years old and works as a software engineer."
+ result = await extractor.extract_data_from_text(text, PersonInfo)
+ print(result.data)  # PersonInfo(name="John", age=30, occupation="software engineer")
+ ```
+
+ ### Augmented Data Extraction
+
+ ```python
+ from pydantic import BaseModel, Field
+ from unique_toolkit.data_extraction import AugmentedDataExtractor, StructuredOutputDataExtractor
+
+ # Define your base schema
+ class PersonInfo(BaseModel):
+     name: str
+     age: int
+
+ # Create base extractor
+ base_extractor = StructuredOutputDataExtractor(...)
+
+ # Create augmented extractor with confidence scores
+ augmented_extractor = AugmentedDataExtractor(
+     base_extractor,
+     confidence=float,
+     source=(str, Field(description="Source of the information")),
+ )
+
+ # Extract data
+ text = "John is 30 years old."
+ result = await augmented_extractor.extract_data_from_text(text, PersonInfo)
+ print(result.data)  # Original PersonInfo
+ print(result.augmented_data)  # Contains additional fields
+ ```
+
+ ## Configuration
+
+ The `StructuredOutputDataExtractorConfig` allows customization of:
+
+ - Language model selection
+ - System and user prompt templates
+ - Schema enforcement settings
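+
+ A minimal sketch of a customized configuration (the field names follow `StructuredOutputDataExtractorConfig` as defined in `basic/config.py`; the prompt texts are hypothetical, and the `language_model` field can be overridden in the same way). A custom user prompt should keep the `{{ text }}` placeholder, since it is rendered with the input text:
+
+ ```python
+ from unique_toolkit.data_extraction import (
+     StructuredOutputDataExtractor,
+     StructuredOutputDataExtractorConfig,
+ )
+
+ config = StructuredOutputDataExtractorConfig(
+     # Ask the backend to enforce the structured output schema
+     structured_output_enforce_schema=True,
+     # Hypothetical domain-specific prompts
+     system_prompt_template="You are an expert at extracting candidate data from resumes.",
+     user_prompt_template="Here is the resume to extract data from:\n{{ text }}",
+ )
+ extractor = StructuredOutputDataExtractor(config, lm_service)  # lm_service as above
+ ```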
+
+ ## Best Practices
+
+ 1. Always define clear Pydantic models for your extraction schemas
+ 2. Use augmented extraction when you need additional metadata
+ 3. Consider using strict mode for augmented extraction when you want to enforce schema compliance (see the sketch below)
+ 4. Customize prompts for better extraction results in specific domains
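+
+ As a sketch of point 3, assuming the `base_extractor`, `text`, and `PersonInfo` schema from the examples above (the `confidence` field is illustrative):
+
+ ```python
+ from pydantic import Field
+ from unique_toolkit.data_extraction import AugmentedDataExtractor
+
+ # strict=True builds the per-field augmented models with extra="forbid",
+ # so the language model output has to match the augmented schema exactly.
+ strict_extractor = AugmentedDataExtractor(
+     base_extractor,
+     strict=True,
+     confidence=(float, Field(description="Confidence between 0 and 1")),
+ )
+ result = await strict_extractor.extract_data_from_text(text, PersonInfo)
+ print(result.augmented_data)  # Each field value now carries a confidence score
+ ```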
@@ -0,0 +1,11 @@
+ from unique_toolkit.data_extraction.augmented import AugmentedDataExtractor
+ from unique_toolkit.data_extraction.basic import (
+     StructuredOutputDataExtractor,
+     StructuredOutputDataExtractorConfig,
+ )
+
+ __all__ = [
+     "StructuredOutputDataExtractor",
+     "StructuredOutputDataExtractorConfig",
+     "AugmentedDataExtractor",
+ ]
@@ -0,0 +1,5 @@
+ from unique_toolkit.data_extraction.augmented.service import (
+     AugmentedDataExtractor,
+ )
+
+ __all__ = ["AugmentedDataExtractor"]
@@ -0,0 +1,93 @@
+ from typing import Any
+ from pydantic import BaseModel, create_model
+ from pydantic.alias_generators import to_pascal
+ from pydantic.fields import FieldInfo
+ from typing_extensions import override
+
+ from unique_toolkit.data_extraction.base import (
+     BaseDataExtractionResult,
+     BaseDataExtractor,
+     ExtractionSchema,
+ )
+
+
+ def _build_augmented_model_for_field(
+     field_name: str,
+     field_type: Any | tuple[Any, FieldInfo],
+     strict: bool = False,
+     **extra_fields: Any | tuple[Any, FieldInfo],
+ ) -> type[BaseModel]:
+     camelized_field_name = to_pascal(field_name)
+
+     fields = {
+         **extra_fields,
+         field_name: field_type,
+     }
+
+     return create_model(
+         f"{camelized_field_name}Value",
+         **fields,  # type: ignore
+         __config__={"extra": "forbid" if strict else "ignore"},
+     )
+
+
+ class AugmentedDataExtractionResult(BaseDataExtractionResult[ExtractionSchema]):
+     """
+     Result of data extraction from text using an augmented schema.
+     """
+
+     augmented_data: BaseModel
+
+
+ class AugmentedDataExtractor(BaseDataExtractor):
+     def __init__(
+         self,
+         base_data_extractor: BaseDataExtractor,
+         strict: bool = False,
+         **extra_fields: Any | tuple[Any, FieldInfo],
+     ):
+         self._base_data_extractor = base_data_extractor
+         self._extra_fields = extra_fields
+         self._strict = strict
+
+     def _prepare_schema(self, schema: type[ExtractionSchema]) -> type[BaseModel]:
+         fields = {}
+
+         for field_name, field_type in schema.model_fields.items():
+             wrapped_field = _build_augmented_model_for_field(
+                 field_name,
+                 (field_type.annotation, field_type),
+                 strict=self._strict,
+                 **self._extra_fields,
+             )
+             fields[field_name] = wrapped_field
+
+         return create_model(
+             schema.__name__,
+             **fields,
+             __config__={"extra": "forbid" if self._strict else "ignore"},
+             __doc__=schema.__doc__,
+         )
+
+     def _extract_output(
+         self, llm_output: BaseModel, schema: type[ExtractionSchema]
+     ) -> ExtractionSchema:
+         output_data = {
+             field_name: getattr(value, field_name) for field_name, value in llm_output
+         }
+         return schema.model_validate(output_data)
+
+     @override
+     async def extract_data_from_text(
+         self, text: str, schema: type[ExtractionSchema]
+     ) -> AugmentedDataExtractionResult[ExtractionSchema]:
+         model_with_extra_fields = self._prepare_schema(schema)
+         augmented_data = (
+             await self._base_data_extractor.extract_data_from_text(
+                 text, model_with_extra_fields
+             )
+         ).data
+         return AugmentedDataExtractionResult(
+             data=self._extract_output(augmented_data, schema),
+             augmented_data=augmented_data,
+         )
@@ -0,0 +1,25 @@
+ from abc import ABC, abstractmethod
+ from typing import Generic, TypeVar
+
+ from pydantic import BaseModel
+
+ ExtractionSchema = TypeVar("ExtractionSchema", bound=BaseModel)
+
+
+ class BaseDataExtractionResult(BaseModel, Generic[ExtractionSchema]):
+     """
+     Base class for data extraction results.
+     """
+
+     data: ExtractionSchema
+
+
+ class BaseDataExtractor(ABC):
+     """
+     Extract structured data from text.
+     """
+
+     @abstractmethod
+     async def extract_data_from_text(
+         self, text: str, schema: type[ExtractionSchema]
+     ) -> BaseDataExtractionResult[ExtractionSchema]: ...
@@ -0,0 +1,11 @@
+ from unique_toolkit.data_extraction.basic.config import (
+     StructuredOutputDataExtractorConfig,
+ )
+ from unique_toolkit.data_extraction.basic.service import (
+     StructuredOutputDataExtractor,
+ )
+
+ __all__ = [
+     "StructuredOutputDataExtractorConfig",
+     "StructuredOutputDataExtractor",
+ ]
@@ -0,0 +1,18 @@
+ from pydantic import BaseModel
+
+ from unique_toolkit._common.pydantic_helpers import get_configuration_dict
+ from unique_toolkit._common.validators import LMI, get_LMI_default_field
+ from unique_toolkit.data_extraction.basic.prompt import (
+     DEFAULT_DATA_EXTRACTION_SYSTEM_PROMPT,
+     DEFAULT_DATA_EXTRACTION_USER_PROMPT,
+ )
+ from unique_toolkit.language_model.default_language_model import DEFAULT_GPT_4o
+
+
+ class StructuredOutputDataExtractorConfig(BaseModel):
+     model_config = get_configuration_dict()
+
+     language_model: LMI = get_LMI_default_field(DEFAULT_GPT_4o)
+     structured_output_enforce_schema: bool = False
+     system_prompt_template: str = DEFAULT_DATA_EXTRACTION_SYSTEM_PROMPT
+     user_prompt_template: str = DEFAULT_DATA_EXTRACTION_USER_PROMPT
@@ -0,0 +1,13 @@
+ DEFAULT_DATA_EXTRACTION_SYSTEM_PROMPT = """
+ You are a thorough and accurate expert in data processing.
+
+ You will be given some text and an output schema, describing what needs to be extracted from the text.
+ You will need to extract the data from the text and return it in the output schema.
+ """.strip()
+
+ DEFAULT_DATA_EXTRACTION_USER_PROMPT = """
+ Here is the text to extract data from:
+ {{ text }}
+
+ Please thoroughly extract the data from the text and return it in the output schema.
+ """.strip()
@@ -0,0 +1,55 @@
+ from typing_extensions import override
+
+ from unique_toolkit._common.utils.jinja.render import render_template
+ from unique_toolkit.data_extraction.base import (
+     BaseDataExtractionResult,
+     BaseDataExtractor,
+     ExtractionSchema,
+ )
+ from unique_toolkit.data_extraction.basic.config import (
+     StructuredOutputDataExtractorConfig,
+ )
+ from unique_toolkit.language_model import LanguageModelService
+ from unique_toolkit.language_model.builder import MessagesBuilder
+
+
+ class StructuredOutputDataExtractor(BaseDataExtractor):
+     """
+     Basic Structured Output Data Extraction.
+     """
+
+     def __init__(
+         self,
+         config: StructuredOutputDataExtractorConfig,
+         language_model_service: LanguageModelService,
+     ):
+         self._config = config
+         self._language_model_service = language_model_service
+
+     @override
+     async def extract_data_from_text(
+         self, text: str, schema: type[ExtractionSchema]
+     ) -> BaseDataExtractionResult[ExtractionSchema]:
+         messages_builder = (
+             MessagesBuilder()
+             .system_message_append(self._config.system_prompt_template)
+             .user_message_append(
+                 render_template(
+                     self._config.user_prompt_template,
+                     {
+                         "text": text,
+                     },
+                 )
+             )
+         )
+         response = await self._language_model_service.complete_async(
+             messages=messages_builder.build(),
+             model_name=self._config.language_model.name,
+             structured_output_model=schema,
+             temperature=0.0,
+             structured_output_enforce_schema=self._config.structured_output_enforce_schema,
+         )
+
+         return BaseDataExtractionResult(
+             data=schema.model_validate(response.choices[0].message.parsed),
+         )
@@ -145,7 +145,7 @@ class EmbeddingService(BaseService):
  Embed text.

  Args:
- text (str): The text to embed.
+ texts (list[str]): The texts to embed.
  timeout (int): The timeout in milliseconds. Defaults to 600000.

  Returns:
@@ -0,0 +1,10 @@
+ """Langchain framework utilities."""
+
+ try:
+     from .client import LangchainNotInstalledError, get_langchain_client
+
+     __all__ = ["get_langchain_client", "LangchainNotInstalledError"]
+ except (ImportError, Exception):
+     # If langchain is not installed, don't export anything
+     # This handles both ImportError and LangchainNotInstalledError
+     __all__ = []
@@ -30,7 +30,8 @@ def get_openai_client(
  """Get an OpenAI client instance.

  Args:
- env_file: Optional path to environment file
+ unique_settings (UniqueSettings | None): Optional UniqueSettings instance
+ additional_headers (dict[str, str] | None): Optional additional headers to add to the request

  Returns:
  OpenAI client instance
@@ -47,6 +47,7 @@ class LanguageModelName(StrEnum):
  ANTHROPIC_CLAUDE_SONNET_4_5 = "litellm:anthropic-claude-sonnet-4-5"
  ANTHROPIC_CLAUDE_OPUS_4 = "litellm:anthropic-claude-opus-4"
  ANTHROPIC_CLAUDE_OPUS_4_1 = "litellm:anthropic-claude-opus-4-1"
+ ANTHROPIC_CLAUDE_OPUS_4_5 = "litellm:anthropic-claude-opus-4-5"
  GEMINI_2_0_FLASH = "litellm:gemini-2-0-flash"
  GEMINI_2_5_FLASH = "litellm:gemini-2-5-flash"
  GEMINI_2_5_FLASH_LITE = "litellm:gemini-2-5-flash-lite"
@@ -946,7 +947,7 @@ class LanguageModelInfo(BaseModel):
  ModelCapabilities.REASONING,
  ],
  provider=LanguageModelProvider.LITELLM,
- version="claude-opus-4",
+ version="claude-opus-4-1",
  encoder_name=EncoderName.O200K_BASE,  # TODO: Update encoder with litellm
  token_limits=LanguageModelTokenLimits(
  # Input limit is 200_000, we leave 20_000 tokens as buffer due to tokenizer mismatch
@@ -956,6 +957,26 @@ class LanguageModelInfo(BaseModel):
  info_cutoff_at=date(2025, 3, 1),
  published_at=date(2025, 5, 1),
  )
+ case LanguageModelName.ANTHROPIC_CLAUDE_OPUS_4_5:
+     return cls(
+         name=model_name,
+         capabilities=[
+             ModelCapabilities.FUNCTION_CALLING,
+             ModelCapabilities.STREAMING,
+             ModelCapabilities.VISION,
+             ModelCapabilities.REASONING,
+         ],
+         provider=LanguageModelProvider.LITELLM,
+         version="claude-opus-4-5",
+         encoder_name=EncoderName.O200K_BASE,  # TODO: Update encoder with litellm
+         token_limits=LanguageModelTokenLimits(
+             # Input limit is 200_000, we leave 20_000 tokens as buffer due to tokenizer mismatch
+             token_limit_input=180_000,
+             token_limit_output=64_000,
+         ),
+         info_cutoff_at=date(2025, 8, 1),
+         published_at=date(2025, 11, 13),
+     )
  case LanguageModelName.GEMINI_2_0_FLASH:
  return cls(
  name=model_name,
@@ -377,7 +377,6 @@ class KnowledgeBaseService:
  mime_type (str): The MIME type of the content.
  scope_id (str | None): The scope ID. Defaults to None.
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
- skip_excel_ingestion (bool): Whether to skip excel ingestion. Defaults to False.
  ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
  metadata (dict | None): The metadata to associate with the content. Defaults to None.
@@ -449,7 +448,7 @@ class KnowledgeBaseService:
  skip_excel_ingestion: bool = False,
  ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
  metadata: dict[str, Any] | None = None,
- ):
+ ) -> Content:
  """
  Uploads content to the knowledge base.
@@ -487,14 +486,14 @@ class KnowledgeBaseService:
  content_id: str,
  output_dir_path: Path | None = None,
  output_filename: str | None = None,
- ):
+ ) -> Path:
  """
  Downloads content from a chat and saves it to a file.

  Args:
  content_id (str): The ID of the content to download.
- filename (str | None): The name of the file to save the content as. If not provided, the original filename will be used. Defaults to None.
- tmp_dir_path (str | Path | None): The path to the temporary directory where the content will be saved. Defaults to "/tmp".
+ output_filename (str | None): The name of the file to save the content as. If not provided, the original filename will be used. Defaults to None.
+ output_dir_path (str | Path | None): The path to the temporary directory where the content will be saved. Defaults to "/tmp".

  Returns:
  Path: The path to the downloaded file.
@@ -522,7 +521,6 @@ class KnowledgeBaseService:

  Args:
  content_id (str): The id of the uploaded content.
- chat_id (Optional[str]): The chat_id, defaults to None.

  Returns:
  bytes: The downloaded content.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: unique_toolkit
- Version: 1.28.8
+ Version: 1.33.3
  Summary:
  License: Proprietary
  Author: Cedric Klinkert
@@ -11,10 +11,11 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.12
  Requires-Dist: docxtpl (>=0.20.1,<0.21.0)
  Requires-Dist: jambo (>=0.1.2,<0.2.0)
+ Requires-Dist: jinja2 (>=3.1.6,<4.0.0)
  Requires-Dist: markdown-it-py (>=4.0.0,<5.0.0)
  Requires-Dist: mkdocs-mermaid2-plugin (>=1.2.2,<2.0.0)
  Requires-Dist: mkdocs-multirepo-plugin (>=0.8.3,<0.9.0)
- Requires-Dist: numpy (>=1.26.4,<2.0.0)
+ Requires-Dist: numpy (>=2.1.0,<3.0.0)
  Requires-Dist: openai (>=1.99.9,<2.0.0)
  Requires-Dist: pillow (>=10.4.0,<11.0.0)
  Requires-Dist: platformdirs (>=4.0.0,<5.0.0)
@@ -120,6 +121,54 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ## [1.33.3] - 2025-12-02
+ - Fix serialization of ToolBuildConfig `configuration` field.
+
+ ## [1.33.2] - 2025-12-01
+ - Upgrade numpy to >2.1.0 to ensure compatibility with the langchain library
+
+ ## [1.33.1] - 2025-12-01
+ - Add `data_extraction` to unique_toolkit
+
+ ## [1.33.0] - 2025-11-28
+ - Add support for system reminders in sub agent responses.
+
+ ## [1.32.1] - 2025-12-01
+ - Added documentation for the toolkit, some missing type hints, and docstring fixes.
+
+ ## [1.32.0] - 2025-11-28
+ - Add option to filter duplicate sub agent answers.
+
+ ## [1.31.2] - 2025-11-27
+ - Added the function `filter_tool_calls_by_max_tool_calls_allowed` in `tool_manager` to limit the number of parallel tool calls permitted per loop iteration.
+
+ ## [1.31.1] - 2025-11-27
+ - Various fixes to sub agent answers.
+
+ ## [1.31.0] - 2025-11-20
+ - Add model `litellm:anthropic-claude-opus-4-5` to `language_model/infos.py`
+
+ ## [1.30.0] - 2025-11-26
+ - Add option to only display parts of sub agent responses.
+
+ ## [1.29.4] - 2025-11-25
+ - Add display name to openai builtin tools
+
+ ## [1.29.3] - 2025-11-24
+ - Fix jinja utility helpers import
+
+ ## [1.29.2] - 2025-11-21
+ - Add `jinja` utility helpers to `_common`
+
+ ## [1.29.1] - 2025-11-21
+ - Add early return in `create_message_log_entry` if chat_service doesn't have assistant_message_id (relevant for agentic table)
+
+ ## [1.29.0] - 2025-11-21
+ - Add option to force include references in sub agent responses even if unused by main agent response.
+
+ ## [1.28.9] - 2025-11-21
+ - Remove `knowledge_base_service` from DocXGeneratorService
+
  ## [1.28.8] - 2025-11-20
  - Add query params to api operation
  - Add query params to endpoint builder