unique_toolkit 0.8.13__py3-none-any.whl → 0.8.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/_common/default_language_model.py +6 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +196 -0
- unique_toolkit/history_manager/history_construction_with_contents.py +307 -0
- unique_toolkit/history_manager/history_manager.py +85 -111
- unique_toolkit/history_manager/loop_token_reducer.py +457 -0
- unique_toolkit/language_model/infos.py +119 -0
- unique_toolkit/reference_manager/reference_manager.py +15 -2
- {unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/METADATA +9 -3
- {unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/RECORD +12 -7
- {unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/infos.py

@@ -44,6 +44,10 @@ class LanguageModelName(StrEnum):
     GEMINI_2_5_PRO = "litellm:gemini-2-5-pro"
     GEMINI_2_5_PRO_EXP_0325 = "litellm:gemini-2-5-pro-exp-03-25"
     GEMINI_2_5_PRO_PREVIEW_0605 = "litellm:gemini-2-5-pro-preview-06-05"
+    LITELLM_OPENAI_GPT_5 = "litellm:openai-gpt-5"
+    LITELLM_OPENAI_GPT_5_MINI = "litellm:openai-gpt-5-mini"
+    LITELLM_OPENAI_GPT_5_NANO = "litellm:openai-gpt-5-nano"
+    LITELLM_OPENAI_GPT_5_CHAT = "litellm:openai-gpt-5-chat"


 class EncoderName(StrEnum):
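The four added members register the LiteLLM-hosted GPT-5 deployments in the same StrEnum as the existing models. A minimal sketch of referencing them, assuming only the module path given in the file list above and the member names visible in this hunk:

```python
# Minimal sketch: module path from the file list above
# (unique_toolkit/language_model/infos.py); only enum members shown in this
# diff are referenced.
from unique_toolkit.language_model.infos import LanguageModelName

gpt5_models = [
    LanguageModelName.LITELLM_OPENAI_GPT_5,
    LanguageModelName.LITELLM_OPENAI_GPT_5_MINI,
    LanguageModelName.LITELLM_OPENAI_GPT_5_NANO,
    LanguageModelName.LITELLM_OPENAI_GPT_5_CHAT,
]

for model in gpt5_models:
    # StrEnum members compare equal to their string values,
    # e.g. LITELLM_OPENAI_GPT_5 == "litellm:openai-gpt-5"
    print(f"{model.name} -> {model.value}")
```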
@@ -71,6 +75,10 @@ def get_encoder_name(model_name: LanguageModelName) -> EncoderName:
             | LMN.AZURE_GPT_5_MINI_2025_0807
             | LMN.AZURE_GPT_5_NANO_2025_0807
             | LMN.AZURE_GPT_5_CHAT_2025_0807
+            | LMN.LITELLM_OPENAI_GPT_5
+            | LMN.LITELLM_OPENAI_GPT_5_MINI
+            | LMN.LITELLM_OPENAI_GPT_5_NANO
+            | LMN.LITELLM_OPENAI_GPT_5_CHAT
         ):
             return EncoderName.O200K_BASE
         case _:
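The new LiteLLM GPT-5 entries are routed to the same O200K_BASE encoder as the Azure GPT-5 deployments above. As an illustration only (tiktoken itself is not part of this diff), that encoder name corresponds to tiktoken's `o200k_base` encoding:

```python
# Illustration only: EncoderName.O200K_BASE maps to tiktoken's "o200k_base"
# vocabulary. tiktoken is assumed to be installed; the toolkit's own helpers
# (e.g. _common/token/token_counting.py in the file list) may wrap this.
import tiktoken

enc = tiktoken.get_encoding("o200k_base")
prompt = "Hello from GPT-5 via LiteLLM"
tokens = enc.encode(prompt)
print(len(tokens))  # rough token count for prompt budgeting
```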
@@ -215,8 +223,10 @@ class LanguageModelInfo(BaseModel):
                 capabilities=[
                     ModelCapabilities.FUNCTION_CALLING,
                     ModelCapabilities.STREAMING,
+                    ModelCapabilities.REASONING,
                     ModelCapabilities.VISION,
                     ModelCapabilities.STRUCTURED_OUTPUT,
+                    ModelCapabilities.PARALLEL_FUNCTION_CALLING,
                 ],
                 token_limits=LanguageModelTokenLimits(
                     token_limit_input=272000, token_limit_output=128000
@@ -241,8 +251,10 @@ class LanguageModelInfo(BaseModel):
                 capabilities=[
                     ModelCapabilities.FUNCTION_CALLING,
                     ModelCapabilities.STREAMING,
+                    ModelCapabilities.REASONING,
                     ModelCapabilities.VISION,
                     ModelCapabilities.STRUCTURED_OUTPUT,
+                    ModelCapabilities.PARALLEL_FUNCTION_CALLING,
                 ],
                 token_limits=LanguageModelTokenLimits(
                     token_limit_input=272000, token_limit_output=128000
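These two hunks add REASONING and PARALLEL_FUNCTION_CALLING to the existing GPT-5 capability lists. A hedged sketch of a capability check; the `from_name` factory is implied by the match statement further down in the file but is not shown here, so treat it as an assumption:

```python
# Hedged sketch: the capability flags come from this diff; the from_name
# classmethod is assumed (the diff only shows the match arms inside it).
from unique_toolkit.language_model.infos import (
    LanguageModelInfo,
    LanguageModelName,
    ModelCapabilities,
)

info = LanguageModelInfo.from_name(LanguageModelName.LITELLM_OPENAI_GPT_5)

if ModelCapabilities.REASONING in info.capabilities:
    print("model advertises reasoning support")
if ModelCapabilities.PARALLEL_FUNCTION_CALLING in info.capabilities:
    print("parallel tool calls can be requested")
```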
@@ -264,6 +276,10 @@ class LanguageModelInfo(BaseModel):
                 provider=LanguageModelProvider.AZURE,
                 version="2025-08-07",
                 encoder_name=EncoderName.O200K_BASE,
+                capabilities=[
+                    ModelCapabilities.STREAMING,
+                    ModelCapabilities.VISION,
+                ],
                 token_limits=LanguageModelTokenLimits(
                     token_limit_input=128000, token_limit_output=16384
                 ),
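This hunk gives the Azure GPT-5 chat deployment an explicit capability list (streaming and vision only) while keeping the 128k-input / 16,384-output limits; the LiteLLM chat variant added below carries the same limits. A sketch of reading those limits, under the same `from_name` assumption as above:

```python
# Sketch under the same assumptions as above; the token limit field names
# are taken verbatim from this diff.
from unique_toolkit.language_model.infos import LanguageModelInfo, LanguageModelName

chat_info = LanguageModelInfo.from_name(LanguageModelName.LITELLM_OPENAI_GPT_5_CHAT)
limits = chat_info.token_limits
print(limits.token_limit_input, limits.token_limit_output)  # expected: 128000 16384
```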
@@ -757,6 +773,109 @@ class LanguageModelInfo(BaseModel):
                     info_cutoff_at=date(2025, 1, day=1),
                     published_at=date(2025, 6, 5),
                 )
+            case LanguageModelName.LITELLM_OPENAI_GPT_5:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="gpt-5",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.REASONING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=272000, token_limit_output=128000
+                    ),
+                    info_cutoff_at=date(2024, 10, 24),
+                    published_at=date(2025, 8, 7),
+                    deprecated_at=date(2026, 8, 7),
+                    retirement_at=date(2026, 8, 7),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    default_options={
+                        "reasoning_effort": "minimal",
+                    },
+                )
+            case LanguageModelName.LITELLM_OPENAI_GPT_5_MINI:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="gpt-5-mini",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.REASONING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=272000, token_limit_output=128000
+                    ),
+                    info_cutoff_at=date(2024, 6, 24),
+                    published_at=date(2025, 8, 7),
+                    deprecated_at=date(2026, 8, 7),
+                    retirement_at=date(2026, 8, 7),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    default_options={
+                        "reasoning_effort": "minimal",
+                    },
+                )
+            case LanguageModelName.LITELLM_OPENAI_GPT_5_NANO:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="gpt-5-nano",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.REASONING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=272000, token_limit_output=128000
+                    ),
+                    info_cutoff_at=date(2024, 5, 31),
+                    published_at=date(2025, 8, 7),
+                    deprecated_at=date(2026, 8, 7),
+                    retirement_at=date(2026, 8, 7),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    default_options={
+                        "reasoning_effort": "minimal",
+                    },
+                )
+            case LanguageModelName.LITELLM_OPENAI_GPT_5_CHAT:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="gpt-5-chat",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=128000, token_limit_output=16384
+                    ),
+                    info_cutoff_at=date(2024, 10, 24),
+                    published_at=date(2025, 8, 7),
+                    deprecated_at=date(2026, 8, 7),
+                    retirement_at=date(2026, 8, 7),
+                )
+
             case _:
                 if isinstance(model_name, LanguageModelName):
                     raise ValueError(
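The three LiteLLM reasoning variants pin the temperature to exactly 1.0 and default `reasoning_effort` to "minimal"; the chat variant omits both. A sketch of inspecting these defaults, with field names taken verbatim from the diff and `from_name` assumed as before:

```python
# Sketch: field names come from this diff; from_name is assumed as before.
from unique_toolkit.language_model.infos import LanguageModelInfo, LanguageModelName

info = LanguageModelInfo.from_name(LanguageModelName.LITELLM_OPENAI_GPT_5)

# Temperature is pinned: min and max are both 1.0 for the reasoning variants.
bounds = info.temperature_bounds
print(bounds.min_temperature, bounds.max_temperature)  # expected: 1.0 1.0

# Requests default to minimal reasoning effort; copy before overriding so the
# shared model info is not mutated.
options = dict(info.default_options)
options["reasoning_effort"] = "medium"  # hypothetical per-request override
```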
unique_toolkit/reference_manager/reference_manager.py

@@ -3,7 +3,7 @@ from unique_toolkit.tools.schemas import ToolCallResponse


 class tool_chunks:
-    def __init__(self, name: str, chunks: list) -> None:
+    def __init__(self, name: str, chunks: list[ContentChunk]) -> None:
         self.name = name
         self.chunks = chunks

@@ -47,8 +47,21 @@ class ReferenceManager:
     def get_chunks(self) -> list[ContentChunk]:
         return self._chunks

-    def get_tool_chunks(self) -> dict:
+    def get_tool_chunks(self) -> dict[str, tool_chunks]:
         return self._tool_chunks
+
+
+    def get_chunks_of_all_tools(self) -> list[list[ContentChunk]]:
+        return [tool_chunks.chunks for tool_chunks in self._tool_chunks.values()]
+
+    def get_chunks_of_tool(self, tool_call_id: str) -> list[ContentChunk]:
+        return self._tool_chunks.get(tool_call_id, tool_chunks("", [])).chunks
+
+
+    def replace_chunks_of_tool(self, tool_call_id: str,chunks: list[ContentChunk]) -> None:
+        if tool_call_id in self._tool_chunks:
+            self._tool_chunks[tool_call_id].chunks = chunks
+

     def replace(self, chunks: list[ContentChunk]):
         self._chunks = chunks
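The new accessors let callers work with retrieved chunks per tool call instead of only the flat list. A usage sketch built only on the methods shown above; the ReferenceManager import path follows the file list, and construction and chunk registration are outside this diff, so the manager is passed in by the caller:

```python
# Sketch only: ReferenceManager is constructed and populated elsewhere; the
# tool_call_id values are whatever ids the surrounding agent loop records.
from unique_toolkit.reference_manager.reference_manager import ReferenceManager


def keep_top_chunks(manager: ReferenceManager, tool_call_id: str, limit: int = 3) -> None:
    """Trim the sources recorded for one tool call to at most `limit` chunks."""
    chunks = manager.get_chunks_of_tool(tool_call_id)  # [] if the id is unknown
    manager.replace_chunks_of_tool(tool_call_id, chunks[:limit])  # no-op for unknown ids


def total_tool_chunks(manager: ReferenceManager) -> int:
    """Count chunks across all recorded tool calls."""
    return sum(len(chunks) for chunks in manager.get_chunks_of_all_tools())
```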
{unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.8.13
+Version: 0.8.15
 Summary:
 License: Proprietary
 Author: Martin Fadler
@@ -114,7 +114,13 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [0.8.13] - 2025-08-18
+## [0.8.15] - 2025-08-19
+- Added history loading from database for History Manager
+
+## [0.8.14] - 2025-08-19
+- Including GPT-5 series deployed via LiteLLM into language model info
+
+## [0.8.13] - 2025-08-18
 - Adding initial versions of
   - Evaluation Manager
   - History Manager
@@ -122,7 +128,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   - Thinking Manager
 - Updated tool manager

-## [0.8.
+## [0.8.12] - 2025-08-18
 - Fix no tool call respoonse in ChatMessage -> Open Ai messages translation
 - Add simple append method to OpenAIMessageBuilder

{unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/RECORD

@@ -1,7 +1,10 @@
 unique_toolkit/__init__.py,sha256=waK7W0EK3v2RJ26hawccwVz1i3yHGvHIIu5qgGjEGHQ,583
 unique_toolkit/_common/_base_service.py,sha256=S8H0rAebx7GsOldA7xInLp3aQJt9yEPDQdsGSFRJsGg,276
 unique_toolkit/_common/_time_utils.py,sha256=ztmTovTvr-3w71Ns2VwXC65OKUUh-sQlzbHdKTQWm-w,135
+unique_toolkit/_common/default_language_model.py,sha256=M6OiVfpi21CixfgYFigOcJGqG8r987f2rxHnn0NZ2dc,333
 unique_toolkit/_common/exception.py,sha256=caQIE1btsQnpKCHqL2cgWUSbHup06enQu_Pt7uGUTTE,727
+unique_toolkit/_common/token/image_token_counting.py,sha256=VpFfZyY0GIH27q_Wy4YNjk2algqvbCtJyzuuROoFQPw,2189
+unique_toolkit/_common/token/token_counting.py,sha256=l8tDo5EaD5FIlKz7Zd6CTNYwMhF-UZ2S3Hb-pU5z2UY,6281
 unique_toolkit/_common/validate_required_values.py,sha256=Y_M1ub9gIKP9qZ45F6Zq3ZHtuIqhmOjl8Z2Vd3avg8w,588
 unique_toolkit/_common/validators.py,sha256=uPGPkeygNi3KimWZxKOKYFxwpCxTkhhYBAn-b_5TS_M,2584
 unique_toolkit/app/__init__.py,sha256=ETxYDpEizg_PKmi4JPX_P76ySq-us-xypfAIdKQ1QZU,1284
@@ -54,13 +57,15 @@ unique_toolkit/framework_utilities/langchain/history.py,sha256=R9RuCeSFNaUO3OZ0G
 unique_toolkit/framework_utilities/openai/client.py,sha256=IasxPXlVJHIsZdXHin7yq-5tO4RNLUu9cEuhrgb4ghE,1205
 unique_toolkit/framework_utilities/openai/message_builder.py,sha256=VU6mJm_upLcarJQKFft_t1RlLRncWDxDuLC5LIJ5lQQ,4339
 unique_toolkit/framework_utilities/utils.py,sha256=JK7g2yMfEx3eMprug26769xqNpS5WJcizf8n2zWMBng,789
-unique_toolkit/history_manager/
+unique_toolkit/history_manager/history_construction_with_contents.py,sha256=xKUVnJ4ZJq4-nnO2_35dbDh9d-zfCJfRzuj7v9hXUdM,9049
+unique_toolkit/history_manager/history_manager.py,sha256=ULtsC7cGl92G2fXKIkEajH3tIy_qqWKIK8FudpNKhu4,8834
+unique_toolkit/history_manager/loop_token_reducer.py,sha256=-7Ezk3OLUsrU0Jd9Qc73_PBJZIayz7bVE3awc-q6Se0,17624
 unique_toolkit/history_manager/utils.py,sha256=3GT53SfOQ7g-dN3PHFIPaAab74sUfV28hbUtGMdX-bY,5607
 unique_toolkit/language_model/__init__.py,sha256=lRQyLlbwHbNFf4-0foBU13UGb09lwEeodbVsfsSgaCk,1971
 unique_toolkit/language_model/builder.py,sha256=4OKfwJfj3TrgO1ezc_ewIue6W7BCQ2ZYQXUckWVPPTA,3369
 unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
 unique_toolkit/language_model/functions.py,sha256=-nWgcscaZvEfuf2ftCQj8hyFY2RynPFO8XgX2gsN924,16665
-unique_toolkit/language_model/infos.py,sha256=
+unique_toolkit/language_model/infos.py,sha256=MbuXYtyTLik-7KTKlyQCjW3uNRTT5ddTvbpSarFxzPk,45715
 unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
 unique_toolkit/language_model/reference.py,sha256=nkX2VFz-IrUz8yqyc3G5jUMNwrNpxITBrMEKkbqqYoI,8583
 unique_toolkit/language_model/schemas.py,sha256=p95cAwrYmEBupI8f7JZY17aJTKEHvtqF88hMIzS5r_k,16259
@@ -68,7 +73,7 @@ unique_toolkit/language_model/service.py,sha256=N_I3VtK5B0G8s5c6TcBVWM7CcLGqakDh
 unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
 unique_toolkit/postprocessor/postprocessor_manager.py,sha256=68TAcXMU_ohWOtzo91LntY950HV9I9gGU92-V0Mxmr8,4239
 unique_toolkit/protocols/support.py,sha256=V15WEIFKVMyF1QCnR8vIi4GrJy4dfTCB6d6JlqPZ58o,2341
-unique_toolkit/reference_manager/reference_manager.py,sha256=
+unique_toolkit/reference_manager/reference_manager.py,sha256=WIvZkRgQztkY0zNTM_KIPSqJFT22HIGNexJ4yG3aj5E,3993
 unique_toolkit/short_term_memory/__init__.py,sha256=2mI3AUrffgH7Yt-xS57EGqnHf7jnn6xquoKEhJqk3Wg,185
 unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7aqW_OOpZB7sbz_Xg,34
 unique_toolkit/short_term_memory/functions.py,sha256=3WiK-xatY5nh4Dr5zlDUye1k3E6kr41RiscwtTplw5k,4484
@@ -90,7 +95,7 @@ unique_toolkit/tools/utils/execution/execution.py,sha256=vjG2Y6awsGNtlvyQAGCTthQ
 unique_toolkit/tools/utils/source_handling/schema.py,sha256=pvNhtL2daDLpCVIQpfdn6R35GvKmITVLXjZNLAwpgUE,871
 unique_toolkit/tools/utils/source_handling/source_formatting.py,sha256=C7uayNbdkNVJdEARA5CENnHtNY1SU6etlaqbgHNyxaQ,9152
 unique_toolkit/tools/utils/source_handling/tests/test_source_formatting.py,sha256=zu3AJnYH9CMqZPrxKEH3IgI-fM3nlvIBuspJG6W6B18,6978
-unique_toolkit-0.8.
-unique_toolkit-0.8.
-unique_toolkit-0.8.
-unique_toolkit-0.8.
+unique_toolkit-0.8.15.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_toolkit-0.8.15.dist-info/METADATA,sha256=HC0kUwceqxR_BLx4SUd7-mFMLZ4roX2mIxpTRy19xnE,27726
+unique_toolkit-0.8.15.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.8.15.dist-info/RECORD,,
{unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/LICENSE: File without changes
{unique_toolkit-0.8.13.dist-info → unique_toolkit-0.8.15.dist-info}/WHEEL: File without changes