unique_toolkit 1.38.4__py3-none-any.whl → 1.39.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/app/schemas.py +15 -7
- unique_toolkit/language_model/infos.py +58 -0
- unique_toolkit/services/chat_service.py +409 -1
- unique_toolkit/short_term_memory/functions.py +126 -4
- unique_toolkit/short_term_memory/schemas.py +27 -22
- {unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/METADATA +28 -20
- {unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/RECORD +9 -9
- {unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/LICENSE +0 -0
- {unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/WHEEL +0 -0
unique_toolkit/app/schemas.py
CHANGED
@@ -120,11 +120,15 @@ class McpServer(BaseModel):
 class ChatEventUserMessage(BaseModel):
     model_config = model_config
 
-    id: str
-    text: str
-    original_text: str
-    created_at: str
-    language: str
+    id: str = Field(
+        description="The id of the user message. On an event this corresponds to the user message that created the event."
+    )
+    text: str = Field(description="The text of the user message.")
+    original_text: str = Field(description="The original text of the user message.")
+    created_at: str = Field(
+        description="The creation date and time of the user message."
+    )
+    language: str = Field(description="The language of the user message.")
 
 
 @deprecated(
@@ -140,8 +144,12 @@ class EventUserMessage(ChatEventUserMessage):
 class ChatEventAssistantMessage(BaseModel):
     model_config = model_config
 
-    id: str
-    created_at: str
+    id: str = Field(
+        description="The id of the assistant message. On an event this corresponds to the assistant message that will be returned by the process handling the event."
+    )
+    created_at: str = Field(
+        description="The creation date and time of the assistant message."
+    )
 
 
 @deprecated(
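These hunks only attach `Field(...)` descriptions to fields that already existed, so event payloads keep the same shape. A minimal sketch of reading the new descriptions back through standard Pydantic v2 introspection (nothing here beyond what the diff and Pydantic itself provide):

```python
from unique_toolkit.app.schemas import ChatEventUserMessage

# Pydantic v2 exposes per-field metadata through `model_fields`; the
# descriptions added in 1.39.x surface there and in generated JSON schemas.
for name, field in ChatEventUserMessage.model_fields.items():
    print(f"{name}: {field.description}")
```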
unique_toolkit/language_model/infos.py
CHANGED
@@ -31,6 +31,8 @@ class LanguageModelName(StrEnum):
     AZURE_GPT_51_CHAT_2025_1113 = "AZURE_GPT_51_CHAT_2025_1113"
     AZURE_GPT_51_CODEX_2025_1113 = "AZURE_GPT_51_CODEX_2025_1113"
     AZURE_GPT_51_CODEX_MINI_2025_1113 = "AZURE_GPT_51_CODEX_MINI_2025_1113"
+    AZURE_GPT_52_2025_1211 = "AZURE_GPT_52_2025_1211"
+    AZURE_GPT_52_CHAT_2025_1211 = "AZURE_GPT_52_CHAT_2025_1211"
     AZURE_GPT_4o_2024_0513 = "AZURE_GPT_4o_2024_0513"
     AZURE_GPT_4o_2024_0806 = "AZURE_GPT_4o_2024_0806"
     AZURE_GPT_4o_2024_1120 = "AZURE_GPT_4o_2024_1120"
@@ -115,6 +117,8 @@ def get_encoder_name(model_name: LanguageModelName) -> EncoderName:
             | LMN.AZURE_GPT_51_CHAT_2025_1113
             | LMN.AZURE_GPT_51_CODEX_2025_1113
             | LMN.AZURE_GPT_51_CODEX_MINI_2025_1113
+            | LMN.AZURE_GPT_52_2025_1211
+            | LMN.AZURE_GPT_52_CHAT_2025_1211
             | LMN.LITELLM_OPENAI_GPT_5
             | LMN.LITELLM_OPENAI_GPT_5_MINI
             | LMN.LITELLM_OPENAI_GPT_5_NANO
@@ -611,6 +615,60 @@ class LanguageModelInfo(BaseModel):
                         "reasoning_effort": "medium",
                     },
                 )
+            case LanguageModelName.AZURE_GPT_52_2025_1211:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.AZURE,
+                    version="2025-12-11",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.CHAT_COMPLETIONS_API,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.REASONING,
+                        ModelCapabilities.RESPONSES_API,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.VISION,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=400_000, token_limit_output=128_000
+                    ),
+                    info_cutoff_at=date(2024, 9, 30),
+                    published_at=date(2025, 12, 11),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    default_options={
+                        "reasoning_effort": None,
+                    },
+                )
+            case LanguageModelName.AZURE_GPT_52_CHAT_2025_1211:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.AZURE,
+                    version="2025-12-11",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.CHAT_COMPLETIONS_API,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.RESPONSES_API,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=128_000, token_limit_output=16_384
+                    ),
+                    info_cutoff_at=date(2024, 9, 30),
+                    published_at=date(2025, 12, 11),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    default_options={
+                        "reasoning_effort": "medium",
+                    },
+                )
             case LanguageModelName.AZURE_GPT_4_TURBO_2024_0409:
                 return cls(
                     name=model_name,
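Both new names flow through the existing `get_encoder_name` match and the `LanguageModelInfo` factory shown above. A short usage sketch, assuming the README-documented pattern of wrapping a `LanguageModelName` in `LanguageModel` also covers the new entries; the `LanguageModel` import path and attribute names are assumptions, not shown in this diff:

```python
from unique_toolkit.language_model import LanguageModel  # import path assumed
from unique_toolkit.language_model.infos import LanguageModelName

# Look up the metadata registered for the new GPT-5.2 deployment.
model = LanguageModel(LanguageModelName.AZURE_GPT_52_2025_1211)

# Attribute names assumed to mirror the fields set in the new `case` branch above.
print(model.version)       # "2025-12-11"
print(model.token_limits)  # 400_000 input / 128_000 output tokens
```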
unique_toolkit/services/chat_service.py
CHANGED
@@ -1,5 +1,5 @@
 import logging
-from typing import Any, Sequence
+from typing import Any, Sequence, overload
 
 import unique_sdk
 from openai.types.chat import ChatCompletionToolChoiceOptionParam
@@ -13,6 +13,7 @@ from openai.types.responses import (
     response_create_params,
 )
 from openai.types.shared_params import Metadata, Reasoning
+from pydantic import BaseModel
 from typing_extensions import deprecated
 
 from unique_toolkit._common.utils.files import is_file_content, is_image_content
@@ -93,6 +94,13 @@ from unique_toolkit.language_model.schemas import (
     LanguageModelToolDescription,
     ResponsesLanguageModelStreamResponse,
 )
+from unique_toolkit.short_term_memory.functions import (
+    create_memory,
+    create_memory_async,
+    find_latest_memory,
+    find_latest_memory_async,
+)
+from unique_toolkit.short_term_memory.schemas import ShortTermMemory
 
 logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
 
@@ -1654,3 +1662,403 @@ class ChatService(ChatServiceDeprecated):
             if is_image_content(filename=c.key):
                 images.append(c)
         return images, files
+
+    # Short Term Memories
+    ############################################################################
+
+    def create_chat_memory_by_id(
+        self, *, chat_id: str, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for a specific chat synchronously.
+
+        Args:
+            chat_id (str): The chat ID
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        # Convert BaseModel to JSON string if needed
+        if isinstance(value, BaseModel):
+            value = value.model_dump_json()
+
+        return create_memory(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            value=value,
+            chat_id=chat_id,
+        )
+
+    async def create_chat_memory_by_id_async(
+        self, *, chat_id: str, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for a specific chat asynchronously.
+
+        Args:
+            chat_id (str): The chat ID
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        # Convert BaseModel to JSON string if needed
+        if isinstance(value, BaseModel):
+            value = value.model_dump_json()
+
+        return await create_memory_async(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            value=value,
+            chat_id=chat_id,
+        )
+
+    def create_message_memory_by_id(
+        self, *, message_id: str, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for a specific message synchronously.
+
+        Args:
+            message_id (str): The message ID
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        # Convert BaseModel to JSON string if needed
+        if isinstance(value, BaseModel):
+            value = value.model_dump_json()
+
+        return create_memory(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            value=value,
+            message_id=message_id,
+        )
+
+    async def create_message_memory_by_id_async(
+        self, *, message_id: str, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for a specific message asynchronously.
+
+        Args:
+            message_id (str): The message ID
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        # Convert BaseModel to JSON string if needed
+        if isinstance(value, BaseModel):
+            value = value.model_dump_json()
+
+        return await create_memory_async(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            value=value,
+            message_id=message_id,
+        )
+
+    def find_chat_memory_by_id(self, *, chat_id: str, key: str) -> ShortTermMemory:
+        """Finds the latest short-term memory for a specific chat synchronously.
+
+        Args:
+            chat_id (str): The chat ID
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return find_latest_memory(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            chat_id=chat_id,
+        )
+
+    async def find_chat_memory_by_id_async(
+        self, *, chat_id: str, key: str
+    ) -> ShortTermMemory:
+        """Finds the latest short-term memory for a specific chat asynchronously.
+
+        Args:
+            chat_id (str): The chat ID
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return await find_latest_memory_async(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            chat_id=chat_id,
+        )
+
+    def find_message_memory_by_id(
+        self, *, message_id: str, key: str
+    ) -> ShortTermMemory:
+        """Finds the latest short-term memory for a specific message synchronously.
+
+        Args:
+            message_id (str): The message ID
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return find_latest_memory(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            message_id=message_id,
+        )
+
+    async def find_message_memory_by_id_async(
+        self, *, message_id: str, key: str
+    ) -> ShortTermMemory:
+        """Finds the latest short-term memory for a specific message asynchronously.
+
+        Args:
+            message_id (str): The message ID
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return await find_latest_memory_async(
+            user_id=self._user_id,
+            company_id=self._company_id,
+            key=key,
+            message_id=message_id,
+        )
+
+    # Convenience methods using current chat/message IDs
+    ############################################################################
+
+    def create_chat_memory(
+        self, *, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for the current chat synchronously.
+
+        Args:
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        return self.create_chat_memory_by_id(
+            chat_id=self._chat_id,
+            key=key,
+            value=value,
+        )
+
+    async def create_chat_memory_async(
+        self, *, key: str, value: str | dict | BaseModel
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for the current chat asynchronously.
+
+        Args:
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        return await self.create_chat_memory_by_id_async(
+            chat_id=self._chat_id,
+            key=key,
+            value=value,
+        )
+
+    @overload
+    def create_message_memory(
+        self,
+        *,
+        key: str,
+        value: str | dict | BaseModel,
+    ) -> ShortTermMemory: ...
+
+    @overload
+    def create_message_memory(
+        self, *, key: str, value: str | dict | BaseModel, message_id: str
+    ) -> ShortTermMemory: ...
+
+    def create_message_memory(
+        self, *, key: str, value: str | dict | BaseModel, message_id: str | None = None
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for the current assistant message synchronously.
+
+        Args:
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        return self.create_message_memory_by_id(
+            key=key,
+            value=value,
+            message_id=message_id or self._assistant_message_id,
+        )
+
+    @overload
+    async def create_message_memory_async(
+        self,
+        *,
+        key: str,
+        value: str | dict | BaseModel,
+    ) -> ShortTermMemory: ...
+
+    @overload
+    async def create_message_memory_async(
+        self, *, key: str, value: str | dict | BaseModel, message_id: str
+    ) -> ShortTermMemory: ...
+
+    async def create_message_memory_async(
+        self, *, key: str, value: str | dict | BaseModel, message_id: str | None = None
+    ) -> ShortTermMemory:
+        """Creates a short-term memory for the current assistant message asynchronously.
+
+        Args:
+            key (str): The memory key
+            value (str | dict | BaseModel): The memory value
+
+        Returns:
+            ShortTermMemory: The created short-term memory
+
+        Raises:
+            Exception: If the creation fails
+        """
+        return await self.create_message_memory_by_id_async(
+            message_id=message_id or self._assistant_message_id,
+            key=key,
+            value=value,
+        )
+
+    def find_chat_memory(self, *, key: str) -> ShortTermMemory:
+        """Finds the latest short-term memory for the current chat synchronously.
+
+        Args:
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return self.find_chat_memory_by_id(
+            chat_id=self._chat_id,
+            key=key,
+        )
+
+    async def find_chat_memory_async(self, *, key: str) -> ShortTermMemory:
+        """Finds the latest short-term memory for the current chat asynchronously.
+
+        Args:
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return await self.find_chat_memory_by_id_async(
+            chat_id=self._chat_id,
+            key=key,
+        )
+
+    @overload
+    def find_message_memory(self, *, key: str) -> ShortTermMemory: ...
+
+    @overload
+    def find_message_memory(self, *, key: str, message_id: str) -> ShortTermMemory: ...
+
+    def find_message_memory(
+        self, *, key: str, message_id: str | None = None
+    ) -> ShortTermMemory:
+        """Finds the latest short-term memory for the current assistant message synchronously.
+
+        Args:
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return self.find_message_memory_by_id(
+            message_id=message_id or self._assistant_message_id,
+            key=key,
+        )
+
+    @overload
+    async def find_message_memory_async(self, *, key: str) -> ShortTermMemory: ...
+
+    @overload
+    async def find_message_memory_async(
+        self, *, key: str, message_id: str
+    ) -> ShortTermMemory: ...
+
+    async def find_message_memory_async(
+        self, *, key: str, message_id: str | None = None
+    ) -> ShortTermMemory:
+        """Finds the latest short-term memory for the current assistant message asynchronously.
+
+        Args:
+            key (str): The memory key
+
+        Returns:
+            ShortTermMemory: The latest short-term memory
+
+        Raises:
+            Exception: If the retrieval fails
+        """
+        return await self.find_message_memory_by_id_async(
+            message_id=message_id or self._assistant_message_id,
+            key=key,
+        )
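Taken together, these methods expose short-term memory directly on `ChatService`: the `*_by_id` variants take explicit IDs, while the convenience methods default to the current chat or assistant message. A minimal usage sketch, assuming a `ChatService` built from an incoming chat event (the `event` placeholder and the constructor call are assumptions, not shown in this diff):

```python
from pydantic import BaseModel

from unique_toolkit import ChatService  # import path per the package README


class DraftState(BaseModel):
    topic: str
    step: int


event = ...  # placeholder for an incoming chat event payload
chat_service = ChatService(event)  # construction from an event is assumed here

# Store a value on the current chat; BaseModel values are serialized with
# `model_dump_json()` before being handed to `create_memory`.
chat_service.create_chat_memory(key="draft_state", value=DraftState(topic="q4", step=1))

# Retrieve the latest value stored under the same key for this chat.
memory = chat_service.find_chat_memory(key="draft_state")
print(memory.value)

# Message-scoped helpers default to the current assistant message when no
# message_id is passed.
chat_service.create_message_memory(key="tool_scratchpad", value={"calls": []})
```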
unique_toolkit/short_term_memory/functions.py
CHANGED
@@ -2,6 +2,7 @@ import json
 import logging
 
 import unique_sdk
+from typing_extensions import Any
 
 from unique_toolkit.short_term_memory.constants import DOMAIN_NAME
 from unique_toolkit.short_term_memory.schemas import ShortTermMemory
@@ -41,7 +42,7 @@ async def find_latest_memory_async(
             chatId=chat_id,
             messageId=message_id,
         )
-        return ShortTermMemory(
+        return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
     except Exception as e:
         logger.error(f"Error finding latest short term memory: {e}")
         raise e
@@ -79,7 +80,7 @@ def find_latest_memory(
             chatId=chat_id,
             messageId=message_id,
         )
-        return ShortTermMemory(
+        return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
     except Exception as e:
         logger.error(f"Error finding latest short term memory: {e}")
         raise e
@@ -124,13 +125,14 @@ async def create_memory_async(
             messageId=message_id,
             data=value,
         )
-        return ShortTermMemory(
+        return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
     except Exception as e:
         logger.error(f"Error creating short term memory: {e}")
         raise e
 
 
 def create_memory(
+    *,
     user_id: str,
     company_id: str,
     key: str,
@@ -169,7 +171,127 @@ def create_memory(
             messageId=message_id,
             data=value,
         )
-        return ShortTermMemory(
+        return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
     except Exception as e:
         logger.error(f"Error creating short term memory: {e}")
         raise e
+
+
+def find_last_chat_memory(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    chat_id: str,
+) -> ShortTermMemory:
+    """
+    Find the last chat short term memory.
+    """
+    stm = unique_sdk.ShortTermMemory.find_latest(
+        user_id=user_id,
+        company_id=company_id,
+        memoryName=key,
+        chatId=chat_id,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
+
+
+async def find_last_chat_memory_async(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    chat_id: str,
+) -> ShortTermMemory:
+    """
+    Find the last chat short term memory.
+    """
+    stm = await unique_sdk.ShortTermMemory.find_latest_async(
+        user_id=user_id,
+        company_id=company_id,
+        memoryName=key,
+        chatId=chat_id,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
+
+
+def find_last_message_memory(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    message_id: str,
+) -> ShortTermMemory:
+    """
+    Find the last message short term memory.
+    """
+    stm = unique_sdk.ShortTermMemory.find_latest(
+        user_id=user_id,
+        company_id=company_id,
+        chatId=None,
+        memoryName=key,
+        messageId=message_id,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
+
+
+async def find_last_message_memory_async(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    message_id: str,
+) -> ShortTermMemory:
+    """
+    Find the last message short term memory.
+    """
+    stm = await unique_sdk.ShortTermMemory.find_latest_async(
+        user_id=user_id,
+        company_id=company_id,
+        chatId=None,
+        memoryName=key,
+        messageId=message_id,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
+
+
+def create_chat_memory(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    value: dict[str, Any] | str,
+    chat_id: str,
+) -> ShortTermMemory:
+    """
+    Create a chat short term memory.
+    """
+    stm = unique_sdk.ShortTermMemory.create(
+        user_id=user_id,
+        company_id=company_id,
+        memoryName=key,
+        chatId=chat_id,
+        data=json.dumps(value) if isinstance(value, dict) else value,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
+
+
+async def create_chat_memory_async(
+    *,
+    user_id: str,
+    company_id: str,
+    key: str,
+    value: dict[str, Any] | str,
+    chat_id: str,
+) -> ShortTermMemory:
+    """
+    Create a chat short term memory.
+    """
+    stm = await unique_sdk.ShortTermMemory.create_async(
+        user_id=user_id,
+        company_id=company_id,
+        memoryName=key,
+        chatId=chat_id,
+        data=json.dumps(value) if isinstance(value, dict) else value,
+    )
+    return ShortTermMemory.model_validate(stm, by_alias=True, by_name=True)
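Outside of `ChatService`, these new module-level helpers can be called directly with explicit identifiers. A small sketch, assuming valid Unique credentials; the ID values below are placeholders:

```python
from unique_toolkit.short_term_memory.functions import (
    create_chat_memory,
    find_last_chat_memory,
)

# Placeholder identifiers – in practice these come from your Unique app context.
user_id, company_id, chat_id = "user_123", "company_123", "chat_123"

# All new helpers are keyword-only; dict values are JSON-encoded before upload.
create_chat_memory(
    user_id=user_id,
    company_id=company_id,
    key="preferences",
    value={"language": "en"},
    chat_id=chat_id,
)

memory = find_last_chat_memory(
    user_id=user_id,
    company_id=company_id,
    key="preferences",
    chat_id=chat_id,
)
print(memory.value)
```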
unique_toolkit/short_term_memory/schemas.py
CHANGED
@@ -1,7 +1,8 @@
 import json
+from typing import Any
 
 from humps import camelize
-from pydantic import BaseModel, ConfigDict, Field, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 model_config = ConfigDict(
     alias_generator=camelize,
@@ -17,27 +18,31 @@ class ShortTermMemory(BaseModel):
     key: str = Field(alias="object")
     chat_id: str | None
     message_id: str | None
-    data: str | dict | int | float | bool | list | None
     [20 further removed lines are not rendered in this diff view]
+    data: str | dict | int | float | bool | list | None = Field(deprecated=True)
+    value: str | dict[str, Any] = Field(default_factory=dict)
+
+    @model_validator(mode="after")
+    def _data_to_value(self) -> "ShortTermMemory":
+        if isinstance(self.data, dict):
+            self.value = self.data
+        elif isinstance(self.data, str):
+            try:
+                self.value = json.loads(self.data)
+            except json.JSONDecodeError:
+                self.value = self.data
+        elif self.data is None:
+            self.value = ""
+        else:
+            self.value = str(self.data)
+        return self
+
+    @model_validator(mode="after")
+    def validate_message_id_and_chat_id(self):
+        if (self.message_id is None and self.chat_id is None) or (
+            self.message_id is not None and self.chat_id is not None
+        ):
+            raise ValueError("Either message_id or chat_id must be provided")
+        return self
 
     @field_validator("data", mode="before")
     def validate_data(cls, v):
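For consumers, the practical effect is that `data` is now marked deprecated in favour of the coerced `value` field. A sketch of the coercion rules, validating a payload the same way the functions above do; the payload keys are assumptions based on the camelized aliases (`chatId`, `messageId`) and the `object` alias shown for `key`:

```python
from unique_toolkit.short_term_memory.schemas import ShortTermMemory

# A raw payload as it might come back from unique_sdk (keys assumed from the
# camelize alias generator and the `object` alias in the schema above).
raw = {
    "id": "stm_123",
    "object": "preferences",
    "chatId": "chat_123",
    "messageId": None,
    "data": '{"language": "en"}',
}

stm = ShortTermMemory.model_validate(raw, by_alias=True, by_name=True)

# `data` is kept but deprecated; `value` holds the coerced result:
# dicts pass through, JSON strings are parsed, None becomes "", anything else is str()-ed.
print(stm.value)  # {'language': 'en'}
```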
{unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 1.38.4
+Version: 1.39.1
 Summary:
 License: Proprietary
 Author: Cedric Klinkert
@@ -41,9 +41,25 @@ The Toolkit is structured along the following domains:
 - `unique_toolkit.language_model`
 - `unique_toolkit.short_term_memory`
 
-Each domain comprises a set of
+Each domain comprises a set of schemas (in `schemas.py`) are used in functions (in `functions.py`) which encapsulates the basic functionalities to interact with the plattform.
+The above domains represent the internal structure of the Unique platform.
 
-
+For the `developers` we expose interfaces via `services` classes that correspond directly to an frontend or an entity the `user` interacts with.
+
+The following services are currently available:
+
+| Service | Responsability |
+|--|--|
+| ChatService | All interactions with the chat interface |
+| KnowledgeBaseService | All interaction with the knowledgebase |
+
+The services can be directly import as
+
+```
+from unique_toolkit import ChatService, KnowledgeBaseService
+
+
+In addition, the `unique_toolkit.app` module provides functions to initialize apps and dev utilities to interact with the Unique platform.
 
 ## Changelog
 
@@ -65,7 +81,6 @@ The `unique_toolkit.app` module encompasses functions for initializing and secur
 The `unique_toolkit.chat` module encompasses all chat related functionality.
 
 - `functions.py` comprises the functions to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
-- `service.py` comprises the ChatService and provides an interface to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message and stream complete.
 - `schemas.py` comprises all relevant schemas, e.g., ChatMessage, used in the ChatService.
 - `utils.py` comprises utility functions to use and convert ChatMessage objects in assistants, e.g., convert_chat_history_to_injectable_string converts the chat history to a string that can be injected into a prompt.
 
@@ -74,11 +89,10 @@ The `unique_toolkit.chat` module encompasses all chat related functionality.
 The `unique_toolkit.content` module encompasses all content related functionality. Content can be any type of textual data that is stored in the Knowledgebase on the Unique platform. During the ingestion of the content, the content is parsed, split in chunks, indexed, and stored in the database.
 
 - `functions.py` comprises the functions to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
-- `service.py` comprises the ContentService and provides an interface to interact with the content, e.g., search content, search content chunks, upload and download content.
 - `schemas.py` comprises all relevant schemas, e.g., Content and ContentChunk, used in the ContentService.
 - `utils.py` comprise utility functions to manipulate Content and ContentChunk objects, e.g., sort_content_chunks and merge_content_chunks.
 
-## Embedding
+## Embedding (To be Deprecated)
 
 The `unique_toolkit.embedding` module encompasses all embedding related functionality. Embeddings are used to represent textual data in a high-dimensional space. The embeddings can be used to calculate the similarity between two texts, for instance.
 
@@ -86,34 +100,22 @@ The `unique_toolkit.embedding` module encompasses all embedding related function
 - `service.py` encompasses the EmbeddingService and provides an interface to interact with the embeddings, e.g., embed text and calculate the similarity between two texts.
 - `schemas.py` comprises all relevant schemas, e.g., Embeddings, used in the EmbeddingService.
 
-## Language Model
+## Language Model
 
 The `unique_toolkit.language_model` module encompasses all language model related functionality and information on the different language models deployed through the
 Unique platform.
 
 - `infos.py` comprises the information on all language models deployed through the Unique platform. We recommend to use the LanguageModel class, initialized with the LanguageModelName, e.g., LanguageModel(LanguageModelName.AZURE_GPT_4o_2024_1120) to get the information on the specific language model like the name, version, token limits or retirement date.
 - `functions.py` comprises the functions to complete and stream complete to chat.
-- `service.py` comprises the LanguageModelService and provides an interface to interact with the language models, e.g., complete.
 - `schemas.py` comprises all relevant schemas, e.g., LanguageModelResponse, used in the LanguageModelService.
 - `utils.py` comprises utility functions to parse the output of the language model, e.g., convert_string_to_json finds and parses the last json object in a string.
 
-## Short Term Memory
+## Short Term Memory
 
 The `unique_toolkit.short_term_memory` module encompasses all short term memory related functionality.
 
 - `functions.py` comprises the functions to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
-- `service.py` comprises the ShortTermMemoryService and provides an interface to interact with the short term memory, e.g., create memory.
 - `schemas.py` comprises all relevant schemas, e.g., ShortTermMemory, used in the ShortTermMemoryService.
-
-# Development instructions
-
-1. Install poetry on your system (through `brew` or `pipx`).
-
-2. Install `pyenv` and install python 3.11. `pyenv` is recommended as otherwise poetry uses the python version used to install itself and not the user preferred python version.
-
-3. If you then run `python --version` in your terminal, you should be able to see python version as specified in `.python-version`.
-
-4. Then finally run `poetry install` to install the package and all dependencies.
 # Changelog
 
 All notable changes to this project will be documented in this file.
@@ -121,6 +123,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.39.1] - 2025-12-17
+- Add GPT-5.2, GPT-5.2_CHAT to supported models list
+
+## [1.39.0] - 2025-12-17
+- Adding simpler shortterm message abilities to chat service
+
 ## [1.38.4] - 2025-12-17
 - Improving handling of tool calls with Qwen models
 
{unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/RECORD
CHANGED
@@ -144,7 +144,7 @@ unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3
 unique_toolkit/app/init_sdk.py,sha256=5_oDoETr6akwYyBCb0ivTdMNu3SVgPSkrXcDS6ELyY8,2269
 unique_toolkit/app/performance/async_tasks.py,sha256=H0l3OAcosLwNHZ8d2pd-Di4wHIXfclEvagi5kfqLFPA,1941
 unique_toolkit/app/performance/async_wrapper.py,sha256=yVVcRDkcdyfjsxro-N29SBvi-7773wnfDplef6-y8xw,1077
-unique_toolkit/app/schemas.py,sha256=
+unique_toolkit/app/schemas.py,sha256=17Olyqc58LjFaZ6kYtPThWsGmRM0qGzDwRTekDNNw-E,10825
 unique_toolkit/app/unique_settings.py,sha256=NTfa3a8wWzBDx4_4Irqyhy4mpXyPU6Munqs41ozPFnE,12366
 unique_toolkit/app/verification.py,sha256=GxFFwcJMy25fCA_Xe89wKW7bgqOu8PAs5y8QpHF0GSc,3861
 unique_toolkit/app/webhook.py,sha256=k7DP1UTR3p7D4qzuKPKVmGMAkDVHfALrnMIzTZqj_OI,2320
@@ -194,7 +194,7 @@ unique_toolkit/language_model/builder.py,sha256=4OKfwJfj3TrgO1ezc_ewIue6W7BCQ2ZY
 unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
 unique_toolkit/language_model/default_language_model.py,sha256=-_DBsJhLCsFdaU4ynAkyW0jYIl2lhrPybZm1K-GgVJs,125
 unique_toolkit/language_model/functions.py,sha256=PTBm2BBkuqISVHoyUqMIGHGXT-RMSAfz0F_Ylo2esQ8,18246
-unique_toolkit/language_model/infos.py,sha256=
+unique_toolkit/language_model/infos.py,sha256=qrGe4ZzfqRBpXHA67Tl8mEwO1LVfw_aEwAc2rnTjYu8,85791
 unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
 unique_toolkit/language_model/reference.py,sha256=nkX2VFz-IrUz8yqyc3G5jUMNwrNpxITBrMEKkbqqYoI,8583
 unique_toolkit/language_model/schemas.py,sha256=ATiHjhfGxoubS332XuhL9PKSoFewcWvPTUVBaNGWlJo,23994
@@ -202,17 +202,17 @@ unique_toolkit/language_model/service.py,sha256=fI2S5JLawJRRkKg086Ysz2Of4AOBHPN-
 unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
 unique_toolkit/protocols/support.py,sha256=ZEnbQL5w2-T_1AeM8OHycZJ3qbdfVI1nXe0nL9esQEw,5544
 unique_toolkit/services/__init__.py,sha256=90-IT5FjMcnlqxjp5kme9Fqgp_on46rggctIqHMdqsw,195
-unique_toolkit/services/chat_service.py,sha256=
+unique_toolkit/services/chat_service.py,sha256=EdeHseyBXBtXWx2gK5jXoGWBYjG6uyoLusQpGH8I6x0,73065
 unique_toolkit/services/knowledge_base.py,sha256=uc89GL_NZXeFkJKkdHSSh2y1Wx0tmgasWk6uyGi4G_M,36210
 unique_toolkit/short_term_memory/__init__.py,sha256=2mI3AUrffgH7Yt-xS57EGqnHf7jnn6xquoKEhJqk3Wg,185
 unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7aqW_OOpZB7sbz_Xg,34
-unique_toolkit/short_term_memory/functions.py,sha256
-unique_toolkit/short_term_memory/schemas.py,sha256=
+unique_toolkit/short_term_memory/functions.py,sha256=-3xEIAnHL4x5cuuWS_yUNB3f2F4HGkTT6nyMjbgtMq4,7555
+unique_toolkit/short_term_memory/schemas.py,sha256=rS8Vvly-FZMiFTn_eGcSQJq_CP5AV-MDNDEmHSReCeI,1637
 unique_toolkit/short_term_memory/service.py,sha256=5PeVBu1ZCAfyDb2HLVvlmqSbyzBBuE9sI2o9Aajqjxg,8884
 unique_toolkit/smart_rules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 unique_toolkit/smart_rules/compile.py,sha256=Ozhh70qCn2yOzRWr9d8WmJeTo7AQurwd3tStgBMPFLA,1246
 unique_toolkit/test_utilities/events.py,sha256=_mwV2bs5iLjxS1ynDCjaIq-gjjKhXYCK-iy3dRfvO3g,6410
-unique_toolkit-1.
-unique_toolkit-1.
-unique_toolkit-1.
-unique_toolkit-1.
+unique_toolkit-1.39.1.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_toolkit-1.39.1.dist-info/METADATA,sha256=BBk-PZwJU2Ir0AmPhW91Wpv9a33H0vMVL1ZY8jaVDyo,45948
+unique_toolkit-1.39.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-1.39.1.dist-info/RECORD,,
{unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/LICENSE
File without changes
{unique_toolkit-1.38.4.dist-info → unique_toolkit-1.39.1.dist-info}/WHEEL
File without changes