unique_toolkit 0.6.5__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -102,7 +102,7 @@ class ChatEventPayload(BaseModel):
     metadata_filter: Optional[dict[str, Any]] = None


-@deprecated("""UUse `ChatEventPayload` instead.
+@deprecated("""Use `ChatEventPayload` instead.
 This class will be removed in the next major version.""")
 class EventPayload(ChatEventPayload):
     user_message: EventUserMessage
@@ -1,10 +1,15 @@
+import json
 import logging
+import os
 from typing import Callable, TypeVar

 import unique_sdk
+from pydantic import ValidationError

 from unique_toolkit.app.schemas import Event

+logger = logging.getLogger(f"toolkit.{__name__}")
+

 class WebhookVerificationError(Exception):
     """Custom exception for webhook verification errors."""
@@ -19,7 +24,7 @@ def verify_signature_and_construct_event(
     headers: dict[str, str],
     payload: bytes,
     endpoint_secret: str,
-    logger: logging.Logger = logging.getLogger(__name__),
+    logger: logging.Logger = logger,
     event_constructor: Callable[..., T] = Event,
 ) -> T:
     """
@@ -57,3 +62,51 @@ def verify_signature_and_construct_event(
     except unique_sdk.SignatureVerificationError as e:
         logger.error("⚠️ Webhook signature verification failed. " + str(e))
         raise WebhookVerificationError(f"Signature verification failed: {str(e)}")
+
+
+def verify_request_and_construct_event(
+    assistant_name: str,
+    payload: bytes,
+    headers: dict[str, str],
+    event_constructor: Callable[..., Event] = Event,
+) -> tuple[str, int] | tuple[Event, int]:
+    """Check the payload, authenticate and genenrate the event if the payload is correct"""
+    logger.info(f"{assistant_name} - received request")
+
+    try:
+        payload_decoded = json.loads(payload)
+    except json.decoder.JSONDecodeError as e:
+        logger.error(f"Error decoding payload: {e}", exc_info=True)
+        return "Invalid payload", 400
+
+    endpoint_secret = os.environ.get("ENDPOINT_SECRET", None)
+    if endpoint_secret:
+        response = verify_signature_and_construct_event(
+            headers=headers,  # type: ignore
+            payload=payload,
+            endpoint_secret=endpoint_secret,
+            logger=logger,
+            event_constructor=event_constructor,
+        )
+        if isinstance(response, tuple):
+            return response  # Error response
+        event = response  # This is an event since it is not a tuple
+    else:
+        try:
+            event = event_constructor(**payload_decoded)
+        except ValidationError as e:
+            # pydantic errors https://docs.pydantic.dev/2.10/errors/errors/
+            logger.error(f"Validation error with model: {e.json()}", exc_info=True)
+            raise ValidationError(e)
+        except ValueError as e:
+            logger.error(f"Error deserializing event: {e}", exc_info=True)
+            return "Invalid event", 400
+
+    if not event.payload.name == assistant_name:
+        logger.error(
+            f"{assistant_name}: Incorrect assistant: {event.payload.name}: Expected {assistant_name}"
+        )
+        return f"Not {assistant_name} event", 400
+
+    logger.info(f"{assistant_name} - received event")
+    return event, 200
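For orientation, here is a minimal sketch of how the new `verify_request_and_construct_event` helper could back a webhook endpoint. The FastAPI app, route path, and assistant name are illustrative assumptions, not part of the package; the helper itself reads `ENDPOINT_SECRET` from the environment when signature verification is enabled, as shown in the hunk above.

```python
# Hypothetical FastAPI endpoint wiring for the new helper. The app, route,
# and assistant name are assumptions for illustration only.
from fastapi import FastAPI, Request, Response

from unique_toolkit.app.schemas import Event
from unique_toolkit.app.verification import verify_request_and_construct_event

app = FastAPI()


@app.post("/webhook")
async def webhook(request: Request) -> Response:
    payload = await request.body()
    result, status_code = verify_request_and_construct_event(
        assistant_name="my-assistant",  # assumed assistant name
        payload=payload,
        headers=dict(request.headers),
        event_constructor=Event,
    )
    if isinstance(result, str):
        # Error branch: result is a message such as "Invalid payload", 400
        return Response(content=result, status_code=status_code)

    event: Event = result  # verified event, status_code == 200
    # ... hand the event to the assistant logic here ...
    return Response(content="OK", status_code=status_code)
```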
@@ -3,6 +3,7 @@ from typing_extensions import Self
 from unique_toolkit.language_model import (
     LanguageModelAssistantMessage,
     LanguageModelMessage,
+    LanguageModelMessageRole,
     LanguageModelMessages,
     LanguageModelSystemMessage,
     LanguageModelToolMessage,
@@ -14,6 +15,11 @@ class MessagesBuilder:
     def __init__(self):
         self.messages: list[LanguageModelMessage] = []

+    def message_append(self, role: LanguageModelMessageRole, content: str):
+        message = LanguageModelMessage(role=role, content=content)
+        self.messages.append(message)
+        return self
+
     def system_message_append(self, content: str) -> Self:
         """Appends a system message to the messages list."""
         message = LanguageModelSystemMessage(content=content)
@@ -26,15 +32,18 @@ class MessagesBuilder:
         self.messages.append(message)
         return self  # Return self to allow method chaining

-    def image_message_append(self, content: str, images: list[str]) -> Self:
-        message = LanguageModelUserMessage(
+    def image_message_append(
+        self, content: str, images: list[str], role=LanguageModelMessageRole.USER
+    ) -> Self:
+        message = LanguageModelMessage(
+            role=role,
             content=[
                 {"type": "text", "text": content},
                 *[
                     {"type": "image_url", "imageUrl": {"url": image}}
                     for image in images
                 ],
-            ]
+            ],
         )
         self.messages.append(message)
         return self
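A short usage sketch of the extended builder, chaining the new `message_append` with the existing methods. The prompt text and image URL are placeholders; only methods and attributes visible in the hunk above are used.

```python
# Illustrative use of the 0.6.7 MessagesBuilder additions.
from unique_toolkit.language_model import LanguageModelMessageRole
from unique_toolkit.language_model.builder import MessagesBuilder

builder = (
    MessagesBuilder()
    .system_message_append("You are a helpful assistant.")
    # New: append a message with an explicit role
    .message_append(LanguageModelMessageRole.USER, "Describe this picture.")
    # image_message_append now defaults to the USER role but accepts any role
    .image_message_append(
        "What is shown here?",
        images=["https://example.com/picture.png"],
    )
)

messages = builder.messages  # list[LanguageModelMessage]
```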
@@ -1,6 +1,6 @@
 from datetime import date
 from enum import StrEnum
-from typing import ClassVar, Optional
+from typing import ClassVar, Optional, Self

 from pydantic import BaseModel
 from typing_extensions import deprecated
@@ -11,13 +11,14 @@ from unique_toolkit.language_model.schemas import LanguageModelTokenLimits
 class LanguageModelName(StrEnum):
     AZURE_GPT_35_TURBO_0125 = "AZURE_GPT_35_TURBO_0125"
     AZURE_GPT_4_0613 = "AZURE_GPT_4_0613"
-    AZURE_GPT_4_TURBO_1106 = "AZURE_GPT_4_TURBO_1106"
-    AZURE_GPT_4_VISION_PREVIEW = "AZURE_GPT_4_VISION_PREVIEW"
     AZURE_GPT_4_32K_0613 = "AZURE_GPT_4_32K_0613"
     AZURE_GPT_4_TURBO_2024_0409 = "AZURE_GPT_4_TURBO_2024_0409"
     AZURE_GPT_4o_2024_0513 = "AZURE_GPT_4o_2024_0513"
     AZURE_GPT_4o_2024_0806 = "AZURE_GPT_4o_2024_0806"
     AZURE_GPT_4o_MINI_2024_0718 = "AZURE_GPT_4o_MINI_2024_0718"
+    AZURE_GPT_o1_2024_1217 = "AZURE_GPT_o1_2024_1217"
+    AZURE_GPT_o1_MINI_2024_0912 = "AZURE_GPT_o1_MINI_2024_0912"
+    AZURE_GPT_o3_MINI_2025_0131 = "AZURE_GPT_o3_MINI_2025_0131"


 class EncoderName(StrEnum):
@@ -25,15 +26,13 @@ class EncoderName(StrEnum):
     CL100K_BASE = "cl100k_base"


-def get_encoder_name(model_name: LanguageModelName) -> Optional[EncoderName]:
+def get_encoder_name(model_name: LanguageModelName) -> EncoderName:
     LMN = LanguageModelName
     match model_name:
         case LMN.AZURE_GPT_35_TURBO_0125:
             return EncoderName.CL100K_BASE
         case (
             LMN.AZURE_GPT_4_0613
-            | LMN.AZURE_GPT_4_TURBO_1106
-            | LMN.AZURE_GPT_4_VISION_PREVIEW
             | LMN.AZURE_GPT_4_32K_0613
             | LMN.AZURE_GPT_4_TURBO_2024_0409
         ):
@@ -45,8 +44,10 @@ def get_encoder_name(model_name: LanguageModelName) -> Optional[EncoderName]:
         ):
             return EncoderName.O200K_BASE
         case _:
-            print(f"{model_name} is not supported. Please add encoder information.")
-            return None
+            print(
+                f"{model_name} is not supported. Please add encoder information. Using default"
+            )
+            return EncoderName.CL100K_BASE


 class LanguageModelProvider(StrEnum):
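Note the changed contract of `get_encoder_name`: it now always returns an `EncoderName`, falling back to `cl100k_base` with a printed warning instead of returning `None`. A minimal sketch of what that means for callers, using only values visible in the hunks above:

```python
# Callers no longer need an `if encoder is None` branch.
from unique_toolkit.language_model.infos import (
    EncoderName,
    LanguageModelName,
    get_encoder_name,
)

encoder = get_encoder_name(LanguageModelName.AZURE_GPT_35_TURBO_0125)
assert encoder == EncoderName.CL100K_BASE  # explicit case in the match
# Any model without an explicit case now also yields EncoderName.CL100K_BASE.
```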
@@ -54,13 +55,28 @@ class LanguageModelProvider(StrEnum):
     CUSTOM = "CUSTOM"


+class ModelCapabilities(StrEnum):
+    FUNCTION_CALLING = "function_calling"
+    PARALLEL_FUNCTION_CALLING = "parallel_function_calling"
+    REPRODUCIBLE_OUTPUT = "reproducible_output"
+    STRUCTURED_OUTPUT = "structured_output"
+    VISION = "vision"
+    STREAMING = "streaming"
+    REASONING = "reasoning"
+
+
 class LanguageModelInfo(BaseModel):
     name: LanguageModelName | str
     version: str
     provider: LanguageModelProvider

-    encoder_name: Optional[EncoderName] = None
-    token_limits: Optional[LanguageModelTokenLimits] = None
+    encoder_name: EncoderName = EncoderName.CL100K_BASE
+
+    # TODO: Discuss if this is a sensible defaut
+    token_limits: LanguageModelTokenLimits = LanguageModelTokenLimits(
+        token_limit_input=7_000, token_limit_output=1_000
+    )
+    capabilities: list[ModelCapabilities] = [ModelCapabilities.STREAMING]

     info_cutoff_at: Optional[date] = None
     published_at: Optional[date] = None
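With 0.6.6, `encoder_name`, `token_limits`, and `capabilities` carry defaults instead of being `Optional`. A sketch of what a custom model entry now looks like when those fields are omitted; the model name below is made up for illustration:

```python
# Assumed custom entry: omitted fields fall back to cl100k_base,
# 7_000/1_000 token limits, and the STREAMING capability.
from unique_toolkit.language_model.infos import (
    EncoderName,
    LanguageModelInfo,
    LanguageModelProvider,
    ModelCapabilities,
)

custom_info = LanguageModelInfo(
    name="my-custom-model",  # hypothetical model name
    version="2025-01-01",
    provider=LanguageModelProvider.CUSTOM,
)

assert custom_info.encoder_name == EncoderName.CL100K_BASE
assert custom_info.capabilities == [ModelCapabilities.STREAMING]
```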
@@ -70,14 +86,20 @@ class LanguageModelInfo(BaseModel):
     retirement_text: Optional[str] = None

     @classmethod
-    def from_name(cls, model_name: LanguageModelName):
+    def from_name(cls, model_name: LanguageModelName) -> Self:
         match model_name:
             case LanguageModelName.AZURE_GPT_35_TURBO_0125:
                 return cls(
                     name=model_name,
                     provider=LanguageModelProvider.AZURE,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.REPRODUCIBLE_OUTPUT,
+                    ],
                     version="0125",
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.CL100K_BASE,
                     token_limits=LanguageModelTokenLimits(
                         token_limit_input=16385, token_limit_output=4096
                     ),
@@ -90,43 +112,27 @@ class LanguageModelInfo(BaseModel):
                     name=model_name,
                     provider=LanguageModelProvider.AZURE,
                     version="0613",
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.CL100K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                    ],
                     token_limits=LanguageModelTokenLimits(token_limit=8192),
                     info_cutoff_at=date(2021, 9, 1),
                     published_at=date(2023, 6, 13),
                     deprecated_at=date(2024, 10, 1),
                     retirement_at=date(2025, 6, 6),
                 )
-            case LanguageModelName.AZURE_GPT_4_TURBO_1106:
-                return cls(
-                    name=model_name,
-                    provider=LanguageModelProvider.AZURE,
-                    version="1106-preview",
-                    encoder_name=get_encoder_name(model_name),
-                    token_limits=LanguageModelTokenLimits(
-                        token_limit_input=128000, token_limit_output=4096
-                    ),
-                    info_cutoff_at=date(2023, 4, 1),
-                    published_at=date(2023, 11, 6),
-                )
-            case LanguageModelName.AZURE_GPT_4_VISION_PREVIEW:
-                return cls(
-                    name=model_name,
-                    provider=LanguageModelProvider.AZURE,
-                    version="vision-preview",
-                    encoder_name=get_encoder_name(model_name),
-                    token_limits=LanguageModelTokenLimits(
-                        token_limit_input=128000, token_limit_output=4096
-                    ),
-                    info_cutoff_at=date(2023, 4, 1),
-                    published_at=date(2023, 11, 6),
-                )
             case LanguageModelName.AZURE_GPT_4_32K_0613:
                 return cls(
                     name=model_name,
                     provider=LanguageModelProvider.AZURE,
                     version="1106-preview",
-                    encoder_name=get_encoder_name(model_name),
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                    ],
+                    encoder_name=EncoderName.CL100K_BASE,
                     token_limits=LanguageModelTokenLimits(token_limit=32768),
                     info_cutoff_at=date(2021, 9, 1),
                     published_at=date(2023, 6, 13),
@@ -136,7 +142,14 @@ class LanguageModelInfo(BaseModel):
             case LanguageModelName.AZURE_GPT_4_TURBO_2024_0409:
                 return cls(
                     name=model_name,
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.CL100K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.STREAMING,
+                    ],
                     provider=LanguageModelProvider.AZURE,
                     version="turbo-2024-04-09",
                     token_limits=LanguageModelTokenLimits(
@@ -148,11 +161,18 @@ class LanguageModelInfo(BaseModel):
             case LanguageModelName.AZURE_GPT_4o_2024_0513:
                 return cls(
                     name=model_name,
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                    ],
                     provider=LanguageModelProvider.AZURE,
                     version="2024-05-13",
                     token_limits=LanguageModelTokenLimits(
-                        token_limit_input=128000, token_limit_output=4096
+                        token_limit_input=128_000, token_limit_output=4_096
                     ),
                     info_cutoff_at=date(2023, 10, 1),
                     published_at=date(2024, 5, 13),
@@ -160,11 +180,18 @@ class LanguageModelInfo(BaseModel):
             case LanguageModelName.AZURE_GPT_4o_2024_0806:
                 return cls(
                     name=model_name,
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                    ],
                     provider=LanguageModelProvider.AZURE,
                     version="2024-08-06",
                     token_limits=LanguageModelTokenLimits(
-                        token_limit_input=128000, token_limit_output=16384
+                        token_limit_input=128_000, token_limit_output=16_384
                     ),
                     info_cutoff_at=date(2023, 10, 1),
                     published_at=date(2024, 8, 6),
@@ -172,15 +199,78 @@ class LanguageModelInfo(BaseModel):
             case LanguageModelName.AZURE_GPT_4o_MINI_2024_0718:
                 return cls(
                     name=model_name,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.PARALLEL_FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                    ],
                     provider=LanguageModelProvider.AZURE,
                     version="2024-07-18",
-                    encoder_name=get_encoder_name(model_name),
+                    encoder_name=EncoderName.O200K_BASE,
                     token_limits=LanguageModelTokenLimits(
-                        token_limit_input=128000, token_limit_output=16384
+                        token_limit_input=128_000, token_limit_output=16_384
                     ),
                     info_cutoff_at=date(2023, 10, 1),
                     published_at=date(2024, 7, 18),
                 )
+            case LanguageModelName.AZURE_GPT_o1_2024_1217:
+                return cls(
+                    name=model_name,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.REASONING,
+                    ],
+                    provider=LanguageModelProvider.AZURE,
+                    version="2024-12-17",
+                    encoder_name=EncoderName.O200K_BASE,
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    info_cutoff_at=date(2023, 10, 1),
+                    published_at=date(2024, 12, 17),
+                )
+            case LanguageModelName.AZURE_GPT_o1_MINI_2024_0912:
+                return cls(
+                    name=model_name,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.REASONING,
+                    ],
+                    provider=LanguageModelProvider.AZURE,
+                    version="2024-09-12",
+                    encoder_name=EncoderName.O200K_BASE,
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=128_000, token_limit_output=65_536
+                    ),
+                    info_cutoff_at=date(2023, 10, 1),
+                    published_at=date(2024, 9, 12),
+                )
+            case LanguageModelName.AZURE_GPT_o3_MINI_2025_0131:
+                return cls(
+                    name=model_name,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.REASONING,
+                    ],
+                    provider=LanguageModelProvider.AZURE,
+                    version="2025-01-31",
+                    encoder_name=EncoderName.O200K_BASE,
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    info_cutoff_at=date(2023, 10, 1),
+                    published_at=date(2025, 1, 31),
+                )
             case _:
                 if isinstance(model_name, LanguageModelName):
                     raise ValueError(
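A quick sketch of looking up one of the newly added o-series entries through `from_name`; the asserted values mirror the hunk above.

```python
# Resolve the new o3-mini entry and inspect its declared capabilities.
from unique_toolkit.language_model.infos import (
    LanguageModelInfo,
    LanguageModelName,
    ModelCapabilities,
)

info = LanguageModelInfo.from_name(LanguageModelName.AZURE_GPT_o3_MINI_2025_0131)

assert info.version == "2025-01-31"
assert ModelCapabilities.REASONING in info.capabilities
assert ModelCapabilities.VISION not in info.capabilities  # o3-mini entry omits vision
```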
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.6.5
+Version: 0.6.8
 Summary:
 License: Proprietary
 Author: Martin Fadler
@@ -111,6 +111,17 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [0.6.8] - 2025-03-11
+- Add `verify_request_and_construct_event` to `verification.py`
+
+## [0.6.7] - 2025-03-10
+- Extend language model message builder
+
+## [0.6.6] - 2025-03-10
+- Add o1, o1-mini and o3-mini models
+- Remove deprecated gpt4 models
+- Make token_limits and encoder a required attribute of LanguageModelInfo
+
 ## [0.6.5] - 2025-03-04
 - Add `upload_content_from_bytes` to `ContentService`
 - Add `download_content_to_bytes` to `ContentService`
@@ -9,8 +9,8 @@ unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3
 unique_toolkit/app/init_sdk.py,sha256=Nv4Now4pMfM0AgRhbtatLpm_39rKxn0WmRLwmPhRl-8,1285
 unique_toolkit/app/performance/async_tasks.py,sha256=H0l3OAcosLwNHZ8d2pd-Di4wHIXfclEvagi5kfqLFPA,1941
 unique_toolkit/app/performance/async_wrapper.py,sha256=yVVcRDkcdyfjsxro-N29SBvi-7773wnfDplef6-y8xw,1077
-unique_toolkit/app/schemas.py,sha256=hPOh5xLNNWgWVIkdrj6ZHYaGz0cTV-5Kv7OQHOaUgV8,3201
-unique_toolkit/app/verification.py,sha256=mffa6wm0i4hJbwzofePrkaia46xumMzECwQ0T3eKAx0,1929
+unique_toolkit/app/schemas.py,sha256=jnW3f_4ohyfM7sVfZlNuV8At19qUwPJGC8XHx4ktlEY,3200
+unique_toolkit/app/verification.py,sha256=GxFFwcJMy25fCA_Xe89wKW7bgqOu8PAs5y8QpHF0GSc,3861
 unique_toolkit/chat/__init__.py,sha256=LRs2G-JTVuci4lbtHTkVUiNcZcSR6uqqfnAyo7af6nY,619
 unique_toolkit/chat/constants.py,sha256=05kq6zjqUVB2d6_P7s-90nbljpB3ryxwCI-CAz0r2O4,83
 unique_toolkit/chat/functions.py,sha256=J9Cmgkhj9bBxZja3ggkSp48af_LPU4Dfi9Sbc_WhhNY,27204
@@ -45,10 +45,10 @@ unique_toolkit/evaluators/hallucination/utils.py,sha256=4KTJH8low_fBzOcuVlcHB2FR
 unique_toolkit/evaluators/output_parser.py,sha256=eI72qkzK1dZyUvnfP2SOAQCGBj_-PwX5wy_aLPMsJMY,883
 unique_toolkit/evaluators/schemas.py,sha256=Jaue6Uhx75X1CyHKWj8sT3RE1JZXTqoLtfLt2xQNCX8,2507
 unique_toolkit/language_model/__init__.py,sha256=jWko_vQj48wjnpTtlkg8iNdef0SMI3FN2kGywXRTMzg,1880
-unique_toolkit/language_model/builder.py,sha256=qP1SlUnYJHLqT-fpXs4lgUixnekhx8IIfuoXnMHvRKE,2408
+unique_toolkit/language_model/builder.py,sha256=_lUL2shMN1Je2LLYu2PiehOvfWtn5pg1yUuYyiw3f_c,2710
 unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
 unique_toolkit/language_model/functions.py,sha256=I5jHhHsKoq7GwEQyTrM8LXB2n_6dvMAk7UklenjuHSY,7945
-unique_toolkit/language_model/infos.py,sha256=-axWHj55mp6tZfX_3i-FSkfh8e9fwORXWMfi9xQ_UjA,12232
+unique_toolkit/language_model/infos.py,sha256=DRkF0HzVemtSsSvdtrdsajNKaQ46Xla0ZXzFhi7xMtc,16338
 unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
 unique_toolkit/language_model/schemas.py,sha256=rrwzUgKANFOrdehCULW8Hh03uRW3tsE5dXpWqxmClfg,8618
 unique_toolkit/language_model/service.py,sha256=GupYD4uDZjy1TfVQW3jichmgQwiSgQCj350FtL4O0W4,5569
@@ -58,7 +58,7 @@ unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7
 unique_toolkit/short_term_memory/functions.py,sha256=3WiK-xatY5nh4Dr5zlDUye1k3E6kr41RiscwtTplw5k,4484
 unique_toolkit/short_term_memory/schemas.py,sha256=OhfcXyF6ACdwIXW45sKzjtZX_gkcJs8FEZXcgQTNenw,1406
 unique_toolkit/short_term_memory/service.py,sha256=gdsVzoNqTXmLoBR_-p_lJlZDBo8L7Cr5EKchTNVJg1Q,5233
-unique_toolkit-0.6.5.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
-unique_toolkit-0.6.5.dist-info/METADATA,sha256=QpySQwwkqfBL9Mm9g8urq7LIhQrBy4LF7ZTWQroXED4,19835
-unique_toolkit-0.6.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-unique_toolkit-0.6.5.dist-info/RECORD,,
+unique_toolkit-0.6.8.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_toolkit-0.6.8.dist-info/METADATA,sha256=ur8J07QmoFPjKB1oMwjXXsWhOo9XG1g_gabI3q5dpW0,20157
+unique_toolkit-0.6.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.6.8.dist-info/RECORD,,