unique_toolkit 0.7.14__py3-none-any.whl → 0.7.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -109,31 +109,26 @@ class ChatEventPayload(BaseModel):
109
109
  @deprecated("""Use `ChatEventPayload` instead.
110
110
  This class will be removed in the next major version.""")
111
111
  class EventPayload(ChatEventPayload):
112
- user_message: EventUserMessage
113
- assistant_message: EventAssistantMessage
114
- additional_parameters: Optional[EventAdditionalParameters] = None
115
-
116
-
117
- @deprecated(
118
- """Use the more specific `ChatEvent` instead that has the same properties. \
119
- This class will be removed in the next major version."""
120
- )
121
- class Event(BaseModel):
122
- model_config = model_config
123
-
124
- id: str
125
- event: EventName
126
- user_id: str
127
- company_id: str
128
- payload: EventPayload
129
- created_at: Optional[int] = None
130
- version: Optional[str] = None
112
+ pass
113
+ # user_message: EventUserMessage
114
+ # assistant_message: EventAssistantMessage
115
+ # additional_parameters: Optional[EventAdditionalParameters] = None
131
116
 
132
117
 
133
118
  class ChatEvent(BaseEvent):
134
119
  model_config = model_config
135
120
 
136
- event: EventName
137
121
  payload: ChatEventPayload
138
122
  created_at: Optional[int] = None
139
123
  version: Optional[str] = None
124
+
125
+
126
+ @deprecated(
127
+ """Use the more specific `ChatEvent` instead that has the same properties. \
128
+ This class will be removed in the next major version."""
129
+ )
130
+ class Event(ChatEvent):
131
+ pass
132
+ # The below should only affect type hints
133
+ # event: EventName T
134
+ # payload: EventPayload
@@ -41,6 +41,7 @@ from unique_toolkit.language_model.infos import (
41
41
  )
42
42
  from unique_toolkit.language_model.schemas import (
43
43
  LanguageModelMessages,
44
+ LanguageModelResponse,
44
45
  LanguageModelStreamResponse,
45
46
  LanguageModelTool,
46
47
  )
@@ -81,7 +82,7 @@ class ChatService:
81
82
  @deprecated(
82
83
  "The event property is deprecated and will be removed in a future version."
83
84
  )
84
- def event(self) -> Event | ChatEvent | None:
85
+ def event(self) -> Event | ChatEvent:
85
86
  """
86
87
  Get the event object (deprecated).
87
88
 
@@ -279,14 +280,30 @@ class ChatService:
279
280
  Args:
280
281
  debug_info (dict): The new debug information.
281
282
  """
282
-
283
+ [
284
+ company_id,
285
+ user_id,
286
+ assistant_message_id,
287
+ user_message_id,
288
+ chat_id,
289
+ user_message_text,
290
+ ] = validate_required_values(
291
+ [
292
+ self._company_id,
293
+ self._user_id,
294
+ self._assistant_message_id,
295
+ self._user_message_id,
296
+ self._chat_id,
297
+ self._user_message_text,
298
+ ]
299
+ )
283
300
  return await modify_message_async(
284
- user_id=self._user_id,
285
- company_id=self._company_id,
286
- assistant_message_id=self._assistant_message_id,
287
- chat_id=self._chat_id,
288
- user_message_id=self._user_message_id,
289
- user_message_text=self._user_message_text,
301
+ user_id=user_id,
302
+ company_id=company_id,
303
+ assistant_message_id=assistant_message_id,
304
+ chat_id=chat_id,
305
+ user_message_id=user_message_id,
306
+ user_message_text=user_message_text,
290
307
  assistant=False,
291
308
  debug_info=debug_info,
292
309
  )
@@ -298,14 +315,31 @@ class ChatService:
298
315
  Args:
299
316
  debug_info (dict): The new debug information.
300
317
  """
318
+ [
319
+ company_id,
320
+ user_id,
321
+ assistant_message_id,
322
+ user_message_id,
323
+ chat_id,
324
+ user_message_text,
325
+ ] = validate_required_values(
326
+ [
327
+ self._company_id,
328
+ self._user_id,
329
+ self._assistant_message_id,
330
+ self._user_message_id,
331
+ self._chat_id,
332
+ self._user_message_text,
333
+ ]
334
+ )
301
335
 
302
336
  return modify_message(
303
- user_id=self._user_id,
304
- company_id=self._company_id,
305
- assistant_message_id=self._assistant_message_id,
306
- chat_id=self._chat_id,
307
- user_message_id=self._user_message_id,
308
- user_message_text=self._user_message_text,
337
+ user_id=user_id,
338
+ company_id=company_id,
339
+ assistant_message_id=assistant_message_id,
340
+ chat_id=chat_id,
341
+ user_message_id=user_message_id,
342
+ user_message_text=user_message_text,
309
343
  assistant=False,
310
344
  debug_info=debug_info,
311
345
  )
@@ -334,13 +368,31 @@ class ChatService:
334
368
  Raises:
335
369
  Exception: If the modification fails.
336
370
  """
371
+ [
372
+ company_id,
373
+ user_id,
374
+ assistant_message_id,
375
+ user_message_id,
376
+ chat_id,
377
+ user_message_text,
378
+ ] = validate_required_values(
379
+ [
380
+ self._company_id,
381
+ self._user_id,
382
+ self._assistant_message_id,
383
+ self._user_message_id,
384
+ self._chat_id,
385
+ self._user_message_text,
386
+ ]
387
+ )
388
+
337
389
  return modify_message(
338
- user_id=self._user_id,
339
- company_id=self._company_id,
340
- assistant_message_id=self._assistant_message_id,
341
- chat_id=self._chat_id,
342
- user_message_id=self._user_message_id,
343
- user_message_text=self._user_message_text,
390
+ user_id=user_id,
391
+ company_id=company_id,
392
+ assistant_message_id=assistant_message_id,
393
+ chat_id=chat_id,
394
+ user_message_id=user_message_id,
395
+ user_message_text=user_message_text,
344
396
  assistant=False,
345
397
  content=content,
346
398
  references=references,
@@ -373,13 +425,32 @@ class ChatService:
373
425
  Raises:
374
426
  Exception: If the modification fails.
375
427
  """
428
+
429
+ [
430
+ company_id,
431
+ user_id,
432
+ assistant_message_id,
433
+ user_message_id,
434
+ chat_id,
435
+ user_message_text,
436
+ ] = validate_required_values(
437
+ [
438
+ self._company_id,
439
+ self._user_id,
440
+ self._assistant_message_id,
441
+ self._user_message_id,
442
+ self._chat_id,
443
+ self._user_message_text,
444
+ ]
445
+ )
446
+
376
447
  return await modify_message_async(
377
- user_id=self._user_id,
378
- company_id=self._company_id,
379
- assistant_message_id=self._assistant_message_id,
380
- chat_id=self._chat_id,
381
- user_message_id=self._user_message_id,
382
- user_message_text=self._user_message_text,
448
+ user_id=user_id,
449
+ company_id=company_id,
450
+ assistant_message_id=assistant_message_id,
451
+ chat_id=chat_id,
452
+ user_message_id=user_message_id,
453
+ user_message_text=user_message_text,
383
454
  assistant=False,
384
455
  content=content,
385
456
  references=references,
@@ -414,13 +485,31 @@ class ChatService:
414
485
  Raises:
415
486
  Exception: If the modification fails.
416
487
  """
488
+ [
489
+ company_id,
490
+ user_id,
491
+ assistant_message_id,
492
+ user_message_id,
493
+ chat_id,
494
+ user_message_text,
495
+ ] = validate_required_values(
496
+ [
497
+ self._company_id,
498
+ self._user_id,
499
+ self._assistant_message_id,
500
+ self._user_message_id,
501
+ self._chat_id,
502
+ self._user_message_text,
503
+ ]
504
+ )
505
+
417
506
  return modify_message(
418
- user_id=self._user_id,
419
- company_id=self._company_id,
420
- assistant_message_id=self._assistant_message_id,
421
- chat_id=self._chat_id,
422
- user_message_id=self._user_message_id,
423
- user_message_text=self._user_message_text,
507
+ user_id=user_id,
508
+ company_id=company_id,
509
+ assistant_message_id=assistant_message_id,
510
+ chat_id=chat_id,
511
+ user_message_id=user_message_id,
512
+ user_message_text=user_message_text,
424
513
  assistant=True,
425
514
  content=content,
426
515
  original_content=original_content,
@@ -456,14 +545,30 @@ class ChatService:
456
545
  Raises:
457
546
  Exception: If the modification fails.
458
547
  """
459
-
548
+ [
549
+ company_id,
550
+ user_id,
551
+ assistant_message_id,
552
+ user_message_id,
553
+ chat_id,
554
+ user_message_text,
555
+ ] = validate_required_values(
556
+ [
557
+ self._company_id,
558
+ self._user_id,
559
+ self._assistant_message_id,
560
+ self._user_message_id,
561
+ self._chat_id,
562
+ self._user_message_text,
563
+ ]
564
+ )
460
565
  return await modify_message_async(
461
- user_id=self._user_id,
462
- company_id=self._company_id,
463
- assistant_message_id=self._assistant_message_id,
464
- chat_id=self._chat_id,
465
- user_message_id=self._user_message_id,
466
- user_message_text=self._user_message_text,
566
+ user_id=user_id,
567
+ company_id=company_id,
568
+ assistant_message_id=assistant_message_id,
569
+ chat_id=chat_id,
570
+ user_message_id=user_message_id,
571
+ user_message_text=user_message_text,
467
572
  assistant=True,
468
573
  content=content,
469
574
  original_content=original_content,
@@ -595,11 +700,25 @@ class ChatService:
595
700
  Raises:
596
701
  Exception: If the creation fails.
597
702
  """
703
+ [
704
+ company_id,
705
+ user_id,
706
+ assistant_id,
707
+ chat_id,
708
+ ] = validate_required_values(
709
+ [
710
+ self._company_id,
711
+ self._user_id,
712
+ self._assistant_id,
713
+ self._chat_id,
714
+ ]
715
+ )
716
+
598
717
  chat_message = create_message(
599
- user_id=self._user_id,
600
- company_id=self._company_id,
601
- chat_id=self._chat_id,
602
- assistant_id=self._assistant_id,
718
+ user_id=user_id,
719
+ company_id=company_id,
720
+ chat_id=chat_id,
721
+ assistant_id=assistant_id,
603
722
  role=ChatMessageRole.ASSISTANT,
604
723
  content=content,
605
724
  original_content=original_content,
@@ -635,12 +754,24 @@ class ChatService:
635
754
  Raises:
636
755
  Exception: If the creation fails.
637
756
  """
638
-
757
+ [
758
+ company_id,
759
+ user_id,
760
+ assistant_id,
761
+ chat_id,
762
+ ] = validate_required_values(
763
+ [
764
+ self._company_id,
765
+ self._user_id,
766
+ self._assistant_id,
767
+ self._chat_id,
768
+ ]
769
+ )
639
770
  chat_message = await create_message_async(
640
- user_id=self._user_id,
641
- company_id=self._company_id,
642
- chat_id=self._chat_id,
643
- assistant_id=self._assistant_id,
771
+ user_id=user_id,
772
+ company_id=company_id,
773
+ chat_id=chat_id,
774
+ assistant_id=assistant_id,
644
775
  role=ChatMessageRole.ASSISTANT,
645
776
  content=content,
646
777
  original_content=original_content,
@@ -676,11 +807,24 @@ class ChatService:
676
807
  Raises:
677
808
  Exception: If the creation fails.
678
809
  """
810
+ [
811
+ company_id,
812
+ user_id,
813
+ assistant_id,
814
+ chat_id,
815
+ ] = validate_required_values(
816
+ [
817
+ self._company_id,
818
+ self._user_id,
819
+ self._assistant_id,
820
+ self._chat_id,
821
+ ]
822
+ )
679
823
  chat_message = create_message(
680
- user_id=self._user_id,
681
- company_id=self._company_id,
682
- chat_id=self._chat_id,
683
- assistant_id=self._assistant_id,
824
+ user_id=user_id,
825
+ company_id=company_id,
826
+ chat_id=chat_id,
827
+ assistant_id=assistant_id,
684
828
  role=ChatMessageRole.USER,
685
829
  content=content,
686
830
  original_content=original_content,
@@ -716,12 +860,24 @@ class ChatService:
716
860
  Raises:
717
861
  Exception: If the creation fails.
718
862
  """
719
-
863
+ [
864
+ company_id,
865
+ user_id,
866
+ assistant_id,
867
+ chat_id,
868
+ ] = validate_required_values(
869
+ [
870
+ self._company_id,
871
+ self._user_id,
872
+ self._assistant_id,
873
+ self._chat_id,
874
+ ]
875
+ )
720
876
  chat_message = await create_message_async(
721
- user_id=self._user_id,
722
- company_id=self._company_id,
723
- chat_id=self._chat_id,
724
- assistant_id=self._assistant_id,
877
+ user_id=user_id,
878
+ company_id=company_id,
879
+ chat_id=chat_id,
880
+ assistant_id=assistant_id,
725
881
  role=ChatMessageRole.USER,
726
882
  content=content,
727
883
  original_content=original_content,
@@ -761,9 +917,19 @@ class ChatService:
761
917
  Raises:
762
918
  Exception: If the creation fails
763
919
  """
920
+ [
921
+ company_id,
922
+ user_id,
923
+ ] = validate_required_values(
924
+ [
925
+ self._company_id,
926
+ self._user_id,
927
+ ]
928
+ )
929
+
764
930
  return create_message_assessment(
765
- user_id=self._user_id,
766
- company_id=self._company_id,
931
+ user_id=user_id,
932
+ company_id=company_id,
767
933
  assistant_message_id=assistant_message_id,
768
934
  status=status,
769
935
  type=type,
@@ -801,9 +967,19 @@ class ChatService:
801
967
  Raises:
802
968
  Exception: If the creation fails
803
969
  """
970
+ [
971
+ company_id,
972
+ user_id,
973
+ ] = validate_required_values(
974
+ [
975
+ self._company_id,
976
+ self._user_id,
977
+ ]
978
+ )
979
+
804
980
  return await create_message_assessment_async(
805
- user_id=self._user_id,
806
- company_id=self._company_id,
981
+ user_id=user_id,
982
+ company_id=company_id,
807
983
  assistant_message_id=assistant_message_id,
808
984
  status=status,
809
985
  type=type,
@@ -839,9 +1015,19 @@ class ChatService:
839
1015
  Raises:
840
1016
  Exception: If the modification fails
841
1017
  """
1018
+ [
1019
+ company_id,
1020
+ user_id,
1021
+ ] = validate_required_values(
1022
+ [
1023
+ self._company_id,
1024
+ self._user_id,
1025
+ ]
1026
+ )
1027
+
842
1028
  return modify_message_assessment(
843
- user_id=self._user_id,
844
- company_id=self._company_id,
1029
+ user_id=user_id,
1030
+ company_id=company_id,
845
1031
  assistant_message_id=assistant_message_id,
846
1032
  status=status,
847
1033
  type=type,
@@ -876,9 +1062,19 @@ class ChatService:
876
1062
  Raises:
877
1063
  Exception: If the modification fails
878
1064
  """
1065
+ [
1066
+ company_id,
1067
+ user_id,
1068
+ ] = validate_required_values(
1069
+ [
1070
+ self._company_id,
1071
+ self._user_id,
1072
+ ]
1073
+ )
1074
+
879
1075
  return await modify_message_assessment_async(
880
- user_id=self._user_id,
881
- company_id=self._company_id,
1076
+ user_id=user_id,
1077
+ company_id=company_id,
882
1078
  assistant_message_id=assistant_message_id,
883
1079
  status=status,
884
1080
  type=type,
@@ -938,6 +1134,32 @@ class ChatService:
938
1134
  other_options=other_options,
939
1135
  )
940
1136
 
1137
+ def complete(
1138
+ self,
1139
+ messages: LanguageModelMessages,
1140
+ model_name: LanguageModelName | str,
1141
+ content_chunks: list[ContentChunk] = [],
1142
+ debug_info: dict = {},
1143
+ temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
1144
+ timeout: int = DEFAULT_COMPLETE_TIMEOUT,
1145
+ tools: Optional[list[LanguageModelTool]] = None,
1146
+ start_text: Optional[str] = None,
1147
+ other_options: Optional[dict] = None,
1148
+ ) -> LanguageModelResponse:
1149
+ response = self.stream_complete(
1150
+ messages=messages,
1151
+ model_name=model_name,
1152
+ content_chunks=content_chunks,
1153
+ debug_info=debug_info,
1154
+ temperature=temperature,
1155
+ timeout=timeout,
1156
+ tools=tools,
1157
+ start_text=start_text,
1158
+ other_options=other_options,
1159
+ )
1160
+
1161
+ return LanguageModelResponse.from_stream_response(response)
1162
+
941
1163
  async def stream_complete_async(
942
1164
  self,
943
1165
  messages: LanguageModelMessages,
@@ -989,3 +1211,29 @@ class ChatService:
989
1211
  start_text=start_text,
990
1212
  other_options=other_options,
991
1213
  )
1214
+
1215
+ async def complete_async(
1216
+ self,
1217
+ messages: LanguageModelMessages,
1218
+ model_name: LanguageModelName | str,
1219
+ content_chunks: list[ContentChunk] = [],
1220
+ debug_info: dict = {},
1221
+ temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
1222
+ timeout: int = DEFAULT_COMPLETE_TIMEOUT,
1223
+ tools: Optional[list[LanguageModelTool]] = None,
1224
+ start_text: Optional[str] = None,
1225
+ other_options: Optional[dict] = None,
1226
+ ) -> LanguageModelResponse:
1227
+ response = self.stream_complete_async(
1228
+ messages=messages,
1229
+ model_name=model_name,
1230
+ content_chunks=content_chunks,
1231
+ debug_info=debug_info,
1232
+ temperature=temperature,
1233
+ timeout=timeout,
1234
+ tools=tools,
1235
+ start_text=start_text,
1236
+ other_options=other_options,
1237
+ )
1238
+
1239
+ return LanguageModelResponse.from_stream_response(await response)
@@ -213,6 +213,8 @@ def upload_content_from_bytes(
213
213
  scope_id: str | None = None,
214
214
  chat_id: str | None = None,
215
215
  skip_ingestion: bool = False,
216
+ ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
217
+ metadata: dict[str, any] | None = None,
216
218
  ):
217
219
  """
218
220
  Uploads content to the knowledge base.
@@ -226,6 +228,8 @@ def upload_content_from_bytes(
226
228
  scope_id (str | None): The scope ID. Defaults to None.
227
229
  chat_id (str | None): The chat ID. Defaults to None.
228
230
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
231
+ ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
232
+ metadata ( dict[str, any] | None): The metadata for the content. Defaults to None.
229
233
 
230
234
  Returns:
231
235
  Content: The uploaded content.
@@ -241,6 +245,8 @@ def upload_content_from_bytes(
241
245
  scope_id=scope_id,
242
246
  chat_id=chat_id,
243
247
  skip_ingestion=skip_ingestion,
248
+ ingestion_config=ingestion_config,
249
+ metadata=metadata,
244
250
  )
245
251
  except Exception as e:
246
252
  logger.error(f"Error while uploading content: {e}")
@@ -256,6 +262,8 @@ def upload_content(
256
262
  scope_id: str | None = None,
257
263
  chat_id: str | None = None,
258
264
  skip_ingestion: bool = False,
265
+ ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
266
+ metadata: dict[str, any] | None = None,
259
267
  ):
260
268
  """
261
269
  Uploads content to the knowledge base.
@@ -269,6 +277,8 @@ def upload_content(
269
277
  scope_id (str | None): The scope ID. Defaults to None.
270
278
  chat_id (str | None): The chat ID. Defaults to None.
271
279
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
280
+ ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
281
+ metadata ( dict[str, any] | None): The metadata for the content. Defaults to None.
272
282
 
273
283
  Returns:
274
284
  Content: The uploaded content.
@@ -284,6 +294,8 @@ def upload_content(
284
294
  scope_id=scope_id,
285
295
  chat_id=chat_id,
286
296
  skip_ingestion=skip_ingestion,
297
+ ingestion_config=ingestion_config,
298
+ metadata=metadata,
287
299
  )
288
300
  except Exception as e:
289
301
  logger.error(f"Error while uploading content: {e}")
@@ -299,6 +311,8 @@ def _trigger_upload_content(
299
311
  scope_id: str | None = None,
300
312
  chat_id: str | None = None,
301
313
  skip_ingestion: bool = False,
314
+ ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
315
+ metadata: dict[str, any] | None = None,
302
316
  ):
303
317
  """
304
318
  Uploads content to the knowledge base.
@@ -312,6 +326,8 @@ def _trigger_upload_content(
312
326
  scope_id (str | None): The scope ID. Defaults to None.
313
327
  chat_id (str | None): The chat ID. Defaults to None.
314
328
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
329
+ ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
330
+ metadata (dict[str, any] | None): The metadata for the content. Defaults to None.
315
331
 
316
332
  Returns:
317
333
  Content: The uploaded content.
@@ -368,16 +384,21 @@ def _trigger_upload_content(
368
384
  logger.error(error_msg)
369
385
  raise ValueError(error_msg)
370
386
 
387
+ if ingestion_config is None:
388
+ ingestion_config = {}
389
+
390
+ if skip_ingestion:
391
+ ingestion_config["uniqueIngestionMode"] = "SKIP_INGESTION"
392
+
371
393
  input_dict = {
372
394
  "key": content_name,
373
395
  "title": content_name,
374
396
  "mimeType": mime_type,
375
397
  "byteSize": byte_size,
398
+ "ingestionConfig": ingestion_config,
399
+ "metadata": metadata,
376
400
  }
377
401
 
378
- if skip_ingestion:
379
- input_dict["ingestionConfig"] = {"uniqueIngestionMode": "SKIP_INGESTION"}
380
-
381
402
  if chat_id:
382
403
  _upsert_content(
383
404
  user_id=user_id,
@@ -54,6 +54,8 @@ class Content(BaseModel):
54
54
  read_url: str | None = None
55
55
  created_at: datetime | None = None
56
56
  updated_at: datetime | None = None
57
+ metadata: dict[str, any] | None = None
58
+ ingestion_config: dict | None = None
57
59
 
58
60
 
59
61
  class ContentReference(BaseModel):
@@ -1,6 +1,7 @@
1
1
  import logging
2
2
  from pathlib import Path
3
3
 
4
+ import unique_sdk
4
5
  from requests import Response
5
6
  from typing_extensions import deprecated
6
7
 
@@ -365,6 +366,8 @@ class ContentService:
365
366
  scope_id: str | None = None,
366
367
  chat_id: str | None = None,
367
368
  skip_ingestion: bool = False,
369
+ ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
370
+ metadata: dict | None = None,
368
371
  ) -> Content:
369
372
  """
370
373
  Uploads content to the knowledge base.
@@ -376,6 +379,8 @@ class ContentService:
376
379
  scope_id (str | None): The scope ID. Defaults to None.
377
380
  chat_id (str | None): The chat ID. Defaults to None.
378
381
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
382
+ ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
383
+ metadata (dict | None): The metadata to associate with the content. Defaults to None.
379
384
 
380
385
  Returns:
381
386
  Content: The uploaded content.
@@ -390,6 +395,8 @@ class ContentService:
390
395
  scope_id=scope_id,
391
396
  chat_id=chat_id,
392
397
  skip_ingestion=skip_ingestion,
398
+ ingestion_config=ingestion_config,
399
+ metadata=metadata,
393
400
  )
394
401
 
395
402
  def upload_content(
@@ -400,6 +407,8 @@ class ContentService:
400
407
  scope_id: str | None = None,
401
408
  chat_id: str | None = None,
402
409
  skip_ingestion: bool = False,
410
+ ingestion_config: unique_sdk.Content.IngestionConfig | None = None,
411
+ metadata: dict[str, any] | None = None,
403
412
  ):
404
413
  """
405
414
  Uploads content to the knowledge base.
@@ -411,6 +420,8 @@ class ContentService:
411
420
  scope_id (str | None): The scope ID. Defaults to None.
412
421
  chat_id (str | None): The chat ID. Defaults to None.
413
422
  skip_ingestion (bool): Whether to skip ingestion. Defaults to False.
423
+ ingestion_config (unique_sdk.Content.IngestionConfig | None): The ingestion configuration. Defaults to None.
424
+ metadata (dict[str, any] | None): The metadata to associate with the content. Defaults to None.
414
425
 
415
426
  Returns:
416
427
  Content: The uploaded content.
@@ -425,6 +436,8 @@ class ContentService:
425
436
  scope_id=scope_id,
426
437
  chat_id=chat_id,
427
438
  skip_ingestion=skip_ingestion,
439
+ ingestion_config=ingestion_config,
440
+ metadata=metadata,
428
441
  )
429
442
 
430
443
  def request_content_by_id(
@@ -17,6 +17,10 @@ class MessagesBuilder:
17
17
  def __init__(self):
18
18
  self.messages: list[LanguageModelMessage] = []
19
19
 
20
+ def append(self, message: LanguageModelMessage) -> Self:
21
+ self.messages.append(message)
22
+ return self
23
+
20
24
  def message_append(self, role: LanguageModelMessageRole, content: str):
21
25
  message = LanguageModelMessage(role=role, content=content)
22
26
  self.messages.append(message)
@@ -1,7 +1,7 @@
1
1
  import json
2
2
  import math
3
3
  from enum import StrEnum
4
- from typing import Any, Optional, Self
4
+ from typing import Any, Self
5
5
  from uuid import uuid4
6
6
 
7
7
  from humps import camelize
@@ -27,6 +27,8 @@ model_config = ConfigDict(
27
27
  )
28
28
 
29
29
 
30
+ # Equivalent to
31
+ # from openai.types.chat.chat_completion_role import ChatCompletionRole
30
32
  class LanguageModelMessageRole(StrEnum):
31
33
  USER = "user"
32
34
  SYSTEM = "system"
@@ -34,12 +36,32 @@ class LanguageModelMessageRole(StrEnum):
34
36
  TOOL = "tool"
35
37
 
36
38
 
39
+ # This is tailored to the unique backend
40
+ class LanguageModelStreamResponseMessage(BaseModel):
41
+ model_config = model_config
42
+
43
+ id: str
44
+ previous_message_id: (
45
+ str | None
46
+ ) # Stream response can return a null previous_message_id if an assisstant message is manually added
47
+ role: LanguageModelMessageRole
48
+ text: str
49
+ original_text: str | None = None
50
+ references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
51
+
52
+ # TODO make sdk return role in lowercase
53
+ # Currently needed as sdk returns role in uppercase
54
+ @field_validator("role", mode="before")
55
+ def set_role(cls, value: str):
56
+ return value.lower()
57
+
58
+
37
59
  class LanguageModelFunction(BaseModel):
38
60
  model_config = model_config
39
61
 
40
62
  id: str | None = None
41
63
  name: str
42
- arguments: Optional[dict[str, Any] | str] = None # type: ignore
64
+ arguments: dict[str, Any] | str | None = None # type: ignore
43
65
 
44
66
  @field_validator("arguments", mode="before")
45
67
  def set_arguments(cls, value):
@@ -62,6 +84,14 @@ class LanguageModelFunction(BaseModel):
62
84
  return seralization
63
85
 
64
86
 
87
+ # This is tailored to the unique backend
88
+ class LanguageModelStreamResponse(BaseModel):
89
+ model_config = model_config
90
+
91
+ message: LanguageModelStreamResponseMessage
92
+ tool_calls: list[LanguageModelFunction] | None = None
93
+
94
+
65
95
  class LanguageModelFunctionCall(BaseModel):
66
96
  model_config = model_config
67
97
 
@@ -69,6 +99,8 @@ class LanguageModelFunctionCall(BaseModel):
69
99
  type: str | None = None
70
100
  function: LanguageModelFunction
71
101
 
102
+ # TODO: Circular reference of types
103
+ @deprecated("Use LanguageModelAssistantMessage.from_functions instead.")
72
104
  @staticmethod
73
105
  def create_assistant_message_from_tool_calls(
74
106
  tool_calls: list[LanguageModelFunction],
@@ -93,8 +125,7 @@ class LanguageModelMessage(BaseModel):
93
125
  content: str | list[dict] | None = None
94
126
 
95
127
  def __str__(self):
96
- if not self.content:
97
- message = ""
128
+ message = ""
98
129
  if isinstance(self.content, str):
99
130
  message = self.content
100
131
  elif isinstance(self.content, list):
@@ -103,6 +134,8 @@ class LanguageModelMessage(BaseModel):
103
134
  return format_message(self.role.capitalize(), message=message, num_tabs=1)
104
135
 
105
136
 
137
+ # Equivalent to
138
+ # from openai.types.chat.chat_completion_system_message_param import ChatCompletionSystemMessageParam
106
139
  class LanguageModelSystemMessage(LanguageModelMessage):
107
140
  role: LanguageModelMessageRole = LanguageModelMessageRole.SYSTEM
108
141
 
@@ -111,6 +144,10 @@ class LanguageModelSystemMessage(LanguageModelMessage):
111
144
  return LanguageModelMessageRole.SYSTEM
112
145
 
113
146
 
147
+ # Equivalent to
148
+ # from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
149
+
150
+
114
151
  class LanguageModelUserMessage(LanguageModelMessage):
115
152
  role: LanguageModelMessageRole = LanguageModelMessageRole.USER
116
153
 
@@ -119,6 +156,8 @@ class LanguageModelUserMessage(LanguageModelMessage):
119
156
  return LanguageModelMessageRole.USER
120
157
 
121
158
 
159
+ # Equivalent to
160
+ # from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
122
161
  class LanguageModelAssistantMessage(LanguageModelMessage):
123
162
  role: LanguageModelMessageRole = LanguageModelMessageRole.ASSISTANT
124
163
  parsed: dict | None = None
@@ -129,6 +168,47 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
129
168
  def set_role(cls, value):
130
169
  return LanguageModelMessageRole.ASSISTANT
131
170
 
171
+ @classmethod
172
+ def from_functions(
173
+ cls,
174
+ tool_calls: list[LanguageModelFunction],
175
+ ):
176
+ return cls(
177
+ content="",
178
+ tool_calls=[
179
+ LanguageModelFunctionCall(
180
+ id=tool_call.id,
181
+ type="function",
182
+ function=tool_call,
183
+ )
184
+ for tool_call in tool_calls
185
+ ],
186
+ )
187
+
188
+ @classmethod
189
+ def from_stream_response(cls, response: LanguageModelStreamResponse):
190
+ tool_calls = [
191
+ LanguageModelFunctionCall(
192
+ id=None,
193
+ type=None,
194
+ function=f,
195
+ )
196
+ for f in response.tool_calls or []
197
+ ]
198
+
199
+ tool_calls = tool_calls if len(tool_calls) > 0 else None
200
+
201
+ return cls(
202
+ content=response.message.text,
203
+ parsed=None,
204
+ refusal=None,
205
+ tool_calls=tool_calls,
206
+ )
207
+
208
+
209
+ # Equivalent to
210
+ # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
211
+
132
212
 
133
213
  class LanguageModelToolMessage(LanguageModelMessage):
134
214
  role: LanguageModelMessageRole = LanguageModelMessageRole.TOOL
@@ -147,6 +227,11 @@ class LanguageModelToolMessage(LanguageModelMessage):
147
227
  return LanguageModelMessageRole.TOOL
148
228
 
149
229
 
230
+ # Equivalent implementation for list of
231
+ # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
232
+ # with the addition of the builder
233
+
234
+
150
235
  class LanguageModelMessages(RootModel):
151
236
  root: list[
152
237
  LanguageModelMessage
@@ -174,6 +259,11 @@ class LanguageModelMessages(RootModel):
174
259
  return builder
175
260
 
176
261
 
262
+ # This seems similar to
263
+ # from openai.types.completion_choice import CompletionChoice
264
+ # but is missing multiple attributes and uses message instead of text
265
+
266
+
177
267
  class LanguageModelCompletionChoice(BaseModel):
178
268
  model_config = model_config
179
269
 
@@ -182,38 +272,26 @@ class LanguageModelCompletionChoice(BaseModel):
182
272
  finish_reason: str
183
273
 
184
274
 
275
+ # This seems similar to
276
+ # from openai.types.completion import Completion
277
+ # but is missing multiple attributes
185
278
  class LanguageModelResponse(BaseModel):
186
279
  model_config = model_config
187
280
 
188
281
  choices: list[LanguageModelCompletionChoice]
189
282
 
283
+ @classmethod
284
+ def from_stream_response(cls, response: LanguageModelStreamResponse):
285
+ choice = LanguageModelCompletionChoice(
286
+ index=0,
287
+ message=LanguageModelAssistantMessage.from_stream_response(response),
288
+ finish_reason="",
289
+ )
190
290
 
191
- class LanguageModelStreamResponseMessage(BaseModel):
192
- model_config = model_config
193
-
194
- id: str
195
- previous_message_id: (
196
- str | None
197
- ) # Stream response can return a null previous_message_id if an assisstant message is manually added
198
- role: LanguageModelMessageRole
199
- text: str
200
- original_text: str | None = None
201
- references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
202
-
203
- # TODO make sdk return role in lowercase
204
- # Currently needed as sdk returns role in uppercase
205
- @field_validator("role", mode="before")
206
- def set_role(cls, value: str):
207
- return value.lower()
208
-
209
-
210
- class LanguageModelStreamResponse(BaseModel):
211
- model_config = model_config
212
-
213
- message: LanguageModelStreamResponseMessage
214
- tool_calls: Optional[list[LanguageModelFunction]] = None
291
+ return cls(choices=[choice])
215
292
 
216
293
 
294
+ # This is tailored for unique and only used in language model info
217
295
  class LanguageModelTokenLimits(BaseModel):
218
296
  token_limit_input: int
219
297
  token_limit_output: int
@@ -255,29 +333,35 @@ class LanguageModelTokenLimits(BaseModel):
255
333
 
256
334
  data["token_limit_input"] = math.floor(fraction_input * token_limit)
257
335
  data["token_limit_output"] = math.floor(
258
- (1 - fraction_input) * token_limit
336
+ (1 - fraction_input) * token_limit,
259
337
  )
260
338
  data["_fraction_adaptpable"] = True
261
339
  return data
262
340
 
263
341
  raise ValueError(
264
- 'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.'
342
+ 'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.',
265
343
  )
266
344
 
267
345
 
346
+ # This is more restrictive than what openai allows
268
347
  class LanguageModelToolParameterProperty(BaseModel):
269
348
  type: str
270
349
  description: str
271
- enum: Optional[list[Any]] = None
272
- items: Optional[Self] = None
350
+ enum: list[Any] | None = None
351
+ items: Self | None = None
273
352
 
274
353
 
354
+ # Looks most like
355
+ # from openai.types.shared.function_parameters import FunctionParameters
275
356
  class LanguageModelToolParameters(BaseModel):
276
357
  type: str = "object"
277
358
  properties: dict[str, LanguageModelToolParameterProperty]
278
359
  required: list[str]
279
360
 
280
361
 
362
+ # Looks most like
363
+ # from openai.types.shared_params.function_definition import FunctionDefinition
364
+ # but returns parameter is not known
281
365
  class LanguageModelTool(BaseModel):
282
366
  name: str = Field(
283
367
  ...,
@@ -0,0 +1,28 @@
1
+ from typing import Protocol
2
+
3
+ from unique_toolkit.language_model import (
4
+ LanguageModelMessages,
5
+ LanguageModelName,
6
+ LanguageModelResponse,
7
+ LanguageModelTool,
8
+ )
9
+ from unique_toolkit.language_model.constants import (
10
+ DEFAULT_COMPLETE_TEMPERATURE,
11
+ DEFAULT_COMPLETE_TIMEOUT,
12
+ )
13
+
14
+ # As soon as we have multiple, remember
15
+ # https://pypi.org/project/typing-protocol-intersection/
16
+ # to generate combinations of protocols without inheritance
17
+
18
+
19
+ class SupportsComplete(Protocol):
20
+ def complete(
21
+ self,
22
+ messages: LanguageModelMessages,
23
+ model_name: LanguageModelName | str,
24
+ temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
25
+ timeout: int = DEFAULT_COMPLETE_TIMEOUT,
26
+ tools: list[LanguageModelTool] | None = None,
27
+ **kwargs,
28
+ ) -> LanguageModelResponse: ...
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_toolkit
3
- Version: 0.7.14
3
+ Version: 0.7.19
4
4
  Summary:
5
5
  License: Proprietary
6
6
  Author: Martin Fadler
@@ -17,7 +17,7 @@ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
17
17
  Requires-Dist: regex (>=2024.5.15,<2025.0.0)
18
18
  Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
19
19
  Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
20
- Requires-Dist: unique-sdk (>=0.9.17,<0.10.0)
20
+ Requires-Dist: unique-sdk (>=0.9.27,<0.10.0)
21
21
  Description-Content-Type: text/markdown
22
22
 
23
23
  # Unique Toolkit
@@ -111,6 +111,23 @@ All notable changes to this project will be documented in this file.
111
111
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
112
112
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
113
113
 
114
+ ## [0.7.19] - 2025-05-20
115
+ - Extend the `MessageBuilder` to allow for appending any `LanguageModelMessage`
116
+
117
+ ## [0.7.18] - 2025-05-20
118
+ - Add the possibility to specify metadata when creating or updating a Content.
119
+
120
+ ## [0.7.17] - 2025-05-16
121
+ - Change inheritance hierarchy of events for easier deprecation
122
+
123
+ ## [0.7.16] - 2025-05-16
124
+ - Add classmethods to create LanguageModelAssistantMessage from functions and stream response
125
+ - Add completion like method to chat
126
+ - Add protocol for completion like method
127
+
128
+ ## [0.7.15] - 2025-05-13
129
+ - Add the possibility to specify ingestionConfig when creating or updating a Content.
130
+
114
131
  ## [0.7.14] - 2025-05-08
115
132
  - Fix bug not selecting the correct llm
116
133
  - Add LMI type for flexible init of LanguageModelInfo
@@ -9,20 +9,20 @@ unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3
9
9
  unique_toolkit/app/init_sdk.py,sha256=Nv4Now4pMfM0AgRhbtatLpm_39rKxn0WmRLwmPhRl-8,1285
10
10
  unique_toolkit/app/performance/async_tasks.py,sha256=H0l3OAcosLwNHZ8d2pd-Di4wHIXfclEvagi5kfqLFPA,1941
11
11
  unique_toolkit/app/performance/async_wrapper.py,sha256=yVVcRDkcdyfjsxro-N29SBvi-7773wnfDplef6-y8xw,1077
12
- unique_toolkit/app/schemas.py,sha256=WsdiK39za5h7ir__DBv_FYV-i2_FMucNCWdFgXWN7NE,3373
12
+ unique_toolkit/app/schemas.py,sha256=fNPRQPrpJjYrtkkXPR7sNFjP0AYPZtKe3H1YZkXd2QQ,3275
13
13
  unique_toolkit/app/verification.py,sha256=GxFFwcJMy25fCA_Xe89wKW7bgqOu8PAs5y8QpHF0GSc,3861
14
14
  unique_toolkit/chat/__init__.py,sha256=LRs2G-JTVuci4lbtHTkVUiNcZcSR6uqqfnAyo7af6nY,619
15
15
  unique_toolkit/chat/constants.py,sha256=05kq6zjqUVB2d6_P7s-90nbljpB3ryxwCI-CAz0r2O4,83
16
16
  unique_toolkit/chat/functions.py,sha256=J9Cmgkhj9bBxZja3ggkSp48af_LPU4Dfi9Sbc_WhhNY,27204
17
17
  unique_toolkit/chat/schemas.py,sha256=MNcGAXjK1K8zOODeMFz3FHVQL5sIBQXRwkr_2hFkG8k,2672
18
- unique_toolkit/chat/service.py,sha256=nDXc9PMnepkFDViCAvno-HSQDBaeuG2p8FaHF0TG_9w,34302
18
+ unique_toolkit/chat/service.py,sha256=C8L5Alc9BKmXau5kcbQWKBjg1OGc5fmtO0F9xooxSCw,40641
19
19
  unique_toolkit/chat/state.py,sha256=Cjgwv_2vhDFbV69xxsn7SefhaoIAEqLx3ferdVFCnOg,1445
20
20
  unique_toolkit/chat/utils.py,sha256=ihm-wQykBWhB4liR3LnwPVPt_qGW6ETq21Mw4HY0THE,854
21
21
  unique_toolkit/content/__init__.py,sha256=EdJg_A_7loEtCQf4cah3QARQreJx6pdz89Rm96YbMVg,940
22
22
  unique_toolkit/content/constants.py,sha256=1iy4Y67xobl5VTnJB6SxSyuoBWbdLl9244xfVMUZi5o,60
23
- unique_toolkit/content/functions.py,sha256=yB87wrbtmHzr3jGJUHetmuhy-7RVtnqG2IQ6gqFAun8,17093
24
- unique_toolkit/content/schemas.py,sha256=zks_Pkki2VhxICJJgHZyc-LPmRuj5dLbw3pgcUT7SW8,2362
25
- unique_toolkit/content/service.py,sha256=A9M8C9fI73nfzsAqVAfJuMHGyneYZxATJxT3uTMgDs0,18578
23
+ unique_toolkit/content/functions.py,sha256=Chf2QcnnWvKvXMF4IUmU-_aUN6nTZIfsbM7ds77olcY,18344
24
+ unique_toolkit/content/schemas.py,sha256=28Cj0R9JzJ4s0qR2Sfunr7luwYjMF2I8TepVxt5ZE2o,2446
25
+ unique_toolkit/content/service.py,sha256=27awBOsYHdfSxwHM1UzCQLnHuo-M49ej3jpFwBLRflM,19438
26
26
  unique_toolkit/content/utils.py,sha256=GUVPrkZfMoAj4MRoBs5BD_7vSuLZTZx69hyWzYFrI50,7747
27
27
  unique_toolkit/embedding/__init__.py,sha256=uUyzjonPvuDCYsvXCIt7ErQXopLggpzX-MEQd3_e2kE,250
28
28
  unique_toolkit/embedding/constants.py,sha256=Lj8-Lcy1FvuC31PM9Exq7vaFuxQV4pEI1huUMFX-J2M,52
@@ -45,20 +45,21 @@ unique_toolkit/evaluators/hallucination/utils.py,sha256=gO2AOzDQwVTev2_5vDKgJ9A6
45
45
  unique_toolkit/evaluators/output_parser.py,sha256=eI72qkzK1dZyUvnfP2SOAQCGBj_-PwX5wy_aLPMsJMY,883
46
46
  unique_toolkit/evaluators/schemas.py,sha256=Jaue6Uhx75X1CyHKWj8sT3RE1JZXTqoLtfLt2xQNCX8,2507
47
47
  unique_toolkit/language_model/__init__.py,sha256=jWko_vQj48wjnpTtlkg8iNdef0SMI3FN2kGywXRTMzg,1880
48
- unique_toolkit/language_model/builder.py,sha256=aIAXWWUoB5G-HONJiAt3MdRGd4jdP8nA-HYX2D2WlSI,3048
48
+ unique_toolkit/language_model/builder.py,sha256=69WCcmkm2rMP2-YEH_EjHiEp6OzwjwCs8VbhjVJaCe0,3168
49
49
  unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
50
50
  unique_toolkit/language_model/functions.py,sha256=0oSkG4xpbxeaVTJide6g-zunBrsBRuvp7UQlKVbjpSk,7949
51
51
  unique_toolkit/language_model/infos.py,sha256=qPf4Xlanet8jf0apZ6-qxS_6zmDd6p9D40it2TqmF3w,25910
52
52
  unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
53
- unique_toolkit/language_model/schemas.py,sha256=rrwzUgKANFOrdehCULW8Hh03uRW3tsE5dXpWqxmClfg,8618
53
+ unique_toolkit/language_model/schemas.py,sha256=Wc_OeML0AYPTfIC1BObwumsunq23h12qVzi4hVlaZPE,11389
54
54
  unique_toolkit/language_model/service.py,sha256=FUf-HTKNslrMAh8qFMco_ZpP-N0t_iAFWK3juldoUe8,8343
55
55
  unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
56
+ unique_toolkit/protocols/support.py,sha256=iSSoERUZGLbmY2DGBqGeFTCRtH3ClhzAUutqNxwYgKs,823
56
57
  unique_toolkit/short_term_memory/__init__.py,sha256=2mI3AUrffgH7Yt-xS57EGqnHf7jnn6xquoKEhJqk3Wg,185
57
58
  unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7aqW_OOpZB7sbz_Xg,34
58
59
  unique_toolkit/short_term_memory/functions.py,sha256=3WiK-xatY5nh4Dr5zlDUye1k3E6kr41RiscwtTplw5k,4484
59
60
  unique_toolkit/short_term_memory/schemas.py,sha256=OhfcXyF6ACdwIXW45sKzjtZX_gkcJs8FEZXcgQTNenw,1406
60
61
  unique_toolkit/short_term_memory/service.py,sha256=vEKFxP1SScPrFniso492fVthWR1sosdFibhiNF3zRvI,8081
61
- unique_toolkit-0.7.14.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
62
- unique_toolkit-0.7.14.dist-info/METADATA,sha256=PE6_LSa3whHD6727vPI4Wu2JEuHpPchisWC1RcuFNMA,22172
63
- unique_toolkit-0.7.14.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
64
- unique_toolkit-0.7.14.dist-info/RECORD,,
62
+ unique_toolkit-0.7.19.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
63
+ unique_toolkit-0.7.19.dist-info/METADATA,sha256=7P27qJw5oRh6q1WUTcF-Qvik67x3u1H7dSJZVZhWInI,22784
64
+ unique_toolkit-0.7.19.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
65
+ unique_toolkit-0.7.19.dist-info/RECORD,,