speedy-utils 1.1.0__tar.gz → 1.1.2__tar.gz

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (36)
  1. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/PKG-INFO +1 -1
  2. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/pyproject.toml +1 -1
  3. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/async_lm.py +16 -9
  4. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/README.md +0 -0
  5. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/__init__.py +0 -0
  6. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/chat_format/__init__.py +0 -0
  7. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/chat_format/display.py +0 -0
  8. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/chat_format/transform.py +0 -0
  9. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/chat_format/utils.py +0 -0
  10. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/group_messages.py +0 -0
  11. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/__init__.py +0 -0
  12. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/chat_html.py +0 -0
  13. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/lm_json.py +0 -0
  14. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/sync_lm.py +0 -0
  15. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/lm/utils.py +0 -0
  16. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/scripts/README.md +0 -0
  17. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/scripts/vllm_load_balancer.py +0 -0
  18. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/llm_utils/scripts/vllm_serve.py +0 -0
  19. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/__init__.py +0 -0
  20. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/all.py +0 -0
  21. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/__init__.py +0 -0
  22. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/clock.py +0 -0
  23. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/function_decorator.py +0 -0
  24. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/logger.py +0 -0
  25. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/notebook_utils.py +0 -0
  26. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/report_manager.py +0 -0
  27. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/utils_cache.py +0 -0
  28. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/utils_io.py +0 -0
  29. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/utils_misc.py +0 -0
  30. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/common/utils_print.py +0 -0
  31. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/multi_worker/__init__.py +0 -0
  32. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/multi_worker/process.py +0 -0
  33. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/multi_worker/thread.py +0 -0
  34. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/scripts/__init__.py +0 -0
  35. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/scripts/mpython.py +0 -0
  36. {speedy_utils-1.1.0 → speedy_utils-1.1.2}/src/speedy_utils/scripts/openapi_client_codegen.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: speedy-utils
-Version: 1.1.0
+Version: 1.1.2
 Summary: Fast and easy-to-use package for data science
 Author: AnhVTH
 Author-email: anhvth.226@gmail.com
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "speedy-utils"
-version = "1.1.0"
+version = "1.1.2"
 description = "Fast and easy-to-use package for data science"
 authors = ["AnhVTH <anhvth.226@gmail.com>"]
 readme = "README.md"
src/llm_utils/lm/async_lm.py
@@ -78,7 +78,7 @@ import hashlib
 import json
 import os
 from abc import ABC
-from functools import lru_cache
+from functools import cache, lru_cache
 from typing import (
     Any,
     Dict,
@@ -146,10 +146,13 @@ def _yellow(t):
     return _color(33, t)
 
 
-class ParsedOutput(TypedDict):
+
+TParsed = TypeVar('TParsed', bound=BaseModel)
+
+class ParsedOutput(TypedDict, Generic[TParsed]):
     messages: List
     completion: Any
-    parsed: BaseModel
+    parsed: TParsed
 
 
 class AsyncLM:
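
The change above makes ParsedOutput generic over the Pydantic model, so static type checkers can carry the concrete response_model type through to the parsed field. A minimal, self-contained sketch of the same pattern (the Invoice model and make_output helper are illustrative, not part of speedy-utils; generic TypedDicts need Python 3.11+ or typing_extensions):

# Sketch of the generic-TypedDict pattern introduced above.
# Invoice and make_output are hypothetical stand-ins.
from typing import Any, Generic, List, TypedDict, TypeVar

from pydantic import BaseModel

TParsed = TypeVar("TParsed", bound=BaseModel)


class ParsedOutput(TypedDict, Generic[TParsed]):
    messages: List
    completion: Any
    parsed: TParsed


class Invoice(BaseModel):
    total: float


def make_output(parsed: TParsed) -> ParsedOutput[TParsed]:
    # At runtime a TypedDict is just a dict; the generic only helps static checkers.
    return ParsedOutput(messages=[], completion=None, parsed=parsed)


out = make_output(Invoice(total=9.99))
print(out["parsed"].total)  # inferred as Invoice, not BaseModel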
@@ -460,7 +463,7 @@ class AsyncLM:
     # ------------------------------------------------------------------ #
     async def parse(
         self,
-        response_model: Type[BaseModel],
+        response_model: Type[TParsed],
         instruction: Optional[str] = None,
         prompt: Optional[str] = None,
         messages: Optional[RawMsgs] = None,
@@ -470,7 +473,7 @@ class AsyncLM:
         max_tokens: Optional[int] = None,
         cache: Optional[bool] = True,
         **kwargs,
-    ) -> ParsedOutput:  # -> dict[str, Any]:
+    ) -> ParsedOutput[TParsed]:
         """Parse response using guided JSON generation."""
         if messages is None:
             assert instruction is not None, "Instruction must be provided."
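
With response_model: Type[TParsed] and the ParsedOutput[TParsed] return type, the result of parse() is now typed by the model passed in. A hypothetical call-site sketch (the City model, the model name, and the AsyncLM constructor arguments are assumptions, and running it requires a reachable OpenAI-compatible endpoint):

import asyncio

from pydantic import BaseModel

from llm_utils.lm.async_lm import AsyncLM


class City(BaseModel):
    name: str
    population: int


async def main() -> None:
    lm = AsyncLM(model="gpt-4o-mini")  # constructor arguments are assumed
    result = await lm.parse(
        response_model=City,
        instruction="Extract the city mentioned in the prompt.",
        prompt="Hanoi has about 8 million residents.",
    )
    # result["parsed"] is now inferred as City rather than BaseModel.
    print(result["parsed"].name)


asyncio.run(main())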
@@ -513,6 +516,7 @@ class AsyncLM:
 
         use_cache = self.do_cache if cache is None else cache
         cache_key = None
+        completion = None
         if use_cache:
             cache_data = {
                 "messages": messages,
@@ -522,7 +526,7 @@ class AsyncLM:
             }
             cache_key = self._cache_key(cache_data, {}, response_model)
             completion = self._load_cache(cache_key)  # dict
-        else:
+        if not completion:
             completion = await self.client.chat.completions.create(
                 model=self.model,  # type: ignore
                 messages=messages,  # type: ignore
@@ -532,10 +536,12 @@ class AsyncLM:
             completion = completion.model_dump()
             if cache_key:
                 self._dump_cache(cache_key, completion)
-
+        assert isinstance(completion, dict), (
+            "Completion must be a dictionary with OpenAI response format."
+        )
         self.last_log = [prompt, messages, completion]
 
-        output = self._parse_complete_output(completion, response_model)
+        output = cast(TParsed, self._parse_complete_output(completion, response_model))
         full_messages = messages + [completion]
         return ParsedOutput(
             messages=full_messages,
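
Initializing completion = None and replacing else: with if not completion: fixes the cache-miss path: previously, when caching was enabled but _load_cache returned nothing, the else: branch was skipped and no API call was made. The new assert then guarantees downstream code sees a dict in the OpenAI response shape. A standalone sketch of the same miss-fallthrough pattern, with an in-memory cache and a stubbed client standing in for speedy-utils internals:

from typing import Dict, Optional

_CACHE: Dict[str, dict] = {}


def load_cache(key: str) -> Optional[dict]:
    return _CACHE.get(key)


def fake_api_call() -> dict:
    # Stand-in for client.chat.completions.create(...).model_dump()
    return {"choices": [{"message": {"content": "fresh"}}]}


def get_completion(key: str, use_cache: bool) -> dict:
    completion: Optional[dict] = None
    if use_cache:
        completion = load_cache(key)  # None on a miss
    if not completion:  # covers both cache-disabled and cache-miss paths
        completion = fake_api_call()
        if use_cache:
            _CACHE[key] = completion
    assert isinstance(completion, dict)
    return completion


print(get_completion("k1", use_cache=True))  # miss -> fresh call, then cached
print(get_completion("k1", use_cache=True))  # hit -> served from cache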
@@ -894,6 +900,7 @@ class AsyncLLMTask(ABC, Generic[InputModelType, OutputModelType]):
     temperature: float = 0.6
     think: bool = False
     add_json_schema: bool = False
+    cache: bool = False
 
     async def __call__(
         self,
@@ -942,7 +949,7 @@ class AsyncLLMTask(ABC, Generic[InputModelType, OutputModelType]):
             temperature=temperature or self.temperature,
             think=self.think,
             add_json_schema_to_instruction=self.add_json_schema,
-            cache=cache,
+            cache=self.cache or cache,
         )
 
         return (
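
Because the effective flag is now self.cache or cache, a task class that declares cache = True forces caching even when the caller omits the argument; conversely, a caller can no longer disable caching for such a task, since True or False is still True. A minimal demonstration of the precedence:

# Truth table for the new `cache=self.cache or cache` precedence.
for class_cache in (False, True):
    for call_cache in (False, True):
        effective = class_cache or call_cache
        print(f"self.cache={class_cache!s:<5} cache={call_cache!s:<5} -> {effective}")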