speedy-utils 1.1.26__tar.gz → 1.1.28__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/PKG-INFO +1 -1
  2. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/examples/temperature_range_example.py +5 -2
  3. speedy_utils-1.1.28/examples/vision_utils_example.py +110 -0
  4. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/notebooks/llm_utils/llm_as_a_judge.ipynb +9 -10
  5. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/pyproject.toml +2 -2
  6. speedy_utils-1.1.28/ruff.toml +81 -0
  7. speedy_utils-1.1.28/scripts/debug_import_time.py +148 -0
  8. speedy_utils-1.1.28/scripts/imports.sh +9 -0
  9. speedy_utils-1.1.28/scripts/lazy_import.py +11 -0
  10. speedy_utils-1.1.28/scripts/test_import_time_vision.py +9 -0
  11. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/__init__.py +16 -4
  12. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/chat_format/display.py +33 -21
  13. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/chat_format/transform.py +17 -19
  14. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/chat_format/utils.py +6 -4
  15. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/group_messages.py +17 -14
  16. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/__init__.py +6 -5
  17. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/__init__.py +1 -0
  18. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/_utils.py +10 -9
  19. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/async_llm_task.py +141 -137
  20. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/async_lm.py +48 -42
  21. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/async_lm_base.py +59 -60
  22. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/async_lm/lm_specific.py +4 -3
  23. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/base_prompt_builder.py +93 -70
  24. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/llm.py +126 -108
  25. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/llm_signature.py +4 -2
  26. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/lm_base.py +72 -73
  27. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/mixins.py +102 -62
  28. speedy_utils-1.1.28/src/llm_utils/lm/openai_memoize.py +128 -0
  29. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/signature.py +105 -92
  30. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/lm/utils.py +42 -23
  31. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/scripts/vllm_load_balancer.py +23 -30
  32. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/scripts/vllm_serve.py +8 -7
  33. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/vector_cache/__init__.py +9 -3
  34. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/vector_cache/cli.py +1 -1
  35. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/vector_cache/core.py +59 -63
  36. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/vector_cache/types.py +7 -5
  37. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/vector_cache/utils.py +12 -8
  38. speedy_utils-1.1.28/src/speedy_utils/__imports.py +244 -0
  39. speedy_utils-1.1.28/src/speedy_utils/__init__.py +155 -0
  40. speedy_utils-1.1.28/src/speedy_utils/all.py +129 -0
  41. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/clock.py +37 -42
  42. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/function_decorator.py +6 -12
  43. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/logger.py +43 -52
  44. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/notebook_utils.py +13 -21
  45. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/patcher.py +21 -17
  46. speedy_utils-1.1.28/src/speedy_utils/common/report_manager.py +108 -0
  47. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/utils_cache.py +152 -169
  48. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/utils_io.py +137 -103
  49. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/utils_misc.py +15 -21
  50. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/utils_print.py +22 -28
  51. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/multi_worker/process.py +66 -79
  52. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/multi_worker/thread.py +78 -155
  53. speedy_utils-1.1.28/src/speedy_utils/scripts/mpython.py +108 -0
  54. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/scripts/openapi_client_codegen.py +10 -10
  55. speedy_utils-1.1.28/src/vision_utils/README.md +202 -0
  56. speedy_utils-1.1.28/src/vision_utils/__init__.py +5 -0
  57. speedy_utils-1.1.28/src/vision_utils/io_utils.py +470 -0
  58. speedy_utils-1.1.28/src/vision_utils/plot.py +345 -0
  59. speedy_utils-1.1.28/tests/llm_utils/test_llm_mixins.py +193 -0
  60. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test.py +0 -1
  61. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_logger_format.py +6 -6
  62. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_memoize_typing.py +21 -17
  63. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_multithread_error_trace.py +26 -25
  64. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_process.py +17 -6
  65. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_process_update.py +4 -4
  66. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_thread.py +31 -32
  67. speedy_utils-1.1.26/IMPROVEMENTS.md +0 -141
  68. speedy_utils-1.1.26/examples_improved_error_tracing.py +0 -85
  69. speedy_utils-1.1.26/ruff.toml +0 -4
  70. speedy_utils-1.1.26/simple_test_imemoize.py +0 -64
  71. speedy_utils-1.1.26/src/llm_utils/lm/openai_memoize.py +0 -91
  72. speedy_utils-1.1.26/src/speedy_utils/__init__.py +0 -259
  73. speedy_utils-1.1.26/src/speedy_utils/all.py +0 -231
  74. speedy_utils-1.1.26/src/speedy_utils/common/report_manager.py +0 -110
  75. speedy_utils-1.1.26/src/speedy_utils/scripts/mpython.py +0 -106
  76. speedy_utils-1.1.26/tests/llm_utils/test_llm_mixins.py +0 -153
  77. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/.github/copilot-instructions.md +0 -0
  78. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/.github/workflows/publish.yml +0 -0
  79. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/.gitignore +0 -0
  80. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/.pre-commit-config.yaml +0 -0
  81. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/README.md +0 -0
  82. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/bumpversion.sh +0 -0
  83. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/notebooks/test_multi_thread.ipynb +0 -0
  84. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/scripts/deploy.sh +0 -0
  85. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/setup.cfg +0 -0
  86. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/chat_format/__init__.py +10 -10
  87. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/llm_utils/scripts/README.md +0 -0
  88. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/common/__init__.py +0 -0
  89. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/multi_worker/__init__.py +0 -0
  90. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/src/speedy_utils/scripts/__init__.py +0 -0
  91. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/sample_objects.py +0 -0
  92. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_logger.py +0 -0
  93. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/tests/test_mpython.py +0 -0
  94. {speedy_utils-1.1.26 → speedy_utils-1.1.28}/uv.lock +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: speedy-utils
3
- Version: 1.1.26
3
+ Version: 1.1.28
4
4
  Summary: Fast and easy-to-use package for data science
5
5
  Project-URL: Homepage, https://github.com/anhvth/speedy
6
6
  Project-URL: Repository, https://github.com/anhvth/speedy
@@ -1,8 +1,9 @@
1
1
  """Example demonstrating temperature range sampling with LLM."""
2
2
 
3
- from llm_utils import LLM
4
3
  from pydantic import BaseModel
5
4
 
5
+ from llm_utils import LLM
6
+
6
7
 
7
8
  class CreativeStory(BaseModel):
8
9
  """A creative story output."""
@@ -76,7 +77,9 @@ def example_two_step_parsing():
76
77
  print("=" * 60)
77
78
 
78
79
  llm = LLM(
79
- instruction=("Analyze the given text and extract structured information. Think through your analysis first."),
80
+ instruction=(
81
+ "Analyze the given text and extract structured information. Think through your analysis first."
82
+ ),
80
83
  output_model=CreativeStory,
81
84
  )
82
85
 
@@ -0,0 +1,110 @@
1
+ """
2
+ Example usage of vision_utils.plot_images_notebook
3
+ """
4
+
5
+ import numpy as np
6
+
7
+ from vision_utils import plot_images_notebook
8
+
9
+
10
def test_auto_grid():
    """Nine images with no explicit layout should land on a 3x3 grid."""
    print("Testing auto grid (sqrt) with 9 images...")
    batch = np.random.rand(9, 64, 64, 3)
    plot_images_notebook(batch)  # expect a 3x3 grid
15
+
16
+
17
def test_auto_grid_non_square():
    """Auto grid with a count that is not a perfect square."""
    print("Testing auto grid with 8 images...")
    batch = np.random.rand(8, 64, 64, 3)
    plot_images_notebook(batch)  # expect a 3x3 grid with one empty cell
22
+
23
+
24
def test_manual_grid():
    """Explicitly requested 2x4 layout via nrows/ncols."""
    print("Testing manual 2x4 grid...")
    batch = np.random.rand(8, 64, 64, 3)
    plot_images_notebook(batch, nrows=2, ncols=4)
29
+
30
+
31
def test_many_images():
    """A large batch should trigger adaptive (smaller) cell sizing."""
    print("Testing with 25 images (adaptive sizing)...")
    batch = np.random.rand(25, 64, 64, 3)
    plot_images_notebook(batch)  # expect a 5x5 grid with smaller cells
36
+
37
+
38
def test_numpy_bhwc():
    """Channels-last batch: array shaped (B, H, W, C)."""
    print("Testing numpy array (B, H, W, C) format...")
    plot_images_notebook(np.random.rand(8, 64, 64, 3))
43
+
44
+
45
def test_numpy_bchw():
    """Channels-first batch: array shaped (B, C, H, W)."""
    print("Testing numpy array (B, C, H, W) format...")
    plot_images_notebook(np.random.rand(8, 3, 64, 64))
50
+
51
+
52
def test_list_of_arrays():
    """Mixed-layout list: HWC, CHW, and two grayscale variants."""
    print("Testing list of numpy arrays...")
    samples = [
        np.random.rand(64, 64, 3),  # channels-last (H, W, C)
        np.random.rand(3, 64, 64),  # channels-first (C, H, W)
        np.random.rand(64, 64),  # grayscale, no channel axis (H, W)
        np.random.rand(64, 64, 1),  # grayscale with channel axis (H, W, 1)
    ]
    plot_images_notebook(samples, titles=["HWC", "CHW", "Gray", "Gray1"])
62
+
63
+
64
def test_torch_tensor():
    """Optional PyTorch path; skipped when torch is not installed."""
    try:
        import torch

        print("Testing PyTorch tensor (B, C, H, W) format...")
        plot_images_notebook(torch.rand(8, 3, 64, 64))
    except ImportError:
        print("PyTorch not installed, skipping torch test")
74
+
75
+
76
def test_single_image():
    """A lone image (no batch axis) should still render."""
    print("Testing single image...")
    plot_images_notebook(np.random.rand(128, 128, 3))
81
+
82
+
83
def test_custom_dpi():
    """Explicit dpi requests a higher-resolution figure."""
    print("Testing custom DPI...")
    plot_images_notebook(np.random.rand(4, 64, 64, 3), dpi=100)
88
+
89
+
90
if __name__ == "__main__":
    # Drive every example in a fixed order, grouped under a printed heading.
    sections = [
        ("=== Auto Grid Tests ===", [test_auto_grid, test_auto_grid_non_square]),
        ("\n=== Manual Grid Test ===", [test_manual_grid]),
        ("\n=== Adaptive Sizing Test ===", [test_many_images]),
        (
            "\n=== Format Tests ===",
            [test_numpy_bhwc, test_numpy_bchw, test_list_of_arrays, test_torch_tensor],
        ),
        ("\n=== Edge Cases ===", [test_single_image, test_custom_dpi]),
    ]
    for heading, runners in sections:
        print(heading)
        for runner in runners:
            runner()
@@ -26,14 +26,11 @@
26
26
  "outputs": [],
27
27
  "source": [
28
28
  "\n",
29
- "from llm_utils import (\n",
30
- " LLMJudgeBase, \n",
31
- " Signature, \n",
32
- " InputField, \n",
33
- " OutputField\n",
34
- ")\n",
29
+ "import json\n",
30
+ "\n",
35
31
  "from pydantic import BaseModel\n",
36
- "import json"
32
+ "\n",
33
+ "from llm_utils import InputField, LLMJudgeBase, OutputField, Signature"
37
34
  ]
38
35
  },
39
36
  {
@@ -120,7 +117,7 @@
120
117
  "# Define a signature like DSPy (original syntax - no more type warnings!)\n",
121
118
  "class FactJudge(Signature):\n",
122
119
  " \"\"\"Judge if the answer is factually correct based on the context.\"\"\"\n",
123
- " \n",
120
+ "\n",
124
121
  " # No more type warnings with the updated InputField/OutputField!\n",
125
122
  " context: str = InputField(desc=\"Context for the prediction\")\n",
126
123
  " question: str = InputField(desc=\"Question to be answered\")\n",
@@ -194,14 +191,16 @@
194
191
  " HUMAN_REFERENCE: str = InputField(desc=\"A reference human translation, to be used for guidance but not as ground truth.\")\n",
195
192
  " SYSTEM_MESSAGE: str = InputField(desc=\"An automated hint about a possible structural error in the AI translation.\")\n",
196
193
  " GLOSSARIES: str = InputField(desc=\"Optional terminology constraints; may be empty.\")\n",
197
- " \n",
194
+ "\n",
198
195
  " structure_score: int = OutputField(desc=\"Score for structural correctness: 0 (wrong), 1 (partially correct), 2 (correct)\")\n",
199
196
  " glossary_score: int = OutputField(desc=\"Score for glossary adherence: 0 (not followed), 1 (partially followed), 2 (fully followed or no glossary)\")\n",
200
197
  " translation_score: int = OutputField(desc=\"Score for translation quality: 0 (unfaithful), 1 (somewhat faithful), 2 (faithful)\")\n",
201
- " \n",
198
+ "\n",
202
199
  "# --- Updated evaluation prompt ---\n",
203
200
  "\n",
204
201
  "import os\n",
202
+ "\n",
203
+ "\n",
205
204
  "judge = LLMJudgeBase(signature=Sig, client=8000) # vllm is hosted at port 8000\n",
206
205
  "judge = LLMJudgeBase(signature=Sig, model='gpt-4.1-mini', client=None) # use openai's gpt-4.1 model"
207
206
  ]
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "speedy-utils"
3
- version = "1.1.26"
3
+ version = "1.1.28"
4
4
  description = "Fast and easy-to-use package for data science"
5
5
  authors = [{ name = "AnhVTH", email = "anhvth.226@gmail.com" }]
6
6
  readme = "README.md"
@@ -65,7 +65,7 @@ requires = ["hatchling"]
65
65
  build-backend = "hatchling.build"
66
66
 
67
67
  [tool.hatch.build.targets.wheel]
68
- packages = ["src/speedy_utils", "src/llm_utils"]
68
+ packages = ["src/speedy_utils", "src/llm_utils", "src/vision_utils"]
69
69
 
70
70
  [tool.black]
71
71
  line-length = 88
@@ -0,0 +1,81 @@
1
+ # Ruff configuration for speedy_utils
2
+ # See: https://docs.astral.sh/ruff/
3
+
4
+ line-length = 88
5
+ target-version = "py310"
6
+ respect-gitignore = true
7
+
8
+ [lint]
9
+ # Enable specific rule sets
10
+ select = [
11
+ "E", # pycodestyle errors
12
+ "W", # pycodestyle warnings
13
+ "F", # pyflakes
14
+ "I", # isort
15
+ "N", # pep8-naming
16
+ "UP", # pyupgrade
17
+ "B", # flake8-bugbear
18
+ "C4", # flake8-comprehensions
19
+ "SIM", # flake8-simplify
20
+ "PIE", # flake8-pie
21
+ "RET", # flake8-return
22
+ "PTH", # flake8-use-pathlib
23
+ ]
24
+
25
+ # Ignore specific rules
26
+ ignore = [
27
+ "E501", # Line too long (handled by formatter)
28
+ "E731", # Lambda assignment
29
+ "E402", # Module level import not at top
30
+ "F401", # Unused imports (common in __init__.py)
31
+ "F403", # Star imports
32
+ "F405", # Name may be undefined from star imports
33
+ "N802", # Function name should be lowercase
34
+ "N803", # Argument name should be lowercase
35
+ "N806", # Variable in function should be lowercase
36
+ "B008", # Do not perform function calls in argument defaults
37
+ "SIM108", # Use ternary operator (sometimes less readable)
38
+ "RET504", # Unnecessary variable assignment before return
39
+ "PTH123", # open() should be replaced by Path.open()
40
+ "UP035", # Deprecated typing imports (e.g. typing.List); use builtin generics
41
+ "PTH", # Pathlib usage warnings (handled by code review)
42
+ "UP", # pyupgrade warnings (handled by code review)
43
+ "N", # pep8-naming warnings (handled by code review)
44
+ ]
45
+
46
+ # Allow autofix for all enabled rules
47
+ fixable = ["ALL"]
48
+ unfixable = []
49
+
50
+ # Exclude patterns
51
+ exclude = [
52
+ ".git",
53
+ ".ruff_cache",
54
+ ".venv",
55
+ "__pycache__",
56
+ "build",
57
+ "dist",
58
+ "*.egg-info",
59
+ ".pytest_cache",
60
+ ".mypy_cache",
61
+ "notebooks",
62
+ ]
63
+
64
+ [lint.per-file-ignores]
65
+ "__init__.py" = ["F401", "F403", "E402"]
66
+ "tests/*" = ["S101"] # Allow assert statements in tests
67
+ "examples/*" = ["T201"] # Allow print statements in examples
68
+
69
+ [lint.isort]
70
+ known-first-party = ["speedy_utils", "llm_utils", "vision_utils"]
71
+ force-single-line = false
72
+ lines-after-imports = 2
73
+
74
+ [lint.mccabe]
75
+ max-complexity = 15
76
+
77
+ [format]
78
+ quote-style = "single"
79
+ indent-style = "space"
80
+ line-ending = "auto"
81
+ skip-magic-trailing-comma = false
@@ -0,0 +1,148 @@
1
+ """Concise import-time helper.
2
+
3
+ Run this script to get a short list of top-level modules that take a
4
+ non-trivial amount of time to import. The script will try CPython's
5
+ ``-X importtime`` and parse it into aggregated per-top-level times.
6
+
7
+ If the interpreter doesn't support ``-X importtime``, the script
8
+ falls back to a small instrumented subprocess that wraps
9
+ ``builtins.__import__`` and reports per-top-level timings.
10
+
11
+ Default threshold: 0.2 seconds. Use --min-sec to change.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import argparse
17
+ import json
18
+ import re
19
+ import subprocess
20
+ import sys
21
+ from typing import Dict, List, Tuple
22
+
23
+
24
+ DEFAULT_MODULES = ['speedy_utils', 'llm_utils', 'vision_utils']
25
+
26
+
27
def parse_x_importtime(stderr: str) -> List[Tuple[str, float]]:
    """Aggregate ``-X importtime`` self-times (in seconds) per top-level module.

    Each matching stderr line carries a self-time and a cumulative time in
    microseconds plus a dotted module name; the self-time (first column) is
    summed under the name's top-level package, which approximates which heavy
    packages cost time during import.

    Returns the (module, seconds) pairs sorted slowest-first.
    """
    line_re = re.compile(r'^\s*import time:\s*(\d+)\s*\|\s*(\d+)\s*\|\s*(.+)$')
    totals: Dict[str, float] = {}
    for raw_line in stderr.splitlines():
        parsed = line_re.match(raw_line)
        if parsed is None:
            continue
        try:
            micros = int(parsed.group(1))
            dotted = parsed.group(3).strip()
        except Exception:
            continue

        # Fold "pkg.sub.mod" into its top-level "pkg" bucket.
        root = dotted.split('.', 1)[0]
        totals[root] = totals.get(root, 0.0) + (micros / 1_000_000.0)

    # Slowest packages first.
    return sorted(totals.items(), key=lambda kv: kv[1], reverse=True)
52
+
53
+
54
def run_importtime(module: str) -> Tuple[bool, str]:
    """Star-import *module* in a subprocess under ``-X importtime``.

    Returns ``(ok, output)`` where ``ok`` means the child exited cleanly and
    produced timing lines on stderr; ``output`` is stderr when present,
    otherwise stdout.
    """
    proc = subprocess.run(
        [sys.executable, '-X', 'importtime', '-c', f'from {module} import *'],
        capture_output=True,
        text=True,
        check=False,
    )
    succeeded = proc.returncode == 0 and bool(proc.stderr.strip())
    return succeeded, proc.stderr or proc.stdout
61
+
62
+
63
def run_timed_import(module: str) -> Tuple[bool, str]:
    """Fallback measurement: time imports by wrapping ``builtins.__import__``.

    Spawns a subprocess that monkey-patches ``__import__`` with a timing
    wrapper, star-imports *module*, restores the original hook, and prints
    the per-top-level timings (sorted slowest-first) as JSON on stdout.

    Returns:
        (ok, output): ``ok`` is True when the child exited cleanly;
        ``output`` is the JSON payload on success, or the stripped
        stderr/stdout on failure.
    """
    # Probe program executed by the child interpreter.
    # NOTE(review): indentation inside these string fragments was flattened by
    # the diff rendering; nesting reconstructed here to be valid Python.
    code = (
        'import builtins, time, json\n'
        'orig = builtins.__import__\n'
        'times = {}\n'
        'def timed(name, globals=None, locals=None, fromlist=(), level=0):\n'
        '    start = time.perf_counter()\n'
        '    try:\n'
        '        return orig(name, globals, locals, fromlist, level)\n'
        '    finally:\n'
        # Attribute elapsed time to the top-level package name only.
        '        elapsed = time.perf_counter() - start\n'
        "        key = name.split('.',1)[0]\n"
        '        times[key] = times.get(key, 0.0) + elapsed\n'
        'builtins.__import__ = timed\n'
        f'from {module} import *\n'
        'builtins.__import__ = orig\n'
        'print(json.dumps(sorted(times.items(), key=lambda it: it[1], reverse=True)))\n'
    )

    cmd = [sys.executable, '-c', code]
    p = subprocess.run(cmd, capture_output=True, text=True, check=False)
    if p.returncode != 0:
        # Surface the child's error output so the caller can report it.
        return False, (p.stderr or p.stdout).strip()
    return True, p.stdout.strip()
87
+
88
+
89
def pretty_print_list(items: List[Tuple[str, float]]) -> None:
    """Print one right-aligned ``<seconds>s <module>`` row per entry."""
    for mod_name, seconds in items:
        print(f'{seconds:6.3f}s {mod_name}')
92
+
93
+
94
def main(argv: List[str] | None = None) -> int:
    """CLI entry: print top-level modules whose import self-time is heavy.

    For each requested module, first tries CPython's ``-X importtime``; when
    that yields no usable output (or ``--no-x`` is given), falls back to the
    ``__import__``-instrumentation subprocess. Always returns exit code 0.
    """
    parser = argparse.ArgumentParser()
    # Modules to profile; defaults to this repo's three packages.
    parser.add_argument('modules', nargs='*', default=DEFAULT_MODULES)
    parser.add_argument(
        '--min-sec', type=float, default=0.2, help='Minimum seconds to show'
    )
    parser.add_argument('--no-x', action='store_true', help="Don't try -X importtime")
    parser.add_argument(
        '--raw', action='store_true', help='Show raw -X output in addition'
    )
    parser.add_argument('-n', '--top', type=int, default=20)
    args = parser.parse_args(argv)

    for module in args.modules:
        print('=' * 60)
        print(f'Module: {module}')
        print('=' * 60)

        if not args.no_x:
            ok, out = run_importtime(module)
            if ok:
                # Aggregate per top-level module, keep entries >= threshold.
                parsed = parse_x_importtime(out)
                filtered = [it for it in parsed if it[1] >= args.min_sec]
                if filtered:
                    print('Top heavy imports (from -X importtime):')
                    pretty_print_list(filtered[: args.top])
                else:
                    print(
                        f'No top-level modules >= {args.min_sec:.3f}s (from -X importtime)'
                    )
                if args.raw:
                    print('\nRaw -X importtime output:\n')
                    print(out)
                # -X importtime succeeded; skip the fallback for this module.
                continue

        # Fallback instrumentation (used when -X importtime failed or --no-x).
        ok, out = run_timed_import(module)
        if not ok:
            print('Failed to measure imports:\n', out)
            continue

        # Child prints a JSON list of [name, seconds] pairs, already sorted.
        items = json.loads(out)
        filtered = [it for it in items if it[1] >= args.min_sec]
        if not filtered:
            print(f'No imports >= {args.min_sec:.3f}s (fallback)')
            continue

        print('Top heavy imports (fallback):')
        pretty_print_list(filtered[: args.top])

    return 0
145
+
146
+
147
if __name__ == '__main__':
    # Exit with main()'s return code when run as a script.
    raise SystemExit(main())
@@ -0,0 +1,9 @@
1
# Show -X importtime lines whose self-time exceeds 900 microseconds.
# Each timing line has the form:
#   import time: <self-us> | <cumulative-us> | <module>
# Split on "|" so $1 reliably holds the self-time column; the previous
# whitespace-split $(NF-1) landed on a "|" separator token, turning the
# threshold check into a lexical string comparison that matched every line.
python -X importtime -c "from speedy_utils import *" 2>&1 \
  | awk -F'|' '
    /import time:/ {
      # strip the "import time:" label, leaving the self-time in microseconds
      self = $1
      sub(/.*import time:[ \t]*/, "", self)
      if (self + 0 > 900) print
    }
  '
@@ -0,0 +1,11 @@
1
from typing import TYPE_CHECKING

import lazy_loader as lazy


# Defer the (expensive) torch import until an attribute is first accessed.
torch = lazy.load('torch')
if TYPE_CHECKING:
    # Give type checkers the real module without paying the runtime import.
    import torch

# Fix: the tensor factory is torch.rand; torch.random is the RNG-state
# module (manual_seed, get_rng_state, ...) and has no `rand` attribute,
# so the original torch.random.rand(2, 2) raised AttributeError.
ran = torch.rand(2, 2)
# print(ran)
@@ -0,0 +1,9 @@
1
import time


# Measure the wall-clock cost of star-importing the package.
start = time.time()
from vision_utils import *


elapsed = time.time() - start
print(f'Imported vision_utils in {elapsed:.4f} seconds')
@@ -1,8 +1,19 @@
1
+ from llm_utils.lm import (
2
+ LLM,
3
+ AsyncLLMTask,
4
+ AsyncLM,
5
+ Input,
6
+ InputField,
7
+ LLMSignature,
8
+ Output,
9
+ OutputField,
10
+ Signature,
11
+ )
12
+ from llm_utils.lm.base_prompt_builder import BasePromptBuilder
13
+ from llm_utils.lm.lm_base import get_model_name
1
14
  from llm_utils.lm.openai_memoize import MOpenAI
2
- from llm_utils.lm import LLM, AsyncLM, AsyncLLMTask, LLMSignature, Signature, InputField, OutputField, Input, Output
3
15
  from llm_utils.vector_cache import VectorCache
4
- from llm_utils.lm.lm_base import get_model_name
5
- from llm_utils.lm.base_prompt_builder import BasePromptBuilder
16
+
6
17
 
7
18
  LLM_TASK = LLM
8
19
 
@@ -24,13 +35,14 @@ from llm_utils.chat_format import (
24
35
  display_conversations,
25
36
  format_msgs,
26
37
  get_conversation_one_turn,
27
- show_chat_v2,
28
38
  show_chat,
39
+ show_chat_v2,
29
40
  show_string_diff,
30
41
  transform_messages,
31
42
  transform_messages_to_chatml,
32
43
  )
33
44
 
45
+
34
46
  __all__ = [
35
47
  "transform_messages",
36
48
  "transform_messages_to_chatml",
@@ -77,7 +77,7 @@ def show_chat(
77
77
  theme: str = "default",
78
78
  as_markdown: bool = False,
79
79
  as_json: bool = False,
80
- ) -> Optional[str]:
80
+ ) -> str | None:
81
81
  """
82
82
  Display chat messages as HTML.
83
83
 
@@ -168,7 +168,10 @@ def show_chat(
168
168
  content = content.replace("\t", "    ")
169
169
  content = content.replace(" ", "  ")
170
170
  content = (
171
- content.replace("<br>", "TEMP_BR").replace("<", "&lt;").replace(">", "&gt;").replace("TEMP_BR", "<br>")
171
+ content.replace("<br>", "TEMP_BR")
172
+ .replace("<", "&lt;")
173
+ .replace(">", "&gt;")
174
+ .replace("TEMP_BR", "<br>")
172
175
  )
173
176
  if role in color_scheme:
174
177
  background_color = color_scheme[role]["background"]
@@ -239,15 +242,15 @@ def show_chat(
239
242
  f.write(html)
240
243
  if return_html:
241
244
  return html
242
- else:
243
- display(HTML(html))
245
+ display(HTML(html))
246
+ return None
244
247
 
245
248
 
246
249
  def get_conversation_one_turn(
247
- system_msg: Optional[str] = None,
248
- user_msg: Optional[str] = None,
249
- assistant_msg: Optional[str] = None,
250
- assistant_prefix: Optional[str] = None,
250
+ system_msg: str | None = None,
251
+ user_msg: str | None = None,
252
+ assistant_msg: str | None = None,
253
+ assistant_prefix: str | None = None,
251
254
  return_format: str = "chatml",
252
255
  ) -> Any:
253
256
  """
@@ -261,7 +264,9 @@ def get_conversation_one_turn(
261
264
  if assistant_msg is not None:
262
265
  messages.append({"role": "assistant", "content": assistant_msg})
263
266
  if assistant_prefix is not None:
264
- assert return_format != "chatml", 'Change return_format to "text" if you want to use assistant_prefix'
267
+ assert (
268
+ return_format != "chatml"
269
+ ), 'Change return_format to "text" if you want to use assistant_prefix'
265
270
  assert messages[-1]["role"] == "user"
266
271
  from .transform import transform_messages
267
272
 
@@ -270,9 +275,8 @@ def get_conversation_one_turn(
270
275
  msg = str(msg)
271
276
  msg += assistant_prefix
272
277
  return msg
273
- else:
274
- assert return_format in ["chatml"]
275
- return messages
278
+ assert return_format in ["chatml"]
279
+ return messages
276
280
 
277
281
 
278
282
  def highlight_diff_chars(text1: str, text2: str) -> str:
@@ -286,13 +290,21 @@ def highlight_diff_chars(text1: str, text2: str) -> str:
286
290
  html.append(text1[i1:i2])
287
291
  elif tag == "replace":
288
292
  if i1 != i2:
289
- html.append(f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>')
293
+ html.append(
294
+ f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>'
295
+ )
290
296
  if j1 != j2:
291
- html.append(f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>')
297
+ html.append(
298
+ f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>'
299
+ )
292
300
  elif tag == "delete":
293
- html.append(f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>')
301
+ html.append(
302
+ f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>'
303
+ )
294
304
  elif tag == "insert":
295
- html.append(f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>')
305
+ html.append(
306
+ f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>'
307
+ )
296
308
  return "".join(html)
297
309
 
298
310
 
@@ -321,7 +333,7 @@ def show_chat_v2(messages: list[dict[str, str]]):
321
333
 
322
334
  if is_notebook:
323
335
  # Use HTML display in notebook
324
- from IPython.display import display, HTML
336
+ from IPython.display import HTML, display
325
337
 
326
338
  role_colors = {
327
339
  "system": "red",
@@ -353,9 +365,7 @@ def show_chat_v2(messages: list[dict[str, str]]):
353
365
  html += f"<div style='color:{color}'><strong>{label}</strong><br>{content}</div>"
354
366
  # Add separator except after last message
355
367
  if i < len(messages) - 1:
356
- html += (
357
- "<div style='color:#888; margin:0.5em 0;'>───────────────────────────────────────────────────</div>"
358
- )
368
+ html += "<div style='color:#888; margin:0.5em 0;'>───────────────────────────────────────────────────</div>"
359
369
  html += "</div>"
360
370
 
361
371
  display(HTML(html))
@@ -385,7 +395,9 @@ def show_chat_v2(messages: list[dict[str, str]]):
385
395
  print(f"{color}{content}{reset}")
386
396
  # Add separator except after last message
387
397
  if i < len(messages) - 1:
388
- print(f"{separator_color}─────────────────────────────────────────────────────────{reset}")
398
+ print(
399
+ f"{separator_color}─────────────────────────────────────────────────────────{reset}"
400
+ )
389
401
 
390
402
 
391
403
  def display_conversations(data1: Any, data2: Any, theme: str = "light") -> None: