speedy-utils 1.1.27__py3-none-any.whl → 1.1.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_utils/__init__.py +16 -4
- llm_utils/chat_format/__init__.py +10 -10
- llm_utils/chat_format/display.py +33 -21
- llm_utils/chat_format/transform.py +17 -19
- llm_utils/chat_format/utils.py +6 -4
- llm_utils/group_messages.py +17 -14
- llm_utils/lm/__init__.py +6 -5
- llm_utils/lm/async_lm/__init__.py +1 -0
- llm_utils/lm/async_lm/_utils.py +10 -9
- llm_utils/lm/async_lm/async_llm_task.py +141 -137
- llm_utils/lm/async_lm/async_lm.py +48 -42
- llm_utils/lm/async_lm/async_lm_base.py +59 -60
- llm_utils/lm/async_lm/lm_specific.py +4 -3
- llm_utils/lm/base_prompt_builder.py +93 -70
- llm_utils/lm/llm.py +126 -108
- llm_utils/lm/llm_signature.py +4 -2
- llm_utils/lm/lm_base.py +72 -73
- llm_utils/lm/mixins.py +102 -62
- llm_utils/lm/openai_memoize.py +124 -87
- llm_utils/lm/signature.py +105 -92
- llm_utils/lm/utils.py +42 -23
- llm_utils/scripts/vllm_load_balancer.py +23 -30
- llm_utils/scripts/vllm_serve.py +8 -7
- llm_utils/vector_cache/__init__.py +9 -3
- llm_utils/vector_cache/cli.py +1 -1
- llm_utils/vector_cache/core.py +59 -63
- llm_utils/vector_cache/types.py +7 -5
- llm_utils/vector_cache/utils.py +12 -8
- speedy_utils/__imports.py +244 -0
- speedy_utils/__init__.py +90 -194
- speedy_utils/all.py +125 -227
- speedy_utils/common/clock.py +37 -42
- speedy_utils/common/function_decorator.py +6 -12
- speedy_utils/common/logger.py +43 -52
- speedy_utils/common/notebook_utils.py +13 -21
- speedy_utils/common/patcher.py +21 -17
- speedy_utils/common/report_manager.py +42 -44
- speedy_utils/common/utils_cache.py +152 -169
- speedy_utils/common/utils_io.py +137 -103
- speedy_utils/common/utils_misc.py +15 -21
- speedy_utils/common/utils_print.py +22 -28
- speedy_utils/multi_worker/process.py +66 -79
- speedy_utils/multi_worker/thread.py +78 -155
- speedy_utils/scripts/mpython.py +38 -36
- speedy_utils/scripts/openapi_client_codegen.py +10 -10
- {speedy_utils-1.1.27.dist-info → speedy_utils-1.1.29.dist-info}/METADATA +1 -1
- speedy_utils-1.1.29.dist-info/RECORD +57 -0
- vision_utils/README.md +202 -0
- vision_utils/__init__.py +4 -0
- vision_utils/io_utils.py +735 -0
- vision_utils/plot.py +345 -0
- speedy_utils-1.1.27.dist-info/RECORD +0 -52
- {speedy_utils-1.1.27.dist-info → speedy_utils-1.1.29.dist-info}/WHEEL +0 -0
- {speedy_utils-1.1.27.dist-info → speedy_utils-1.1.29.dist-info}/entry_points.txt +0 -0
vision_utils/plot.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
|
|
3
|
+
|
|
4
|
+
from speedy_utils.__imports import np, plt
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
|
|
9
|
+
import lazy_loader as lazy
|
|
10
|
+
import matplotlib.pyplot as plt
|
|
11
|
+
import numpy as np
|
|
12
|
+
import torch
|
|
13
|
+
|
|
14
|
+
from .io_utils import read_images
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _check_torch_available():
|
|
18
|
+
"""Check if torch is available without importing at module level."""
|
|
19
|
+
try:
|
|
20
|
+
import torch
|
|
21
|
+
|
|
22
|
+
return True, torch
|
|
23
|
+
except ImportError:
|
|
24
|
+
return False, None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _check_matplotlib_available():
|
|
28
|
+
"""Check if matplotlib is available without importing at module level."""
|
|
29
|
+
try:
|
|
30
|
+
import matplotlib.pyplot as plt
|
|
31
|
+
|
|
32
|
+
return True, plt
|
|
33
|
+
except ImportError:
|
|
34
|
+
return False, None
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _to_numpy(img: Any) -> np.ndarray:
    """Return *img* as a numpy array.

    Numpy arrays pass through untouched; torch tensors are detached and
    copied to CPU first. Anything else raises ``TypeError``.
    """
    # Fast path: already a numpy array (a torch tensor never is).
    if isinstance(img, np.ndarray):
        return img
    has_torch, torch_mod = _check_torch_available()
    if has_torch and torch_mod is not None and isinstance(img, torch_mod.Tensor):
        return img.detach().cpu().numpy()
    raise TypeError(
        f'Unsupported image type: {type(img)}. Expected numpy.ndarray or torch.Tensor'
    )
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _normalize_image_format(img: np.ndarray) -> np.ndarray:
|
|
50
|
+
"""
|
|
51
|
+
Normalize image to (H, W, C) format.
|
|
52
|
+
|
|
53
|
+
Detects and converts from:
|
|
54
|
+
- (C, H, W) where C is 1 or 3
|
|
55
|
+
- (H, W) grayscale
|
|
56
|
+
- (H, W, C) already correct
|
|
57
|
+
"""
|
|
58
|
+
if img.ndim == 2:
|
|
59
|
+
# Grayscale (H, W) -> (H, W, 1)
|
|
60
|
+
return img[:, :, np.newaxis]
|
|
61
|
+
if img.ndim == 3:
|
|
62
|
+
# Check if it's (C, H, W) format
|
|
63
|
+
if img.shape[0] in [1, 3] and img.shape[0] < min(img.shape[1:]):
|
|
64
|
+
# Likely (C, H, W) -> transpose to (H, W, C)
|
|
65
|
+
return np.transpose(img, (1, 2, 0))
|
|
66
|
+
# Already (H, W, C)
|
|
67
|
+
return img
|
|
68
|
+
raise ValueError(f'Invalid image shape: {img.shape}. Expected 2D or 3D array')
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _normalize_batch(
    images: Union[np.ndarray, List[np.ndarray], List[Any], Any],
) -> List[np.ndarray]:
    """
    Normalize batch of images to list of (H, W, C) numpy arrays.

    Handles:
    - List of numpy arrays or torch tensors
    - List of file paths (strings or Path objects)
    - Single numpy array of shape (B, H, W, C) or (B, C, H, W)
    - Single torch tensor of shape (B, H, W, C) or (B, C, H, W)

    Raises:
        ValueError: if an array has an unsupported rank, or if the number
            of images loaded from disk differs from the paths requested.
        TypeError: if *images* (or a list element) is not a supported type.
    """
    # Convert to numpy if torch tensor
    torch_available, torch = _check_torch_available()
    if torch_available and torch is not None and isinstance(images, torch.Tensor):
        images = images.detach().cpu().numpy()

    # Handle single numpy array with batch dimension
    if isinstance(images, np.ndarray):
        if images.ndim == 4:
            # (B, H, W, C) or (B, C, H, W)
            # Check if it's (B, C, H, W) format
            # (same channels-first heuristic as _normalize_image_format,
            # applied to axis 1 because axis 0 is the batch)
            if images.shape[1] in [1, 3] and images.shape[1] < min(images.shape[2:]):
                # (B, C, H, W) -> transpose to (B, H, W, C)
                images = np.transpose(images, (0, 2, 3, 1))
            # Convert to list of images
            images = [images[i] for i in range(images.shape[0])]
        elif images.ndim == 3:
            # Single image (H, W, C) or (C, H, W)
            images = [_normalize_image_format(images)]
        elif images.ndim == 2:
            # Single grayscale image (H, W)
            images = [images[:, :, np.newaxis]]
        else:
            raise ValueError(
                f'Invalid array shape: {images.shape}. Expected 2D, 3D, or 4D array'
            )

    # Handle list of images
    if isinstance(images, list):
        # Indices of elements that are file paths rather than arrays/tensors.
        path_indices = [
            idx for idx, img in enumerate(images) if isinstance(img, (str, Path))
        ]

        # Bulk load any file paths while preserving order
        loaded_paths = {}
        if path_indices:
            loaded_arrays = read_images([str(images[idx]) for idx in path_indices])

            if len(loaded_arrays) != len(path_indices):
                raise ValueError(
                    'Number of loaded images does not match number of paths provided.'
                )

            # Map original list position -> loaded array so the final pass
            # can interleave loaded and in-memory images in input order.
            for idx, arr in zip(path_indices, loaded_arrays, strict=False):
                loaded_paths[idx] = arr

        normalized = []
        for idx, img in enumerate(images):
            if idx in loaded_paths:
                img_np = loaded_paths[idx]
            else:
                img_np = _to_numpy(img)
            img_normalized = _normalize_image_format(img_np)
            normalized.append(img_normalized)
        return normalized

    raise TypeError(
        f'Unsupported images type: {type(images)}. '
        'Expected list, numpy.ndarray, or torch.Tensor'
    )
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def plot_images_notebook(
    images: Union[np.ndarray, List[np.ndarray], List[Any], Any],
    nrows: Optional[int] = None,
    ncols: Optional[int] = None,
    figsize: Optional[Tuple[float, float]] = None,
    titles: Optional[List[str]] = None,
    cmap: Optional[str] = None,
    dpi: int = 300,
    max_figure_width: float = 15.0,
    max_figure_height: float = 20.0,
):
    """
    Plot a batch of images in a notebook with smart grid layout.
    Handles images of different shapes gracefully.

    Args:
        images: Images to plot. Can be:
            - List of numpy arrays or torch tensors (can have different shapes)
            - List of image file paths (strings or Path objects)
            - Numpy array of shape (B, H, W, C) or (B, C, H, W)
            - Torch tensor of shape (B, H, W, C) or (B, C, H, W)
            Each image can be (H, W), (H, W, C), (C, H, W) format
        nrows: Number of rows in grid. If None, auto-calculated from sqrt
        ncols: Number of columns in grid. If None, auto-calculated from sqrt
        figsize: Figure size (width, height). If None, auto-calculated
        titles: List of titles for each image
        cmap: Colormap for grayscale images (default: 'gray')
        dpi: Dots per inch for the figure (default: 300)
        max_figure_width: Maximum figure width in inches (default: 15)
        max_figure_height: Maximum figure height in inches (default: 20)

    Raises:
        ImportError: if matplotlib is not installed.
        ValueError: if the batch is empty or has an unsupported shape.

    Example:
        >>> import numpy as np
        >>> # Auto grid layout with sqrt
        >>> images = np.random.rand(9, 64, 64, 3)
        >>> plot_images_notebook(images)  # 3x3 grid

        >>> # Custom grid
        >>> images = np.random.rand(8, 64, 64, 3)
        >>> plot_images_notebook(images, nrows=2, ncols=4)

        >>> # PyTorch tensor in (B, C, H, W) format
        >>> import torch
        >>> images = torch.rand(8, 3, 64, 64)
        >>> plot_images_notebook(images)

        >>> # List of images with different formats and shapes
        >>> images = [
        ...     np.random.rand(64, 64, 3),  # (H, W, C)
        ...     np.random.rand(3, 128, 128),  # (C, H, W) - different size
        ...     torch.rand(32, 48),  # Grayscale - different size
        ...     np.random.rand(100, 200, 3),  # Different aspect ratio
        ... ]
        >>> plot_images_notebook(images, ncols=2)
    """
    # Check matplotlib availability
    mpl_available, plt = _check_matplotlib_available()
    if not mpl_available:
        raise ImportError("matplotlib is required for plotting. Install it with: pip install matplotlib")

    # Normalize all images to list of (H, W, C) numpy arrays
    images_list = _normalize_batch(images)

    n_images = len(images_list)
    # Guard: the grid and figure-size math below divides by n_images, so an
    # empty batch would otherwise die with an opaque ZeroDivisionError.
    if n_images == 0:
        raise ValueError('No images to plot: received an empty batch')

    # Smart grid layout calculation
    if nrows is None and ncols is None:
        # Use sqrt to get roughly square grid
        ncols = int(np.ceil(np.sqrt(n_images)))
        nrows = int(np.ceil(n_images / ncols))
    elif nrows is None:
        # Calculate rows from columns
        assert ncols is not None
        nrows = int(np.ceil(n_images / ncols))
    elif ncols is None:
        # Calculate columns from rows
        assert nrows is not None
        ncols = int(np.ceil(n_images / nrows))

    # At this point, both nrows and ncols are guaranteed to be int
    assert nrows is not None and ncols is not None

    # Auto-calculate figure size if not provided
    if figsize is None:
        # Calculate based on average aspect ratio across all images
        avg_aspect_ratio = 0.0
        for img in images_list:
            img_height, img_width = img.shape[:2]
            avg_aspect_ratio += img_width / img_height
        avg_aspect_ratio /= n_images

        # Target cell size in inches (smaller for many images)
        if n_images <= 4:
            cell_width = 4.0
        elif n_images <= 9:
            cell_width = 3.0
        elif n_images <= 16:
            cell_width = 2.5
        else:
            cell_width = 2.0

        cell_height = cell_width / avg_aspect_ratio

        fig_width = ncols * cell_width
        fig_height = nrows * cell_height

        # Constrain to max sizes to prevent notebook breaking
        if fig_width > max_figure_width:
            scale = max_figure_width / fig_width
            fig_width = max_figure_width
            fig_height *= scale

        if fig_height > max_figure_height:
            scale = max_figure_height / fig_height
            fig_height = max_figure_height
            fig_width *= scale

        figsize = (fig_width, fig_height)

    fig, axes = plt.subplots(nrows, ncols, figsize=figsize, dpi=dpi, squeeze=False)

    # Flatten axes for easier iteration
    axes_flat = axes.flatten()

    for idx, (ax, img) in enumerate(zip(axes_flat, images_list, strict=False)):
        # Determine if grayscale
        is_grayscale = img.shape[-1] == 1

        if is_grayscale:
            ax.imshow(img[:, :, 0], cmap=cmap or 'gray', aspect='auto')
        else:
            # Clip values to [0, 1] if they look like normalized images
            if img.max() <= 1.0:
                img_display = np.clip(img, 0, 1)
            else:
                # Assume [0, 255] range
                img_display = np.clip(img / 255.0, 0, 1)
            ax.imshow(img_display, aspect='auto')

        ax.axis('off')

        if titles and idx < len(titles):
            ax.set_title(titles[idx], fontsize=8 if n_images > 9 else 10)

    # Hide unused subplots
    for idx in range(n_images, len(axes_flat)):
        axes_flat[idx].axis('off')

    plt.tight_layout()
    plt.show()
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
def visualize_tensor(img_tensor, mode='hwc', normalize=True, max_cols=8):
    """
    Visualize a tensor as an image or grid.

    Args:
        img_tensor: torch.Tensor, shape (C,H,W), (H,W,C), or (B,C,H,W)
        mode: "hwc", "chw", or "bchw"
        normalize: scale float tensor to 0–255 uint8 for display
        max_cols: max columns when tiling a batch

    Raises:
        ImportError: if matplotlib is not installed.
        ValueError: if *mode* is not one of the supported layouts.
    """
    # Check matplotlib availability
    mpl_available, plt = _check_matplotlib_available()
    if not mpl_available:
        raise ImportError("matplotlib is required for plotting. Install it with: pip install matplotlib")

    if mode == 'chw':
        img_tensor = img_tensor.permute(1, 2, 0)
        imgs = [img_tensor]
    elif mode == 'bchw':
        # Only the batch size is needed here; C/H/W are rearranged by the
        # per-image permute below (the old `b, c, h, w = ...` unpack left
        # three unused names).
        batch_size = img_tensor.shape[0]
        imgs = [img_tensor[i].permute(1, 2, 0) for i in range(batch_size)]
    elif mode == 'hwc':
        imgs = [img_tensor]
    else:
        raise ValueError("mode must be 'hwc', 'chw', or 'bchw'")

    # normalize each image
    processed = []
    for img in imgs:
        img = img.detach().cpu().numpy()
        if normalize:
            # Min-max scale to [0, 1]; the epsilon guards constant images.
            img = (img - img.min()) / (img.max() - img.min() + 1e-8)
            img = (img * 255).astype(np.uint8)
        processed.append(img)

    if len(processed) == 1:
        plt.imshow(processed[0])
    else:
        cols = min(max_cols, len(processed))
        rows = int(np.ceil(len(processed) / cols))
        fig, axes = plt.subplots(rows, cols, figsize=(cols * 2, rows * 2))
        axes = np.atleast_2d(axes)
        for ax, img in zip(axes.flat, processed, strict=False):
            ax.imshow(img)
            ax.axis('off')
        for ax in axes.flat[len(processed):]:
            ax.axis('off')
    # NOTE(review): source formatting was lost; plt.show() is placed at
    # function level so both the single-image and grid branches render —
    # confirm against the original layout.
    plt.show()
|
|
@@ -1,52 +0,0 @@
|
|
|
1
|
-
llm_utils/__init__.py,sha256=pbnOQddU5KnhP8uqMqN9E87BeDeCxFrgta2m2P89LmM,1591
|
|
2
|
-
llm_utils/group_messages.py,sha256=Oe2tlhg-zRodG1-hodYebddrR77j9UdE05LzJw0EvYI,3622
|
|
3
|
-
llm_utils/chat_format/__init__.py,sha256=MCNT8o-BZWmoOFE5VLyhJJOqHg8lJGqHXEKSXU08fK0,775
|
|
4
|
-
llm_utils/chat_format/display.py,sha256=HiAOAC8FY7956gNuwE7rxii1MCCebn0avbXi1iIcDSc,17178
|
|
5
|
-
llm_utils/chat_format/transform.py,sha256=eU0c3PdAHCNLuGP1UqPwln0B34Lv3bt_uV9v9BrlCN4,5402
|
|
6
|
-
llm_utils/chat_format/utils.py,sha256=xTxN4HrLHcRO2PfCTR43nH1M5zCa7v0kTTdzAcGkZg0,1229
|
|
7
|
-
llm_utils/lm/__init__.py,sha256=FBe8wVNWDMpvJ2kQYedJ3HH5L2BCAZBQVE0zEjND0Vo,729
|
|
8
|
-
llm_utils/lm/base_prompt_builder.py,sha256=OLqyxbA8QeYIVFzB9EqxUiE_P2p4_MD_Lq4WSwxFtKU,12136
|
|
9
|
-
llm_utils/lm/llm.py,sha256=uk45JhVcWDMaqezn9Yn_K5hehFSmQ4txU901fn_PcQg,16262
|
|
10
|
-
llm_utils/lm/llm_signature.py,sha256=SP72cWXaVGcZs3m2V361DcLk_St7aYJamNapUiFBB6Q,1242
|
|
11
|
-
llm_utils/lm/lm_base.py,sha256=pqbHZOdR7yUMpvwt8uBG1dZnt76SY_Wk8BkXQQ-mpWs,9557
|
|
12
|
-
llm_utils/lm/mixins.py,sha256=Sn5KyPKGCT_HVJmmosmy3XSlZ0_k5Kds0VvSJqeUDpI,13695
|
|
13
|
-
llm_utils/lm/openai_memoize.py,sha256=PDs3YCXKgHXaHlegkhouzPtf2Gom_o7pvzChCT-NQyQ,3870
|
|
14
|
-
llm_utils/lm/signature.py,sha256=16QOHnGc-p7H8rR3j1dPg8AokdV_rEGUYCGGkIHIghE,10240
|
|
15
|
-
llm_utils/lm/utils.py,sha256=oiJ50b8WV6oktnW4BByr1gRaGc55VJeF3IyhHqoofp4,12193
|
|
16
|
-
llm_utils/lm/async_lm/__init__.py,sha256=PUBbCuf5u6-0GBUu-2PI6YAguzsyXj-LPkU6vccqT6E,121
|
|
17
|
-
llm_utils/lm/async_lm/_utils.py,sha256=P1-pUDf_0pDmo8WTIi43t5ARlyGA1RIJfpAhz-gfA5g,6105
|
|
18
|
-
llm_utils/lm/async_lm/async_llm_task.py,sha256=-BVOk18ZD8eC2obTLgiPq39f2PP3cji17Ku-Gb7c7Xo,18683
|
|
19
|
-
llm_utils/lm/async_lm/async_lm.py,sha256=e3o9cyMbkVz_jQDTjJv2ybET_5mY012zdZGjNwi4Qk4,13719
|
|
20
|
-
llm_utils/lm/async_lm/async_lm_base.py,sha256=iJgtzI6pVJzWtlXGqVLwgCIb-FzZAa3E5xW8yhyHUmM,8426
|
|
21
|
-
llm_utils/lm/async_lm/lm_specific.py,sha256=KmqdCm3SJ5MqN-dRJd6S5tq5-ve1X2eNWf2CMFtc_3s,3926
|
|
22
|
-
llm_utils/scripts/README.md,sha256=yuOLnLa2od2jp4wVy3rV0rESeiV3o8zol5MNMsZx0DY,999
|
|
23
|
-
llm_utils/scripts/vllm_load_balancer.py,sha256=TT5Ypq7gUcl52gRFp--ORFFjzhfGlcaX2rkRv8NxlxU,37259
|
|
24
|
-
llm_utils/scripts/vllm_serve.py,sha256=gJ0-y4kybMfSt8qzye1pJqGMY3x9JLRi6Tu7RjJMnss,14771
|
|
25
|
-
llm_utils/vector_cache/__init__.py,sha256=i1KQuC4OhPewYpFl9X6HlWFBuASCTx2qgGizhpZhmn0,862
|
|
26
|
-
llm_utils/vector_cache/cli.py,sha256=DMXTj8nZ2_LRjprbYPb4uzq04qZtOfBbmblmaqDcCuM,6251
|
|
27
|
-
llm_utils/vector_cache/core.py,sha256=J8ocRX9sBfzboQkf5vFF2cx0SK-nftmKWJUa91WUBy8,31134
|
|
28
|
-
llm_utils/vector_cache/types.py,sha256=ru8qmUZ8_lNd3_oYpjCMtpXTsqmwsSBe56Z4hTWm3xI,435
|
|
29
|
-
llm_utils/vector_cache/utils.py,sha256=dwbbXlRrARrpmS4YqSlYQqrTURg0UWe8XvaAWcX05MM,1458
|
|
30
|
-
speedy_utils/__init__.py,sha256=wPz1MNAicV7skqqZloUFt5QrJcAhxtPQ4jFXk2lz6YA,6190
|
|
31
|
-
speedy_utils/all.py,sha256=gXXRlBLvU8AON7XqO6iFQ8LCIQEIcP_2CDumd_U1ppI,5171
|
|
32
|
-
speedy_utils/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
33
|
-
speedy_utils/common/clock.py,sha256=3n4FkCW0dz46O8By09V5Pve1DSMgpLDRbWEVRryryeQ,7423
|
|
34
|
-
speedy_utils/common/function_decorator.py,sha256=BspJ0YuGL6elS7lWBAgELZ-sCfED_1N2P5fgH-fCRUQ,2132
|
|
35
|
-
speedy_utils/common/logger.py,sha256=a2iZx0eWyfi2-2X_H2QmfuA3tfR7_XSM7Nd0GdUnUOs,6435
|
|
36
|
-
speedy_utils/common/notebook_utils.py,sha256=-97kehJ_Gg3TzDLubsLIYJcykqX1NXhbvBO6nniZSYM,2063
|
|
37
|
-
speedy_utils/common/patcher.py,sha256=VCmdxyTF87qroggQkQklRPhAOPJbeBqhcJoTsLcDxNw,2303
|
|
38
|
-
speedy_utils/common/report_manager.py,sha256=eBiw5KY6bWUhwki3B4lK5o8bFsp7L5x28X9GCI-Sd1w,3899
|
|
39
|
-
speedy_utils/common/utils_cache.py,sha256=h3JbIi0V5pTaFNJDjfwORSN63bc0SrRq_dm8KZJiL94,27023
|
|
40
|
-
speedy_utils/common/utils_io.py,sha256=E7mbxB_OpLvNWoFM2Qpxi1jaD8VwF-tvNOpGbf7swuU,14849
|
|
41
|
-
speedy_utils/common/utils_misc.py,sha256=yYlyP0eXQuapY1dn5O8-UDePPq5bb6FxKFjb1kfZy5o,2354
|
|
42
|
-
speedy_utils/common/utils_print.py,sha256=syRrnSFtguxrV-elx6DDVcSGu4Qy7D_xVNZhPwbUY4A,4864
|
|
43
|
-
speedy_utils/multi_worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
44
|
-
speedy_utils/multi_worker/process.py,sha256=RGGGnbZXCbEbdmxFVmnNfyccClAlflzRPE0d1C3CeeE,11385
|
|
45
|
-
speedy_utils/multi_worker/thread.py,sha256=bRjxUHkBjbXHQ2KSsf-Zao28zbSId-8mqMFHwSG1l1s,25206
|
|
46
|
-
speedy_utils/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
47
|
-
speedy_utils/scripts/mpython.py,sha256=IvywP7Y0_V6tWfMP-4MjPvN5_KfxWF21xaLJsCIayCk,3821
|
|
48
|
-
speedy_utils/scripts/openapi_client_codegen.py,sha256=f2125S_q0PILgH5dyzoKRz7pIvNEjCkzpi4Q4pPFRZE,9683
|
|
49
|
-
speedy_utils-1.1.27.dist-info/METADATA,sha256=5My5GhQX7OMEm4TYHSd9Pupc3ejUxnqzS3gLigj4gtQ,8028
|
|
50
|
-
speedy_utils-1.1.27.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
51
|
-
speedy_utils-1.1.27.dist-info/entry_points.txt,sha256=1rrFMfqvaMUE9hvwGiD6vnVh98kmgy0TARBj-v0Lfhs,244
|
|
52
|
-
speedy_utils-1.1.27.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|