speedy-utils 1.1.23__tar.gz → 1.1.25__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- speedy_utils-1.1.25/IMPROVEMENTS.md +141 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/PKG-INFO +1 -1
- speedy_utils-1.1.25/debug_imemoize.py +66 -0
- speedy_utils-1.1.25/debug_imemoize_flow.py +95 -0
- speedy_utils-1.1.25/examples/temperature_range_example.py +119 -0
- speedy_utils-1.1.25/examples_improved_error_tracing.py +85 -0
- speedy_utils-1.1.25/notebooks/llm_utils/llm_as_a_judge.ipynb +300 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/pyproject.toml +1 -1
- speedy_utils-1.1.25/simple_test_imemoize.py +64 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/__init__.py +12 -8
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/chat_format/__init__.py +2 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/chat_format/display.py +115 -44
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/__init__.py +14 -6
- speedy_utils-1.1.25/src/llm_utils/lm/llm.py +413 -0
- speedy_utils-1.1.25/src/llm_utils/lm/llm_signature.py +35 -0
- speedy_utils-1.1.25/src/llm_utils/lm/mixins.py +379 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/openai_memoize.py +18 -7
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/signature.py +26 -37
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/utils.py +61 -76
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/__init__.py +31 -2
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/all.py +30 -1
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/utils_cache.py +142 -1
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/utils_io.py +36 -26
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/utils_misc.py +25 -1
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/multi_worker/thread.py +145 -58
- speedy_utils-1.1.25/test_imemoize.py +116 -0
- speedy_utils-1.1.25/test_imemoize_persistence.py +122 -0
- speedy_utils-1.1.25/tests/llm_utils/test_llm_mixins.py +153 -0
- speedy_utils-1.1.25/tests/test_multithread_error_trace.py +117 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/uv.lock +8 -8
- speedy_utils-1.1.23/notebooks/llm_utils/llm_as_a_judge.ipynb +0 -642
- speedy_utils-1.1.23/src/llm_utils/lm/llm_as_a_judge.py +0 -390
- speedy_utils-1.1.23/src/llm_utils/lm/llm_task.py +0 -614
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/.github/copilot-instructions.md +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/.github/workflows/publish.yml +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/.gitignore +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/.pre-commit-config.yaml +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/README.md +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/bumpversion.sh +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/notebooks/test_multi_thread.ipynb +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/ruff.toml +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/scripts/deploy.sh +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/setup.cfg +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/chat_format/transform.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/chat_format/utils.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/group_messages.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/__init__.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/_utils.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/async_llm_task.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/async_lm.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/async_lm_base.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/async_lm/lm_specific.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/base_prompt_builder.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/lm/lm_base.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/scripts/README.md +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/scripts/vllm_load_balancer.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/scripts/vllm_serve.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/vector_cache/__init__.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/vector_cache/cli.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/vector_cache/core.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/vector_cache/types.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/llm_utils/vector_cache/utils.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/__init__.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/clock.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/function_decorator.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/logger.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/notebook_utils.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/patcher.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/report_manager.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/common/utils_print.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/multi_worker/__init__.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/multi_worker/process.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/scripts/__init__.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/scripts/mpython.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/src/speedy_utils/scripts/openapi_client_codegen.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/sample_objects.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_logger.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_logger_format.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_memoize_typing.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_mpython.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_process.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_process_update.py +0 -0
- {speedy_utils-1.1.23 → speedy_utils-1.1.25}/tests/test_thread.py +0 -0
+++ speedy_utils-1.1.25/IMPROVEMENTS.md
@@ -0,0 +1,141 @@
# Multi-thread Error Tracing Improvements

## Summary

Significantly improved error tracing in `multi_thread` to focus on user code rather than infrastructure frames, making debugging much faster and easier.

## Problem

Previously, when errors occurred in functions executed by `multi_thread`, the traceback was cluttered with infrastructure frames:

- `concurrent.futures` internals
- `threading.py` frames
- `multi_worker/thread.py` infrastructure code

This made it difficult to quickly identify the actual problem in user code.

### Example of OLD behavior:

```
TypeError Traceback (most recent call last)
Cell In[810], line 35
     33 choices = multi_thread(fns, range(n))
     34 return choices
---> 35 choices = translate()

File ~/projects/speedy_utils/src/speedy_utils/multi_worker/thread.py:474, in multi_thread(...)
    472 idx, logical_size = _future_meta(fut)
    473 try:
--> 474 result = fut.result()
    475 except Exception as exc:
    476 if stop_on_error:

File ~/.local/share/uv/python/.../concurrent/futures/_base.py:449, in Future.result(...)
    447 raise CancelledError()
    448 elif self._state == FINISHED:
--> 449 return self.__get_result()

... (many more infrastructure frames) ...

TypeError: 'list' object is not callable
```

## Solution

### 1. Added `UserFunctionError` Exception Class

A custom exception wrapper that:

- Captures the original exception
- Stores the function name and input that caused the error
- Filters traceback to include only user code frames
- Provides clear, focused error messages

### 2. Enhanced `_worker` Function

- Added validation to detect common mistakes (e.g., passing a list instead of a function)
- Filters tracebacks to remove infrastructure frames
- Wraps user function errors in `UserFunctionError` with clean context
- Provides helpful hints for common mistakes

### 3. Improved Error Reporting in `multi_thread`

- Logs clear error messages showing function name and input
- Displays only user code in tracebacks
- Re-raises exceptions with cleaned messages
- Maintains proper exception chaining while hiding infrastructure noise

## Benefits

### Clear Error Messages

```
Error in function "process_item" with input: 0

User code traceback:
  File "your_script.py", line 20, in process_item
    return my_list(x)
           ^^^^^^^^^^
TypeError: 'list' object is not callable
```

### Helpful Hints

```
TypeError:
multi_thread: func parameter must be callable, got list: [...]
Hint: Did you accidentally pass a list instead of a function?
```

### Nested Function Support

Shows complete call chain through user code:

```
Error in function "process_data" with input: 0

User code traceback:
  File "your_script.py", line 44, in process_data
    return validate_and_calc(val)
           ^^^^^^^^^^^^^^^^^^^^^^
  File "your_script.py", line 42, in validate_and_calc
    return 100 / x
           ~~~~^~~
ZeroDivisionError: division by zero
```

## Key Improvements

✅ **Errors show function name and problematic input**
✅ **Tracebacks filtered to show only user code**
✅ **No concurrent.futures/threading clutter**
✅ **Helpful hints for common mistakes**
✅ **Clear, actionable error messages**
✅ **Maintains backward compatibility - all existing tests pass**

## Testing

Run the comprehensive demo to see all improvements:

```bash
python tests/test_multithread_error_trace.py
```

This demonstrates:

1. Simple function errors
2. Nested function call traces
3. Common parameter type mistakes
4. Various exception types (TypeError, ValueError, AttributeError, etc.)

## Code Changes

Main files modified:

- `src/speedy_utils/multi_worker/thread.py`:
  - Added `UserFunctionError` exception class
  - Enhanced `_worker` function with validation and error filtering
  - Improved error handling in `multi_thread` main loop
  - Added imports for `sys` and `traceback`

All changes maintain backward compatibility - existing code continues to work unchanged.
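The Solution section of IMPROVEMENTS.md above describes `UserFunctionError` and the `_worker` validation only in bullet form. The sketch below illustrates the same two ideas (callable validation with a hint, and trimming a traceback to user-code frames). It is an editorial illustration assembled from those bullets, not the code shipped in `src/speedy_utils/multi_worker/thread.py`; the `_INFRA_MARKERS` list and the `_worker_sketch` helper are assumptions.

```python
# Illustrative sketch only -- not the packaged implementation in
# src/speedy_utils/multi_worker/thread.py.
import traceback

# Frames whose filenames match these markers are treated as infrastructure.
_INFRA_MARKERS = ("concurrent/futures", "threading.py", "multi_worker/thread.py")


class UserFunctionError(Exception):
    """Wraps an exception from a user function with a user-code-only traceback."""

    def __init__(self, func_name: str, item, original: BaseException):
        self.original = original
        frames = traceback.extract_tb(original.__traceback__)
        user_frames = [
            f for f in frames
            if not any(marker in f.filename for marker in _INFRA_MARKERS)
        ]
        trimmed = "".join(traceback.format_list(user_frames))
        super().__init__(
            f'Error in function "{func_name}" with input: {item!r}\n\n'
            f"User code traceback:\n{trimmed}"
            f"{type(original).__name__}: {original}"
        )


def _worker_sketch(func, item):
    """Run one work item, validating `func` and re-raising with clean context."""
    if not callable(func):
        raise TypeError(
            f"multi_thread: func parameter must be callable, "
            f"got {type(func).__name__}: {func!r}\n"
            "Hint: Did you accidentally pass a list instead of a function?"
        )
    try:
        return func(item)
    except Exception as exc:
        raise UserFunctionError(getattr(func, "__name__", repr(func)), item, exc) from exc
```

Chaining with `from exc` keeps the original exception attached, which is one way to satisfy the "maintains proper exception chaining while hiding infrastructure noise" bullet.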
+++ speedy_utils-1.1.25/debug_imemoize.py
@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Debug the imemoize cache key generation
"""

import sys
import time
import os.path as osp
sys.path.insert(0, osp.join(osp.dirname(__file__), 'src'))

from speedy_utils.common.utils_cache import imemoize, _GLOBAL_MEMORY_CACHE, get_source, identify, _compute_cache_components

def debug_cache_keys():
    """Debug what cache keys are generated"""
    print("=== Debugging cache key generation ===")

    # Clear cache
    _GLOBAL_MEMORY_CACHE.clear()

    # First function
    @imemoize
    def test_func(x):
        return x * x

    # Get cache components
    func_source1, sub_dir1, key_id1 = _compute_cache_components(
        test_func, (5,), {}, True, None, None
    )
    cache_key1 = identify((func_source1, sub_dir1, key_id1))

    print(f"Function 1 source length: {len(func_source1)}")
    print(f"Function 1 source: {func_source1[:100]}...")
    print(f"Function 1 cache key: {cache_key1}")

    # Call function
    result1 = test_func(5)
    print(f"After call 1, cache size: {len(_GLOBAL_MEMORY_CACHE)}")

    # Redefine same function
    @imemoize
    def test_func(x):
        return x * x

    # Get cache components for redefined function
    func_source2, sub_dir2, key_id2 = _compute_cache_components(
        test_func, (5,), {}, True, None, None
    )
    cache_key2 = identify((func_source2, sub_dir2, key_id2))

    print(f"\nFunction 2 source length: {len(func_source2)}")
    print(f"Function 2 source: {func_source2[:100]}...")
    print(f"Function 2 cache key: {cache_key2}")

    print(f"\nSource code same: {func_source1 == func_source2}")
    print(f"Cache keys same: {cache_key1 == cache_key2}")

    # Call function again
    result2 = test_func(5)
    print(f"After call 2, cache size: {len(_GLOBAL_MEMORY_CACHE)}")

    print(f"\nCache contents:")
    for i, (key, value) in enumerate(_GLOBAL_MEMORY_CACHE.items()):
        print(f"  {i+1}. {key[:50]}... -> {value}")

if __name__ == "__main__":
    debug_cache_keys()
+++ speedy_utils-1.1.25/debug_imemoize_flow.py
@@ -0,0 +1,95 @@
#!/usr/bin/env python3
"""
Debug the imemoize execution flow
"""

import sys
import time
import os.path as osp
sys.path.insert(0, osp.join(osp.dirname(__file__), 'src'))

from speedy_utils.common.utils_cache import _GLOBAL_MEMORY_CACHE, identify, _compute_cache_components
from speedy_utils.common.utils_cache import mem_lock

def create_debug_imemoize():
    """Create a debug version of imemoize with detailed logging"""
    import functools
    import inspect

    def debug_imemoize(func):
        print(f"Creating imemoize wrapper for {func.__name__}")

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print(f"\n--- Calling {func.__name__}({args}, {kwargs}) ---")

            # Compute cache key
            func_source, sub_dir, key_id = _compute_cache_components(
                func, args, kwargs, True, None, None
            )
            cache_key = identify((func_source, sub_dir, key_id))

            print(f"Function source: {func_source[:50]}...")
            print(f"Cache key: {cache_key}")
            print(f"Cache size before lookup: {len(_GLOBAL_MEMORY_CACHE)}")

            # Check cache
            with mem_lock:
                if cache_key in _GLOBAL_MEMORY_CACHE:
                    cached_result = _GLOBAL_MEMORY_CACHE[cache_key]
                    print(f"CACHE HIT: Found {cached_result}")
                    return cached_result
                else:
                    print(f"CACHE MISS: Key not found")
                    print(f"Available keys:")
                    for i, key in enumerate(_GLOBAL_MEMORY_CACHE.keys()):
                        print(f"  {i+1}. {key}")

            # Compute result
            print(f"Computing result...")
            start = time.time()
            result = func(*args, **kwargs)
            end = time.time()
            print(f"Computation took {end - start:.6f}s")

            # Store in cache
            with mem_lock:
                _GLOBAL_MEMORY_CACHE[cache_key] = result
                print(f"Stored result in cache. Cache size now: {len(_GLOBAL_MEMORY_CACHE)}")

            return result

        return wrapper

    return debug_imemoize

def test_debug():
    debug_imemoize = create_debug_imemoize()

    # Clear cache
    _GLOBAL_MEMORY_CACHE.clear()
    print("Cache cleared")

    # First function
    @debug_imemoize
    def test_func(x):
        time.sleep(0.05)
        return x * x

    print("\n=== FIRST CALL ===")
    result1 = test_func(5)

    print("\n=== SECOND CALL (same function object) ===")
    result2 = test_func(5)

    # Redefine function
    @debug_imemoize
    def test_func(x):
        time.sleep(0.05)
        return x * x

    print("\n=== THIRD CALL (new function object, same source) ===")
    result3 = test_func(5)

if __name__ == "__main__":
    test_debug()
+++ speedy_utils-1.1.25/examples/temperature_range_example.py
@@ -0,0 +1,119 @@
"""Example demonstrating temperature range sampling with LLM."""

from llm_utils import LLM
from pydantic import BaseModel


class CreativeStory(BaseModel):
    """A creative story output."""

    title: str
    story: str
    moral: str


def example_temperature_range_text():
    """Example: Sample text responses with different temperatures."""
    print("=" * 60)
    print("Example 1: Temperature Range Sampling (Text Completion)")
    print("=" * 60)

    llm = LLM(
        instruction="You are a creative writer. Write a very short story.",
        output_model=str,
    )

    prompt = "Write a one-sentence story about a brave mouse."

    # Sample with 5 different temperatures from 0.1 to 1.0
    responses = llm(
        prompt,
        temperature_ranges=(0.1, 1.0),
        n=5,
    )

    print(f"\nGenerated {len(responses)} responses with varying temperatures:\n")
    for i, resp in enumerate(responses):
        temp = 0.1 + i * ((1.0 - 0.1) / (5 - 1))
        print(f"Temperature ~{temp:.2f}:")
        print(f"  {resp['parsed']}\n")


def example_temperature_range_pydantic():
    """Example: Sample structured responses with different temperatures."""
    print("=" * 60)
    print("Example 2: Temperature Range with Pydantic Models")
    print("=" * 60)

    llm = LLM(
        instruction="Create a creative short story with a moral lesson.",
        output_model=CreativeStory,
    )

    prompt = "Topic: A robot learning to feel emotions"

    # Sample with 3 different temperatures from 0.5 to 1.5
    responses = llm(
        prompt,
        temperature_ranges=(0.5, 1.5),
        n=3,
    )

    print(f"\nGenerated {len(responses)} stories with varying creativity:\n")
    for i, resp in enumerate(responses):
        temp = 0.5 + i * ((1.5 - 0.5) / (3 - 1))
        story = resp["parsed"]
        print(f"Temperature ~{temp:.2f}:")
        print(f"  Title: {story.title}")
        print(f"  Story: {story.story[:80]}...")
        print(f"  Moral: {story.moral}\n")


def example_two_step_parsing():
    """Example: Two-step Pydantic parsing for models with reasoning."""
    print("=" * 60)
    print("Example 3: Two-Step Pydantic Parsing")
    print("=" * 60)

    llm = LLM(
        instruction=("Analyze the given text and extract structured information. Think through your analysis first."),
        output_model=CreativeStory,
    )

    prompt = "Analyze the story: 'The tortoise won the race by persistence.'"

    # Use two-step parsing (useful for reasoning models)
    response = llm(
        prompt,
        two_step_parse_pydantic=True,
    )[0]

    story = response["parsed"]
    print("\nExtracted structure:")
    print(f"  Title: {story.title}")
    print(f"  Story: {story.story}")
    print(f"  Moral: {story.moral}")


if __name__ == "__main__":
    # Run examples
    # Note: These require a working OpenAI API key or local LLM server

    try:
        example_temperature_range_text()
    except Exception as e:
        print(f"Example 1 failed: {e}\n")

    try:
        example_temperature_range_pydantic()
    except Exception as e:
        print(f"Example 2 failed: {e}\n")

    try:
        example_two_step_parsing()
    except Exception as e:
        print(f"Example 3 failed: {e}\n")

    print("\n" + "=" * 60)
    print("Examples complete!")
    print("=" * 60)
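Both loops in the example above estimate each response's temperature with `low + i * ((high - low) / (n - 1))`, which implies an even, endpoint-inclusive spread of the `n` samples across `temperature_ranges`. The helper below just makes that assumption explicit; the spread is inferred from the example's own estimate, not documented `LLM` behavior.

```python
# Assumed even, endpoint-inclusive spread over temperature_ranges,
# mirroring the example's per-response temperature estimate.
def spread_temperatures(low: float, high: float, n: int) -> list[float]:
    if n == 1:
        return [low]
    step = (high - low) / (n - 1)
    return [low + i * step for i in range(n)]

print([round(t, 3) for t in spread_temperatures(0.1, 1.0, 5)])
# [0.1, 0.325, 0.55, 0.775, 1.0]
```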
+++ speedy_utils-1.1.25/examples_improved_error_tracing.py
@@ -0,0 +1,85 @@
"""
Direct comparison: Before and After error tracing improvements.

This demonstrates the exact improvement for the user's original error case.
"""

from speedy_utils import multi_thread


def simulate_original_error():
    """
    Simulates the exact error from the user's example:
    - User has a function that creates lambda functions
    - Accidentally passes list of functions as 'func' parameter
    - Gets TypeError: 'list' object is not callable
    """

    def lm_translate(msgs, temperature, max_tokens):
        """Mock language model translate function."""
        return [{'parsed': f'translation at temp={temperature:.2f}'}]

    def translate(n=5, max_temperature=1.0):
        """Function that generates choices with different temperatures."""
        step = max_temperature / n
        fns = []
        target_text = 'Some text to translate'
        msgs = [{'role': 'user', 'content': 'Translate this'}]

        for i in range(n):
            fn = lambda x: lm_translate(
                msgs,
                temperature=0.1 + 0.1 * i * step,
                max_tokens=len(target_text) + 32,
            )[0]
            fns.append(fn)

        # THE BUG: User passed fns (a list) as the func parameter
        # Should be: multi_thread(some_function, fns)
        # Instead did: multi_thread(fns, range(n))
        choices = multi_thread(fns, range(n), progress=False)
        return choices

    return translate()


def main():
    print('='*70)
    print('BEFORE vs AFTER: Error Tracing Improvements')
    print('='*70)

    print('\nBEFORE (old behavior):')
    print('-' * 70)
    print('''
The error traceback showed:
- Line in multi_thread.py:474
- concurrent.futures/_base.py:449
- concurrent.futures/thread.py:59
- multi_worker/thread.py:155
- ... many infrastructure frames ...
- Finally: TypeError: 'list' object is not callable

User had to dig through 10+ lines of infrastructure code
to find the actual problem.
''')

    print('\nAFTER (new behavior):')
    print('-' * 70)

    try:
        simulate_original_error()
    except TypeError as e:
        print(f'\n{type(e).__name__}: {e}\n')

    print('-' * 70)
    print('\nKey differences:')
    print(' ✓ Immediate identification of the problem')
    print(' ✓ Clear hint about what went wrong')
    print(' ✓ Shows exactly what was passed (list of functions)')
    print(' ✓ No infrastructure clutter')
    print(' ✓ Debugging time: < 5 seconds vs > 1 minute')
    print('='*70)


if __name__ == '__main__':
    main()