osbot-utils 2.41.0__py3-none-any.whl → 2.43.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osbot_utils/helpers/llms/actions/LLM_Request__Execute.py +9 -3
- osbot_utils/helpers/llms/cache/LLM_Request__Cache.py +14 -13
- osbot_utils/helpers/llms/cache/LLM_Request__Cache__File_System.py +6 -4
- osbot_utils/helpers/llms/schemas/Schema__LLM_Response__Cache.py +8 -6
- osbot_utils/utils/Threads.py +8 -0
- osbot_utils/version +1 -1
- {osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/METADATA +2 -2
- {osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/RECORD +10 -10
- {osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/LICENSE +0 -0
- {osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/WHEEL +0 -0
osbot_utils/helpers/llms/actions/LLM_Request__Execute.py
CHANGED

@@ -1,3 +1,4 @@
+from osbot_utils.helpers.duration.decorators.capture_duration import capture_duration
 from osbot_utils.helpers.llms.cache.LLM_Request__Cache import LLM_Request__Cache
 from osbot_utils.helpers.llms.builders.LLM_Request__Builder import LLM_Request__Builder
 from osbot_utils.helpers.llms.platforms.open_ai.API__LLM__Open_AI import API__LLM__Open_AI
@@ -9,7 +10,7 @@ from osbot_utils.type_safe.decorators.type_safe import type_safe
 class LLM_Request__Execute(Type_Safe):
     llm_cache       : LLM_Request__Cache
     llm_api         : API__LLM__Open_AI
-    use_cache       : bool
+    use_cache       : bool = True
     request_builder : LLM_Request__Builder          # todo: fix the use of LLM_Request__Builder since it not good when we when overwrite it at self.request_builder.llm_request_data = llm_request.request_data

     @type_safe
@@ -22,10 +23,15 @@

         self.request_builder.llm_request_data = llm_request.request_data
         llm_payload = self.request_builder.build_request_payload()
-        response_data = self.llm_api.execute(llm_payload)
+        with capture_duration() as duration:
+            response_data = self.llm_api.execute(llm_payload)                  # Make API call
         llm_response = Schema__LLM_Response(response_data=response_data)       # Create response object

         if self.use_cache:                                                     # Cache the response if enabled
-
+            kwargs_add = dict(request  = llm_request      ,
+                              response = llm_response     ,
+                              payload  = llm_payload      ,
+                              duration = duration.seconds )
+            self.llm_cache.add(**kwargs_add)

         return llm_response
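The execution path now times the API call with `capture_duration` and stores the elapsed seconds with the cached response. A minimal sketch of that timing pattern, based only on what the diff shows (the fake LLM call below is illustrative):

```python
import time
from osbot_utils.helpers.duration.decorators.capture_duration import capture_duration

def fake_llm_call():                             # stand-in for self.llm_api.execute(llm_payload)
    time.sleep(0.1)
    return {"choices": []}

with capture_duration() as duration:             # times everything inside the block
    response_data = fake_llm_call()

print(duration.seconds)                          # float, forwarded to llm_cache.add(duration=...)
```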
osbot_utils/helpers/llms/cache/LLM_Request__Cache.py
CHANGED

@@ -27,15 +27,16 @@ class LLM_Request__Cache(Type_Safe):
     @type_safe
     def add(self, request  : Schema__LLM_Request ,                             # Request to cache
                   response : Schema__LLM_Response,                             # Response to store
-                  payload  : dict
+                  payload  : dict  = None ,                                    # Payload to store
+                  duration : float = None
             ) -> Obj_Id:                                                       # returns cache_id

         hash_request = self.compute_request_hash (request)                     # calculate request hash
-        cache_entry  = Schema__LLM_Response__Cache(
-
-
-
-
+        cache_entry  = Schema__LLM_Response__Cache(llm__payload      = payload     ,
+                                                   llm__request      = request     ,
+                                                   llm__response     = response    ,
+                                                   request__duration = duration    ,
+                                                   request__hash     = hash_request)
         cache_id     = cache_entry.cache_id

         self.cache_index.cache_id__from__hash__request [ hash_request ] = cache_id   # Update the cache index
@@ -50,7 +51,7 @@
         cache_id    = self.cache_index.cache_id__from__hash__request[request_hash]
         cache_entry = self.get__cache_entry__from__cache_id(cache_id)
         if cache_entry:
-            return cache_entry.
+            return cache_entry.llm__response

         return None

@@ -60,17 +61,17 @@
         return self.cache_entries.get(cache_id)

     @type_safe
-    def get__cache_entry__from__request(self, request: Schema__LLM_Request):
+    def get__cache_entry__from__request(self, request: Schema__LLM_Request) -> Optional[Schema__LLM_Response__Cache]:
         cache_id = self.get__cache_id__from__request(request)
         return self.get__cache_entry__from__cache_id(cache_id)

     @type_safe
-    def get__cache_id__from__request(self, request: Schema__LLM_Request):
+    def get__cache_id__from__request(self, request: Schema__LLM_Request) -> Obj_Id:
         request_hash = self.compute_request_hash(request)
         return self.get__cache_id__from__request_hash(request_hash)

     @type_safe
-    def get__cache_id__from__request_hash(self, request_hash: Safe_Str__Hash):
+    def get__cache_id__from__request_hash(self, request_hash: Safe_Str__Hash) -> Obj_Id:
         return self.cache_index.cache_id__from__hash__request.get(request_hash)

     def exists(self, request: Schema__LLM_Request) -> bool:                    # True if in cache
@@ -95,7 +96,7 @@
     def get_by_id(self, cache_id : Obj_Id)-> Optional[Schema__LLM_Response]:   # Cached response or None
         cache_entry = self.get__cache_entry__from__cache_id(cache_id)
         if cache_entry:
-            return cache_entry.
+            return cache_entry.llm__response
         return None

     def clear(self) -> bool:                                                   # Clear all cache entries
@@ -111,13 +112,13 @@
         newest_timestamp = None

         for cache_id, entry in self.cache_entries.items():                     # Track models
-            model = entry.
+            model = entry.llm__request.request_data.model
             if model in models:
                 models[model] += 1
             else:
                 models[model] = 1

-            timestamp = entry.
+            timestamp = entry.llm__response.timestamp                          # Track timestamps
             if oldest_timestamp is None or timestamp < oldest_timestamp:
                 oldest_timestamp = timestamp
             if newest_timestamp is None or timestamp > newest_timestamp:
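Taken together, `add()` now accepts optional `payload` and `duration` values and the accessors carry explicit return types. A sketch of the round trip, assuming the `Schema__LLM_Request` and `Schema__LLM_Response` objects already exist (only the method names, keyword arguments, and the `llm__response` attribute come from the diff):

```python
from osbot_utils.helpers.llms.cache.LLM_Request__Cache import LLM_Request__Cache

def cache_round_trip(llm_request, llm_response, llm_payload, seconds):
    cache    = LLM_Request__Cache()
    cache_id = cache.add(request  = llm_request ,               # Schema__LLM_Request
                         response = llm_response,               # Schema__LLM_Response
                         payload  = llm_payload ,               # optional since this release
                         duration = seconds     )               # new optional float (seconds)
    entry = cache.get__cache_entry__from__request(llm_request)  # Schema__LLM_Response__Cache
    return cache_id, entry.llm__response                        # the cached Schema__LLM_Response
```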
osbot_utils/helpers/llms/cache/LLM_Request__Cache__File_System.py
CHANGED

@@ -89,8 +89,8 @@ class LLM_Request__Cache__File_System(LLM_Request__Cache):
         for cache_id in self.get_all_cache_ids():                              # Load all cache entries
             cache_entry = self.load_cache_entry(cache_id)
             if cache_entry:
-                request      = cache_entry.
-                hash_request = cache_entry.
+                request      = cache_entry.llm__request
+                hash_request = cache_entry.request__hash
                 if hash_request is None:                                       # if hash_request doesn't exist    # todo: see if this a valid scenario
                     hash_request = self.compute_request_hash(request)          # recompute hash_request
                 self.cache_index.cache_id__from__hash__request[hash_request] = cache_id   # Update the index
@@ -171,11 +171,13 @@ class LLM_Request__Cache__File_System(LLM_Request__Cache):
     @type_safe
     def add(self, request  : Schema__LLM_Request ,
                   response : Schema__LLM_Response,
-
+                  duration : float    = None ,
+                  payload  : dict     = None ,
                   now      : datetime = None
+
             ) -> Obj_Id:                                                       # Save an LLM request/response pair using temporal organization.

-        cache_id     = super().add(request=request, response=response, payload=payload)                     # First use standard add() to handle in-memory caching
+        cache_id     = super().add(request=request, response=response, payload=payload, duration=duration)  # First use standard add() to handle in-memory caching
         cache_entry  = self.cache_entries[cache_id]                            # get the cache entry (which will exist since it was added on super().add(request, response) )
         request_domains = self.extract_domains_from_request(request)           # Extract domains and areas for organization
         domains      = self.shared_domains + request_domains
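The file-system cache's `add()` now forwards `duration` (and the now-optional `payload`) to the in-memory `add()` before doing its file-system bookkeeping. A sketch of a call site, assuming an already-configured `LLM_Request__Cache__File_System` instance is passed in (only the keyword names come from the diff; the role of `now` is an assumption):

```python
from datetime import datetime, timezone

def save_to_fs_cache(fs_cache, llm_request, llm_response, llm_payload, seconds):
    return fs_cache.add(request  = llm_request               ,   # Schema__LLM_Request
                        response = llm_response              ,   # Schema__LLM_Response
                        payload  = llm_payload               ,   # optional dict
                        duration = seconds                   ,   # new optional float
                        now      = datetime.now(timezone.utc))   # assumed to control temporal placement
```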
osbot_utils/helpers/llms/schemas/Schema__LLM_Response__Cache.py
CHANGED

@@ -6,9 +6,11 @@ from osbot_utils.helpers.safe_str.Safe_Str__Hash import Safe_Str__Hash
 from osbot_utils.type_safe.Type_Safe import Type_Safe

 class Schema__LLM_Response__Cache(Type_Safe):
-    cache_id
-
-
-
-
-
+    cache_id          : Obj_Id
+    llm__payload      : dict
+    llm__request      : Schema__LLM_Request  = None
+    llm__response     : Schema__LLM_Response = None
+    request__duration : float
+    request__hash     : Safe_Str__Hash       = None
+    timestamp         : Timestamp_Now
+
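The cache entry now carries the raw payload, the request/response pair, the request hash, and the measured duration. A small sketch of what an entry looks like, assuming Type_Safe auto-populates `cache_id` and `timestamp` when they are not passed in (that behaviour is not shown in this diff, and the payload dict is illustrative):

```python
from osbot_utils.helpers.llms.schemas.Schema__LLM_Response__Cache import Schema__LLM_Response__Cache

entry = Schema__LLM_Response__Cache(llm__payload      = {"model": "gpt-4o"},   # payload sent to the LLM API (illustrative)
                                    request__duration = 1.23               )   # seconds, as captured by capture_duration
print(entry.cache_id)                            # Obj_Id for this cache entry
print(entry.request__hash)                       # None until the computed request hash is assigned
```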
osbot_utils/utils/Threads.py
CHANGED

@@ -42,6 +42,14 @@ def invoke_in_new_event_loop(target: typing.Coroutine):          # Runs a cor
     result = future.result()                                     # Wait for the result of the future
     return result                                                # Return the result from the coroutine

+async def async__execute_coroutines(coroutines, return_exceptions: bool = False) -> list:   # """ Execute multiple coroutines concurrently and wait for all to complete.
+    return await asyncio.gather(*coroutines, return_exceptions=return_exceptions)
+
+def invoke_async__coroutines(coroutines, return_exceptions: bool = False) -> list:
+    return invoke_async_function(async__execute_coroutines(coroutines, return_exceptions))
+
+
+
 # in the use cases when I tried to use this, it hanged
 # def invoke_in_current_loop(target: typing.Coroutine):
 #     try:
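`invoke_async__coroutines` gives synchronous code a one-call way to run several coroutines concurrently through the existing `invoke_async_function` helper. A short usage sketch (the `double` coroutine is illustrative; the helper names come from the diff above):

```python
import asyncio
from osbot_utils.utils.Threads import invoke_async__coroutines

async def double(value):                         # illustrative coroutine
    await asyncio.sleep(0)
    return value * 2

results = invoke_async__coroutines([double(1), double(2), double(3)])
print(results)                                   # [2, 4, 6]; asyncio.gather preserves input order
```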
osbot_utils/version
CHANGED

@@ -1 +1 @@
-v2.41.0
+v2.43.0
{osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: osbot_utils
-Version: 2.41.0
+Version: 2.43.0
 Summary: OWASP Security Bot - Utils
 License: MIT
 Author: Dinis Cruz
@@ -23,7 +23,7 @@ Description-Content-Type: text/markdown

 Powerful Python util methods and classes that simplify common apis and tasks.

-
 [](https://codecov.io/gh/owasp-sbot/OSBot-Utils)

{osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/RECORD
CHANGED

@@ -190,7 +190,7 @@ osbot_utils/helpers/html/Tag__Link.py,sha256=rQ-gZN8EkSv5x1S-smdjvFflwMQHACHQXiO
 osbot_utils/helpers/html/Tag__Style.py,sha256=LPPlIN7GyMvfCUlbs2eXVMUr9jS0PX5M94A5Ig_jXIs,846
 osbot_utils/helpers/html/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osbot_utils/helpers/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-osbot_utils/helpers/llms/actions/LLM_Request__Execute.py,sha256=
+osbot_utils/helpers/llms/actions/LLM_Request__Execute.py,sha256=ZJTTxPdZ1qBbAP28wKYysjPIoUD_oz-3DKM6_zONGoM,2363
 osbot_utils/helpers/llms/actions/Type_Safe__Schema_For__LLMs.py,sha256=em9RoSZqSSo6BQBZvEKH8Qv8f8f8oubNpy0LIDsak-E,12024
 osbot_utils/helpers/llms/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osbot_utils/helpers/llms/builders/LLM_Request__Builder.py,sha256=c8MN66ijFn9DugbBIGmG0bm9ujS-0kvZDGqJcZsHHYc,3385
@@ -198,8 +198,8 @@ osbot_utils/helpers/llms/builders/LLM_Request__Builder__Open_AI.py,sha256=JwyPDe
 osbot_utils/helpers/llms/builders/LLM_Request__Factory.py,sha256=bpFXVTKpalBL7ZONjaHU5c0-2Rwzzd2vgdD1FpYxfGw,6291
 osbot_utils/helpers/llms/builders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osbot_utils/helpers/llms/cache/LLM_Cache__Path_Generator.py,sha256=7kDY-aaUWR4Da6lj5RPwjqTLUTGrOUYhbNfOPM30e40,4442
-osbot_utils/helpers/llms/cache/LLM_Request__Cache.py,sha256=
-osbot_utils/helpers/llms/cache/LLM_Request__Cache__File_System.py,sha256=
+osbot_utils/helpers/llms/cache/LLM_Request__Cache.py,sha256=jvJFA4dCN7keoRuk6AjTNoePuUVh01iohPntN-vY5tg,7689
+osbot_utils/helpers/llms/cache/LLM_Request__Cache__File_System.py,sha256=BWuWDZw-V2uamEJ39clWu9Jox63uaSHGdY6EIrkYw34,15497
 osbot_utils/helpers/llms/cache/LLM_Request__Cache__Storage.py,sha256=0ok7z2kGiK3edlgD0hwD5z2hYFZ91Viar1JgD7SNoxg,5434
 osbot_utils/helpers/llms/cache/Virtual_Storage__Local__Folder.py,sha256=xNM2xy0-2ROvhEFXa_jwqth7eApIcRTqcRUDxo0THXI,3214
 osbot_utils/helpers/llms/cache/Virtual_Storage__Sqlite.py,sha256=j8EWuKFVMtDBxrCEi8yNdpzwvTxO6Bri2Bi4-KwO-3s,4638
@@ -214,7 +214,7 @@ osbot_utils/helpers/llms/schemas/Schema__LLM_Request__Function_Call.py,sha256=VJ
 osbot_utils/helpers/llms/schemas/Schema__LLM_Request__Message__Content.py,sha256=nl-16yz4G_72ViACKE9CvGStrKxw2Gm_JcaU8wVcJXI,521
 osbot_utils/helpers/llms/schemas/Schema__LLM_Request__Message__Role.py,sha256=T99w0cRrDPXQqPT-Nw7_14tMr4vKpUlhw74UJZL6w6w,168
 osbot_utils/helpers/llms/schemas/Schema__LLM_Response.py,sha256=R62zjoUqu9sbtbFcOX7vG1t0vTaAhS_3Z9CkxESrTjI,298
-osbot_utils/helpers/llms/schemas/Schema__LLM_Response__Cache.py,sha256=
+osbot_utils/helpers/llms/schemas/Schema__LLM_Response__Cache.py,sha256=hdG_KRKkPYUqwEtpLUPLFdXSpTL3nofcTg4P2XKuleY,821
 osbot_utils/helpers/llms/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osbot_utils/helpers/pubsub/Event__Queue.py,sha256=bCtIdVlAuG-jvFEnz14oNhgRScEUrd8v9BqLcZleGks,5038
 osbot_utils/helpers/pubsub/PubSub__Client.py,sha256=6K3l4H-Tc0DhktrxpYzLVur1uZ532pQsHWprLNRXFJE,2316
@@ -393,13 +393,13 @@ osbot_utils/utils/Python_Logger.py,sha256=M9Oi62LxfnRSlCN8GhaiwiBINvcSdGy39FCWjy
 osbot_utils/utils/Regex.py,sha256=MtHhk69ax7Nwu4CQZK7y4KXHZ6VREwEpIchuioB168c,960
 osbot_utils/utils/Status.py,sha256=OjqLwUhHqY-j-JeRN-hIaVZQHPRdyjR7y6i6ujsB-Yc,4287
 osbot_utils/utils/Str.py,sha256=KQVfh0o3BxJKVm24yhAhgIGH5QYfzpP1G-siVv2zQws,3301
-osbot_utils/utils/Threads.py,sha256=
+osbot_utils/utils/Threads.py,sha256=gXd26uPy5tJ2Bb3hdwaEr3KctaZUQR9mmVYbrrhhPig,3506
 osbot_utils/utils/Toml.py,sha256=Rxl8gx7mni5CvBAK-Ai02EKw-GwtJdd3yeHT2kMloik,1667
 osbot_utils/utils/Version.py,sha256=Ww6ChwTxqp1QAcxOnztkTicShlcx6fbNsWX5xausHrg,422
 osbot_utils/utils/Zip.py,sha256=pR6sKliUY0KZXmqNzKY2frfW-YVQEVbLKiyqQX_lc-8,14052
 osbot_utils/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-osbot_utils/version,sha256=
-osbot_utils-2.
-osbot_utils-2.
-osbot_utils-2.
-osbot_utils-2.
+osbot_utils/version,sha256=RQOQkvelJmKVw3XhrLjS20nIAlx7S5yhqSlwTzgxbQE,8
+osbot_utils-2.43.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+osbot_utils-2.43.0.dist-info/METADATA,sha256=cqzo9kHZYU6x6JDimXZXL1hwn49k4m1WPwGgiw2phyE,1329
+osbot_utils-2.43.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+osbot_utils-2.43.0.dist-info/RECORD,,

{osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/LICENSE
File without changes

{osbot_utils-2.41.0.dist-info → osbot_utils-2.43.0.dist-info}/WHEEL
File without changes