sunholo 0.140.2__py3-none-any.whl → 0.140.5__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- sunholo/agents/__init__.py +1 -1
- sunholo/agents/chat_history.py +278 -0
- sunholo/agents/flask/__init__.py +0 -1
- sunholo/agents/flask/vac_routes.py +22 -24
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/METADATA +1 -1
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/RECORD +10 -11
- sunholo/agents/flask/qna_routes.py +0 -604
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/WHEEL +0 -0
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/entry_points.txt +0 -0
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/licenses/LICENSE.txt +0 -0
- {sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/top_level.txt +0 -0
sunholo/agents/__init__.py
CHANGED
@@ -2,6 +2,6 @@ from .chat_history import extract_chat_history
 from .dispatch_to_qa import send_to_qa, send_to_qa_async
 from .pubsub import process_pubsub
 from .special_commands import handle_special_commands, app_to_store, handle_files
-from .flask import
+from .flask import create_app, VACRoutes
 from .fastapi import register_qna_fastapi_routes, create_fastapi_app
 from .swagger import config_to_swagger
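After this change the only Flask helpers re-exported at the package root are create_app and VACRoutes. A minimal import sketch of the new surface (hypothetical downstream code, assuming sunholo 0.140.5 is installed):

```python
# The flask re-export no longer carries the old qna_routes helpers,
# so downstream code imports the class-based routes instead.
from sunholo.agents import create_app, VACRoutes
```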
sunholo/agents/chat_history.py
CHANGED
@@ -1,5 +1,282 @@
 import json
 from ..custom_logging import log
+import time
+import hashlib
+from functools import lru_cache
+from typing import List, Tuple, Optional
+
+
+class ChatHistoryCache:
+    """
+    Incremental cache for chat history processing.
+
+    Caches processed message pairs and only processes new messages
+    when the chat history is extended.
+    """
+
+    def __init__(self, max_cache_size: int = 1000):
+        self.cache = {}
+        self.max_cache_size = max_cache_size
+
+    def _get_cache_key(self, chat_history: List[dict]) -> str:
+        """Generate a cache key based on the chat history content."""
+        # Use the hash of the serialized chat history for the key
+        # Only hash the first few and last few messages to balance performance vs accuracy
+        if len(chat_history) <= 10:
+            content = str(chat_history)
+        else:
+            # Hash first 5 and last 5 messages + length
+            content = str(chat_history[:5] + chat_history[-5:] + [len(chat_history)])
+
+        return hashlib.md5(content.encode()).hexdigest()
+
+    def _find_cached_prefix(self, current_history: List[dict]) -> Tuple[Optional[List[Tuple]], int]:
+        """
+        Find the longest cached prefix of the current chat history.
+
+        Returns:
+            Tuple of (cached_pairs, cache_length) or (None, 0) if no cache found
+        """
+        current_length = len(current_history)
+
+        # Check for cached versions of prefixes, starting from longest
+        for cache_length in range(current_length - 1, 0, -1):
+            prefix = current_history[:cache_length]
+            cache_key = self._get_cache_key(prefix)
+
+            if cache_key in self.cache:
+                cached_data = self.cache[cache_key]
+                cached_pairs = cached_data['pairs']
+
+                # Verify the cache is still valid by checking a few messages
+                if self._verify_cache_validity(prefix, cached_data['original_history']):
+                    return cached_pairs, cache_length
+                else:
+                    # Cache is stale, remove it
+                    del self.cache[cache_key]
+
+        return None, 0
+
+    def _verify_cache_validity(self, current_prefix: List[dict], cached_prefix: List[dict]) -> bool:
+        """Quick verification that cached data is still valid."""
+        if len(current_prefix) != len(cached_prefix):
+            return False
+
+        # Check first and last few messages for equality
+        check_indices = [0, -1] if len(current_prefix) >= 2 else [0]
+
+        for i in check_indices:
+            if current_prefix[i] != cached_prefix[i]:
+                return False
+
+        return True
+
+    def extract_chat_history_incremental(self, chat_history: List[dict]) -> List[Tuple]:
+        """
+        Extract chat history with incremental caching.
+
+        Args:
+            chat_history: List of chat message dictionaries
+
+        Returns:
+            List of (human_message, ai_message) tuples
+        """
+        if not chat_history:
+            return []
+
+        # Try to find cached prefix
+        cached_pairs, cache_length = self._find_cached_prefix(chat_history)
+
+        if cached_pairs is not None:
+            log.debug(f"Found cached pairs for {cache_length} messages, processing {len(chat_history) - cache_length} new messages")
+
+            # Process only the new messages
+            new_messages = chat_history[cache_length:]
+            new_pairs = self._process_new_messages(new_messages, cached_pairs)
+
+            # Combine cached and new pairs
+            all_pairs = cached_pairs + new_pairs
+        else:
+            log.debug(f"No cache found, processing all {len(chat_history)} messages")
+            # Process all messages from scratch
+            all_pairs = self._extract_chat_history_full(chat_history)
+
+        # Cache the result
+        self._update_cache(chat_history, all_pairs)
+
+        return all_pairs
+
+    def _process_new_messages(self, new_messages: List[dict], cached_pairs: List[Tuple]) -> List[Tuple]:
+        """
+        Process only the new messages, considering the state from cached pairs.
+
+        Args:
+            new_messages: New messages to process
+            cached_pairs: Previously processed message pairs
+
+        Returns:
+            List of new message pairs
+        """
+        if not new_messages:
+            return []
+
+        new_pairs = []
+
+        # Determine if we're waiting for a bot response based on cached pairs
+        waiting_for_bot = True
+        if cached_pairs:
+            last_pair = cached_pairs[-1]
+            # If last pair has both human and AI message, we're ready for a new human message
+            waiting_for_bot = not (last_pair[0] and last_pair[1])
+
+        # If we ended with an unpaired human message, get it
+        last_human_message = ""
+        if cached_pairs and waiting_for_bot:
+            last_human_message = cached_pairs[-1][0]
+
+        # Process new messages
+        for message in new_messages:
+            try:
+                is_human_msg = is_human(message)
+                content = create_message_element(message)
+
+                if is_human_msg:
+                    last_human_message = content
+                    waiting_for_bot = True
+                else:  # Bot message
+                    if waiting_for_bot and last_human_message:
+                        new_pairs.append((last_human_message, content))
+                        last_human_message = ""
+                        waiting_for_bot = False
+                    # If not waiting for bot or no human message, this is an orphaned bot message
+
+            except (KeyError, TypeError) as e:
+                log.warning(f"Error processing new message: {e}")
+                continue
+
+        return new_pairs
+
+    def _extract_chat_history_full(self, chat_history: List[dict]) -> List[Tuple]:
+        """Full extraction when no cache is available."""
+        # Use the optimized version from before
+        paired_messages = []
+
+        # Handle initial bot message
+        start_idx = 0
+        if chat_history and is_bot(chat_history[0]):
+            try:
+                first_message = chat_history[0]
+                blank_element = ""
+                bot_element = create_message_element(first_message)
+                paired_messages.append((blank_element, bot_element))
+                start_idx = 1
+            except (KeyError, TypeError):
+                pass
+
+        # Process remaining messages
+        last_human_message = ""
+        for i in range(start_idx, len(chat_history)):
+            message = chat_history[i]
+
+            try:
+                is_human_msg = is_human(message)
+                content = create_message_element(message)
+
+                if is_human_msg:
+                    last_human_message = content
+                else:  # Bot message
+                    if last_human_message:
+                        paired_messages.append((last_human_message, content))
+                        last_human_message = ""
+
+            except (KeyError, TypeError) as e:
+                log.warning(f"Error processing message {i}: {e}")
+                continue
+
+        return paired_messages
+
+    def _update_cache(self, chat_history: List[dict], pairs: List[Tuple]):
+        """Update cache with new result."""
+        # Only cache if the history is of reasonable size
+        if len(chat_history) < 2:
+            return
+
+        cache_key = self._get_cache_key(chat_history)
+
+        # Implement simple LRU by removing oldest entries
+        if len(self.cache) >= self.max_cache_size:
+            # Remove 20% of oldest entries
+            remove_count = self.max_cache_size // 5
+            oldest_keys = list(self.cache.keys())[:remove_count]
+            for key in oldest_keys:
+                del self.cache[key]
+
+        self.cache[cache_key] = {
+            'pairs': pairs,
+            'original_history': chat_history.copy(),  # Store copy for validation
+            'timestamp': time.time()
+        }
+
+        log.debug(f"Cached {len(pairs)} pairs for history of length {len(chat_history)}")
+
+    def clear_cache(self):
+        """Clear the entire cache."""
+        self.cache.clear()
+        log.info("Chat history cache cleared")
+
+
+# Global cache instance
+_chat_history_cache = ChatHistoryCache()
+
+
+def extract_chat_history_with_cache(chat_history: List[dict] = None) -> List[Tuple]:
+    """
+    Main function to replace the original extract_chat_history.
+
+    Uses incremental caching for better performance with growing chat histories.
+    """
+    if not chat_history:
+        log.debug("No chat history found")
+        return []
+
+    return _chat_history_cache.extract_chat_history_incremental(chat_history)
+
+
+# Async version that wraps the cached version
+async def extract_chat_history_async_cached(chat_history: List[dict] = None) -> List[Tuple]:
+    """
+    Async version that uses the cache and runs in a thread pool if needed.
+    """
+    import asyncio
+
+    if not chat_history:
+        return []
+
+    # For very large histories, run in thread pool to avoid blocking
+    if len(chat_history) > 1000:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            extract_chat_history_with_cache,
+            chat_history
+        )
+    else:
+        # For smaller histories, just run directly
+        return extract_chat_history_with_cache(chat_history)
+
+
+# Utility function to warm up the cache
+def warm_up_cache(chat_histories: List[List[dict]]):
+    """
+    Pre-populate cache with common chat histories.
+
+    Args:
+        chat_histories: List of chat history lists to cache
+    """
+    for history in chat_histories:
+        extract_chat_history_with_cache(history)
+
+    log.info(f"Warmed up cache with {len(chat_histories)} chat histories")
 
 
 async def extract_chat_history_async(chat_history=None):
@@ -243,3 +520,4 @@ def is_ai(message: dict):
         return message['role'] == 'assistant'
     else:
         return 'bot_id' in message  # Slack
+
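For context, a short usage sketch of the cached extractor added above (not from the package source). The {"role": ..., "content": ...} message schema is an assumption based on the role check in is_ai() elsewhere in this file; the exact fields accepted by is_human() and create_message_element() are not shown in this diff.

```python
from sunholo.agents.chat_history import extract_chat_history_with_cache

history = [
    {"role": "user", "content": "What is Sunholo?"},
    {"role": "assistant", "content": "A GenAI ops framework."},
    {"role": "user", "content": "How do I deploy a VAC?"},  # trailing human turn, not yet answered
]

# First call parses the whole history into (human, ai) pairs and caches the
# result keyed on the history content; with this schema the trailing human
# turn stays unpaired until a bot reply arrives.
pairs = extract_chat_history_with_cache(history)

# Growing the history and calling again finds the cached prefix, so only the
# new tail is handed to _process_new_messages() instead of re-parsing everything.
history.append({"role": "assistant", "content": "Deploy it from your VAC config."})
pairs = extract_chat_history_with_cache(history)
```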
sunholo/agents/flask/__init__.py
CHANGED
sunholo/agents/flask/vac_routes.py
CHANGED
@@ -7,8 +7,8 @@ from functools import partial
 import inspect
 import asyncio
 
-from ...agents import
-from ..chat_history import
+from ...agents import handle_special_commands
+from ..chat_history import extract_chat_history_with_cache, extract_chat_history_async_cached
 from ...qna.parsers import parse_output
 from ...streaming import start_streaming_chat, start_streaming_chat_async
 from ...archive import archive_qa
@@ -58,12 +58,18 @@ if __name__ == "__main__":
     ```
 
     """
-    def __init__(self, app,
+    def __init__(self, app,
+                 stream_interpreter: callable,
+                 vac_interpreter:callable=None,
+                 additional_routes:dict=None,
+                 async_stream:bool=False,
+                 add_langfuse_eval:bool=True):
         self.app = app
         self.stream_interpreter = stream_interpreter
         self.vac_interpreter = vac_interpreter or partial(self.vac_interpreter_default)
         self.additional_routes = additional_routes if additional_routes is not None else []
         self.async_stream = async_stream
+        self.add_langfuse_eval = add_langfuse_eval
         self.register_routes()
 
 
@@ -96,13 +102,9 @@ if __name__ == "__main__":
         # Basic routes
         self.app.route("/", methods=['GET'])(self.home)
         self.app.route("/health", methods=['GET'])(self.health)
-
-        # Streaming VAC
-        self.app.route('/vac/streaming/<vector_name>',
-                       methods=['POST'],
-                       provide_automatic_options=False)(self.handle_stream_vac)
 
         if self.async_stream:  # Use async treatment
+            log.info("async_stream enabled")
             self.app.route('/vac/streaming/<vector_name>',
                            methods=['POST'],
                            provide_automatic_options=False)(self.handle_stream_vac_async)
@@ -351,10 +353,10 @@ if __name__ == "__main__":
 
         # Use the async version of prep_vac
         prep = await self.prep_vac_async(request, vector_name)
-        log.info(f"Processing prep: {prep}")
+        log.info(f"Processing async prep: {prep}")
         all_input = prep["all_input"]
 
-        log.info(f'Streaming data with: {all_input}')
+        log.info(f'Streaming async data with: {all_input}')
 
         async def generate_response_content():
             try:
@@ -378,12 +380,12 @@ if __name__ == "__main__":
                     yield chunk
 
             except Exception as e:
-                yield f"Streaming Error: {str(e)} {traceback.format_exc()}"
+                yield f"Streaming async Error: {str(e)} {traceback.format_exc()}"
 
         response = Response(generate_response_content(), content_type='text/plain; charset=utf-8')
         response.headers['Transfer-Encoding'] = 'chunked'
 
-        log.debug(f"streaming response: {response}")
+        log.debug(f"streaming async response: {response}")
 
         return response
 
@@ -554,7 +556,8 @@ if __name__ == "__main__":
         else:
             log.info(f"User message: {user_message}")
 
-        paired_messages =
+        paired_messages = extract_chat_history_with_cache(chat_history)
+
         command_response = handle_special_commands(user_message, vector_name, paired_messages)
 
         if command_response is not None:
@@ -698,10 +701,10 @@ if __name__ == "__main__":
 
         trace = None
         span = None
-
-
-
-
+        if self.add_langfuse_eval:
+            trace_id = data.get('trace_id')
+            trace = self.create_langfuse_trace(request, vector_name, trace_id)
+            log.info(f"Using existing langfuse trace: {trace_id}")
 
         #config, _ = load_config("config/llm_config.yaml")
         try:
@@ -725,7 +728,7 @@ if __name__ == "__main__":
         vector_name = data.pop('vector_name', vector_name)
         data.pop('trace_id', None)  # to ensure not in kwargs
 
-        paired_messages =
+        paired_messages = extract_chat_history_with_cache(chat_history)
 
         all_input = {'user_input': user_input,
                      'vector_name': vector_name,
@@ -741,15 +744,10 @@ if __name__ == "__main__":
                 metadata=vac_config.configs_by_kind,
                 input = all_input
             )
-        command_response = handle_special_commands(user_input, vector_name, paired_messages)
-        if command_response is not None:
-            if trace:
-                trace.update(output=jsonify(command_response))
 
         return {
            "trace": trace,
            "span": span,
-           "command_response": command_response,
            "all_input": all_input,
            "vac_config": vac_config
        }
@@ -793,7 +791,7 @@ if __name__ == "__main__":
         data.pop('trace_id', None)  # to ensure not in kwargs
 
         # Task 3: Process chat history
-        chat_history_task = asyncio.create_task(
+        chat_history_task = asyncio.create_task(extract_chat_history_async_cached(chat_history))
         tasks.append(chat_history_task)
 
         # Await all tasks concurrently
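A wiring sketch of the constructor after these changes (the interpreter callables are placeholders, not part of the package). async_stream picks the async streaming handler for /vac/streaming/<vector_name>, and the new add_langfuse_eval flag gates the create_langfuse_trace call shown in the prep hunk above:

```python
from flask import Flask
from sunholo.agents import VACRoutes

app = Flask(__name__)

def stream_interpreter(question, vector_name, chat_history, **kwargs):
    # placeholder: the real interpreter yields chunks and a final dict with an 'answer' key
    yield {"answer": f"echo: {question}"}

def vac_interpreter(question, vector_name, chat_history, **kwargs):
    # placeholder: the real interpreter returns the full answer in one call
    return {"answer": f"echo: {question}"}

VACRoutes(
    app,
    stream_interpreter,
    vac_interpreter,
    async_stream=True,        # use the async streaming route handler
    add_langfuse_eval=False,  # added in this diff: skip per-request Langfuse trace creation
)
```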
{sunholo-0.140.2.dist-info → sunholo-0.140.5.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 sunholo/__init__.py,sha256=InRbX4V0-qdNHo9zYH3GEye7ASLR6LX8-SMvPV4Jsaw,1212
 sunholo/custom_logging.py,sha256=JXZTnXp_DixP3jwYfKw4LYRDS9IuTq7ctCgfZbI2rxA,22023
 sunholo/langchain_types.py,sha256=uZ4zvgej_f7pLqjtu4YP7qMC_eZD5ym_5x4pyvA1Ih4,1834
-sunholo/agents/__init__.py,sha256=
-sunholo/agents/chat_history.py,sha256=
+sunholo/agents/__init__.py,sha256=AauG3l0y4r5Fzx1zJfZ634M4o-0o7B7J5T8k_gPvNqE,370
+sunholo/agents/chat_history.py,sha256=e2NmiooaRUxKGr_aoU05rzhHi3VsKjbZZmzeDr2yJJE,17780
 sunholo/agents/dispatch_to_qa.py,sha256=NHihwAoCJ5_Lk11e_jZnucVUGQyZHCB-YpkfMHBCpQk,8882
 sunholo/agents/langserve.py,sha256=C46ph2mnygr6bdHijYWYyfQDI9ylAF0_9Kx2PfcCJpU,4414
 sunholo/agents/pubsub.py,sha256=TscZN_6am6DfaQkC-Yl18ZIBOoLE-0nDSiil6GpQEh4,1344
@@ -12,10 +12,9 @@ sunholo/agents/swagger.py,sha256=2tzGmpveUMmTREykZvVnDj3j295wyOMu7mUFDnXdY3c,106
 sunholo/agents/fastapi/__init__.py,sha256=S_pj4_bTUmDGoq_exaREHlOKThi0zTuGT0VZY0YfODQ,88
 sunholo/agents/fastapi/base.py,sha256=W-cyF8ZDUH40rc-c-Apw3-_8IIi2e4Y9qRtnoVnsc1Q,2521
 sunholo/agents/fastapi/qna_routes.py,sha256=lKHkXPmwltu9EH3RMwmD153-J6pE7kWQ4BhBlV3to-s,3864
-sunholo/agents/flask/__init__.py,sha256=
+sunholo/agents/flask/__init__.py,sha256=dEoByI3gDNUOjpX1uVKP7uPjhfFHJubbiaAv3xLopnk,63
 sunholo/agents/flask/base.py,sha256=HLz3Z5efWaewTwSFEM6JH48NA9otoJBoVFJlARGk9L8,788
-sunholo/agents/flask/
-sunholo/agents/flask/vac_routes.py,sha256=RprhFJje5gTNU3ePGbCCPdBAdYs417VyXNlGe5UlR-g,33370
+sunholo/agents/flask/vac_routes.py,sha256=rpakKOO6bjBJv2l0NOqd0fAzRJEFLR1buSN8GiubMeE,33230
 sunholo/archive/__init__.py,sha256=qNHWm5rGPVOlxZBZCpA1wTYPbalizRT7f8X4rs2t290,31
 sunholo/archive/archive.py,sha256=PxVfDtO2_2ZEEbnhXSCbXLdeoHoQVImo4y3Jr2XkCFY,1204
 sunholo/auth/__init__.py,sha256=TeP-OY0XGxYV_8AQcVGoh35bvyWhNUcMRfhuD5l44Sk,91
@@ -169,9 +168,9 @@ sunholo/vertex/init.py,sha256=1OQwcPBKZYBTDPdyU7IM4X4OmiXLdsNV30C-fee2scQ,2875
 sunholo/vertex/memory_tools.py,sha256=tBZxqVZ4InTmdBvLlOYwoSEWu4-kGquc-gxDwZCC4FA,7667
 sunholo/vertex/safety.py,sha256=S9PgQT1O_BQAkcqauWncRJaydiP8Q_Jzmu9gxYfy1VA,2482
 sunholo/vertex/type_dict_to_json.py,sha256=uTzL4o9tJRao4u-gJOFcACgWGkBOtqACmb6ihvCErL8,4694
-sunholo-0.140.
-sunholo-0.140.
-sunholo-0.140.
-sunholo-0.140.
-sunholo-0.140.
-sunholo-0.140.
+sunholo-0.140.5.dist-info/licenses/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+sunholo-0.140.5.dist-info/METADATA,sha256=SM-i1Mdu3Sl5-m2am-EwU1DTAv9M22QZIylaV9MXkPg,10067
+sunholo-0.140.5.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+sunholo-0.140.5.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+sunholo-0.140.5.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+sunholo-0.140.5.dist-info/RECORD,,
sunholo/agents/flask/qna_routes.py
DELETED
@@ -1,604 +0,0 @@
-# Copyright [2024] [Holosun ApS]
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import json
-import traceback
-import uuid
-
-from ...agents import extract_chat_history, handle_special_commands
-from ...qna.parsers import parse_output
-from ...streaming import start_streaming_chat
-from ...archive import archive_qa
-from ...custom_logging import log
-from ...utils.config import load_config
-from ...utils import ConfigManager
-from ...utils.version import sunholo_version
-import os
-from ...gcs.add_file import add_file_to_gcs, handle_base64_image
-from ..swagger import validate_api_key
-from datetime import datetime, timedelta
-
-try:
-    from flask import request, jsonify, Response
-except ImportError:
-    pass
-
-try:
-    from langfuse.decorators import langfuse_context, observe
-except ImportError:
-    pass
-
-# Cache dictionary to store validated API keys
-api_key_cache = {}
-cache_duration = timedelta(minutes=5)  # Cache duration
-
-def make_openai_response(user_message, vector_name, answer):
-    response_id = str(uuid.uuid4())
-    log.info(f"openai response: Q: {user_message} to VECTOR_NAME: {vector_name} - A: {answer}")
-    openai_response = {
-        "id": response_id,
-        "object": "chat.completion",
-        "created": str(int(datetime.now().timestamp())),
-        "model": vector_name,
-        "system_fingerprint": sunholo_version(),
-        "choices": [{
-            "index": 0,
-            "message": {
-                "role": "assistant",
-                "content": answer,
-            },
-            "logprobs": None,
-            "finish_reason": "stop"
-        }],
-        "usage": {
-            "prompt_tokens": 0,
-            "completion_tokens": 0,
-            "total_tokens": 0
-        }
-    }
-
-    log.info(f"OpenAI response: {openai_response}")
-    return jsonify(openai_response)
-
-def register_qna_routes(app, stream_interpreter, vac_interpreter):
-    """
-    Register Q&A routes for a Flask application.
-
-    This function sets up multiple routes for handling Q&A operations,
-    including streaming responses and processing static responses.
-
-    Args:
-        app (Flask): The Flask application instance.
-        stream_interpreter (function): Function to handle streaming Q&A responses.
-        vac_interpreter (function): Function to handle static Q&A responses.
-
-    Returns:
-        None
-
-    Example:
-        from flask import Flask
-        app = Flask(__name__)
-
-        def dummy_stream_interpreter(...):
-            ...
-
-        def dummy_vac_interpreter(...):
-            ...
-
-        register_qna_routes(app, dummy_stream_interpreter, dummy_vac_interpreter)
-    """
-    @app.route("/")
-    def home():
-        return jsonify("OK")
-
-    @app.route("/health")
-    def health():
-        return jsonify({"status": "healthy"})
-
-    @app.route('/vac/streaming/<vector_name>', methods=['POST'])
-    def stream_qa(vector_name):
-        """
-        Handle streaming Q&A responses.
-
-        This function sets up a route to handle streaming Q&A responses based on
-        the provided vector name.
-
-        Args:
-            vector_name (str): The name of the vector for the request.
-
-        Returns:
-            Response: A Flask response object streaming the Q&A response content.
-
-        Example:
-            response = stream_qa("example_vector")
-        """
-        observed_stream_interpreter = observe()(stream_interpreter)
-        prep = prep_vac(request, vector_name)
-        log.debug(f"Processing prep: {prep}")
-        trace = prep["trace"]
-        span = prep["span"]
-        command_response = prep["command_response"]
-        vac_config = prep["vac_config"]
-        all_input = prep["all_input"]
-
-        if command_response:
-            return jsonify(command_response)
-
-        log.info(f'Streaming data with: {all_input}')
-        if span:
-            generation = span.generation(
-                name="start_streaming_chat",
-                metadata=vac_config,
-                input = all_input,
-                completion_start_time=datetime.now(),
-                model=vac_config.get("model") or vac_config.get("llm")
-            )
-
-        def generate_response_content():
-
-            for chunk in start_streaming_chat(question=all_input["user_input"],
-                                              vector_name=vector_name,
-                                              qna_func=observed_stream_interpreter,
-                                              chat_history=all_input["chat_history"],
-                                              wait_time=all_input["stream_wait_time"],
-                                              timeout=all_input["stream_timeout"],
-                                              #kwargs
-                                              **all_input["kwargs"]
-                                              ):
-                if isinstance(chunk, dict) and 'answer' in chunk:
-                    # When we encounter the dictionary, we yield it as a JSON string
-                    # and stop the generator.
-                    if trace:
-                        chunk["trace"] = trace.id
-                        chunk["trace_url"] = trace.get_trace_url()
-                    archive_qa(chunk, vector_name)
-                    if trace:
-                        generation.end(output=json.dumps(chunk))
-                        span.end(output=json.dumps(chunk))
-                        trace.update(output=json.dumps(chunk))
-
-                    return json.dumps(chunk)
-
-                else:
-                    # Otherwise, we yield the plain text chunks as they come in.
-                    yield chunk
-
-        # Here, the generator function will handle streaming the content to the client.
-        response = Response(generate_response_content(), content_type='text/plain; charset=utf-8')
-        response.headers['Transfer-Encoding'] = 'chunked'
-
-        log.debug(f"streaming response: {response}")
-        if trace:
-            generation.end(output=response)
-            span.end(output=response)
-            trace.update(output=response)
-
-        #if 'user_id' in all_input["kwargs"]:
-        #    kwargs = all_input["kwargs"]
-        #    config = ConfigManager(vector_name)
-        #    add_user_history_rag(kwargs.pop('user_id'),
-        #                         config,
-        #                         question=all_input.pop("user_input"),
-        #                         answer=response.get('answer'),
-        #                         metadata=all_input)
-
-        return response
-
-    @app.route('/vac/<vector_name>', methods=['POST'])
-    def process_qna(vector_name):
-        """
-        Handle static Q&A responses.
-
-        This function sets up a route to handle static Q&A responses based on
-        the provided vector name.
-
-        Args:
-            vector_name (str): The name of the vector for the request.
-
-        Returns:
-            Response: A Flask response object with the Q&A response content.
-
-        Example:
-            response = process_qna("example_vector")
-        """
-        observed_vac_interpreter = observe()(vac_interpreter)
-        prep = prep_vac(request, vector_name)
-        log.debug(f"Processing prep: {prep}")
-        trace = prep["trace"]
-        span = prep["span"]
-        command_response = prep["command_response"]
-        vac_config = prep["vac_config"]
-        all_input = prep["all_input"]
-
-        if command_response:
-            return jsonify(command_response)
-
-        try:
-            if span:
-                generation = span.generation(
-                    name="vac_interpreter",
-                    metadata=vac_config,
-                    input = all_input,
-                    model=vac_config.get("model") or vac_config.get("llm")
-                )
-            bot_output = observed_vac_interpreter(
-                question=all_input["user_input"],
-                vector_name=vector_name,
-                chat_history=all_input["chat_history"],
-                **all_input["kwargs"]
-            )
-            if span:
-                generation.end(output=bot_output)
-            # {"answer": "The answer", "source_documents": [{"page_content": "The page content", "metadata": "The metadata"}]}
-            bot_output = parse_output(bot_output)
-            if trace:
-                bot_output["trace"] = trace.id
-                bot_output["trace_url"] = trace.get_trace_url()
-            archive_qa(bot_output, vector_name)
-            log.info(f'==LLM Q:{all_input["user_input"]} - A:{bot_output}')
-
-
-        except Exception as err:
-            bot_output = {'answer': f'QNA_ERROR: An error occurred while processing /vac/{vector_name}: {str(err)} traceback: {traceback.format_exc()}'}
-
-        if trace:
-            span.end(output=jsonify(bot_output))
-            trace.update(output=jsonify(bot_output))
-
-        # {'answer': 'output'}
-        return jsonify(bot_output)
-
-    @app.before_request
-    def check_authentication_header():
-        if request.path.startswith('/openai/'):
-            log.debug(f'Request headers: {request.headers}')
-            # the header forwarded
-            auth_header = request.headers.get('X-Forwarded-Authorization')
-            if auth_header:
-
-                if auth_header.startswith('Bearer '):
-                    api_key = auth_header.split(' ')[1]  # Assuming "Bearer <api_key>"
-                else:
-                    return jsonify({'error': 'Invalid authorization header does not start with "Bearer " - got: {auth_header}'}), 401
-
-                endpoints_host = os.getenv('_ENDPOINTS_HOST')
-                if not endpoints_host:
-                    return jsonify({'error': '_ENDPOINTS_HOST environment variable not found'}), 401
-
-                # Check cache first
-                current_time = datetime.now()
-                if api_key in api_key_cache:
-                    cached_result, cache_time = api_key_cache[api_key]
-                    if current_time - cache_time < cache_duration:
-                        if not cached_result:
-                            return jsonify({'error': 'Invalid cached API key'}), 401
-                        else:
-                            return  # Valid API key, continue to the endpoint
-                    else:
-                        # Cache expired, remove from cache
-                        del api_key_cache[api_key]
-
-                # Validate API key
-                is_valid = validate_api_key(api_key, endpoints_host)
-                # Update cache
-                api_key_cache[api_key] = (is_valid, current_time)
-
-                if not is_valid:
-                    return jsonify({'error': 'Invalid API key'}), 401
-            else:
-                return jsonify({'error': 'Missing Authorization header'}), 401
-
-    @app.route('/openai/health', methods=['GET', 'POST'])
-    def openai_health_endpoint():
-        return jsonify({'message': 'Success'})
-
-    @app.route('/openai/v1/chat/completions', methods=['POST'])
-    @app.route('/openai/v1/chat/completions/<vector_name>', methods=['POST'])
-    def openai_compatible_endpoint(vector_name=None):
-        """
-        Handle OpenAI-compatible chat completions.
-
-        This function sets up routes to handle OpenAI-compatible chat completion requests,
-        both with and without a specified vector name.
-
-        Args:
-            vector_name (str, optional): The name of the vector for the request. Defaults to None.
-
-        Returns:
-            Response: A Flask response object with the chat completion content.
-
-        Example:
-            response = openai_compatible_endpoint("example_vector")
-        """
-        data = request.get_json()
-        log.info(f'openai_compatible_endpoint got data: {data} for vector: {vector_name}')
-
-        vector_name = vector_name or data.pop('model', None)
-        messages = data.pop('messages', None)
-        chat_history = data.pop('chat_history', None)
-        stream = data.pop('stream', False)
-
-        if not messages:
-            return jsonify({"error": "No messages provided"}), 400
-
-        user_message = None
-        image_uri = None
-        mime_type = None
-
-        for msg in reversed(messages):
-            if msg['role'] == 'user':
-                if isinstance(msg['content'], list):
-                    for content_item in msg['content']:
-                        if content_item['type'] == 'text':
-                            user_message = content_item['text']
-                        elif content_item['type'] == 'image_url':
-                            base64_data = content_item['image_url']['url']
-                            image_uri, mime_type = handle_base64_image(base64_data, vector_name)
-                else:
-                    user_message = msg['content']
-                break
-
-        if not user_message:
-            return jsonify({"error": "No user message provided"}), 400
-        else:
-            log.info(f"User message: {user_message}")
-
-        paired_messages = extract_chat_history(chat_history)
-        command_response = handle_special_commands(user_message, vector_name, paired_messages)
-
-        if command_response is not None:
-
-            return make_openai_response(user_message, vector_name, command_response)
-
-        if image_uri:
-            data["image_uri"] = image_uri
-            data["mime"] = mime_type
-
-        all_input = {
-            "user_input": user_message,
-            "chat_history": chat_history,
-            "kwargs": data
-        }
-
-        observed_stream_interpreter = observe()(stream_interpreter)
-
-        response_id = str(uuid.uuid4())
-
-        def generate_response_content():
-            for chunk in start_streaming_chat(question=user_message,
-                                              vector_name=vector_name,
-                                              qna_func=observed_stream_interpreter,
-                                              chat_history=all_input["chat_history"],
-                                              wait_time=all_input.get("stream_wait_time", 1),
-                                              timeout=all_input.get("stream_timeout", 60),
-                                              **all_input["kwargs"]
-                                              ):
-                if isinstance(chunk, dict) and 'answer' in chunk:
-                    openai_chunk = {
-                        "id": response_id,
-                        "object": "chat.completion.chunk",
-                        "created": str(int(datetime.now().timestamp())),
-                        "model": vector_name,
-                        "system_fingerprint": sunholo_version(),
-                        "choices": [{
-                            "index": 0,
-                            "delta": {"content": chunk['answer']},
-                            "logprobs": None,
-                            "finish_reason": None
-                        }]
-                    }
-                    yield json.dumps(openai_chunk) + "\n"
-                else:
-                    log.info(f"Unknown chunk: {chunk}")
-
-            final_chunk = {
-                "id": response_id,
-                "object": "chat.completion.chunk",
-                "created": str(int(datetime.now().timestamp())),
-                "model": vector_name,
-                "system_fingerprint": sunholo_version(),
-                "choices": [{
-                    "index": 0,
-                    "delta": {},
-                    "logprobs": None,
-                    "finish_reason": "stop"
-                }]
-            }
-            yield json.dumps(final_chunk) + "\n"
-
-        if stream:
-            log.info("Streaming openai chunks")
-            return Response(generate_response_content(), content_type='text/plain; charset=utf-8')
-
-        try:
-            observed_vac_interpreter = observe()(vac_interpreter)
-            bot_output = observed_vac_interpreter(
-                question=user_message,
-                vector_name=vector_name,
-                chat_history=all_input["chat_history"],
-                **all_input["kwargs"]
-            )
-            bot_output = parse_output(bot_output)
-
-            log.info(f"Bot output: {bot_output}")
-            if bot_output:
-                return make_openai_response(user_message, vector_name, bot_output.get('answer', ''))
-            else:
-                return make_openai_response(user_message, vector_name, 'ERROR: could not find an answer')
-
-        except Exception as err:
-            log.error(f"OpenAI response error: {str(err)} traceback: {traceback.format_exc()}")
-
-            return make_openai_response(user_message, vector_name, f'ERROR: {str(err)}')
-
-
-def create_langfuse_trace(request, vector_name):
-    """
-    Create a Langfuse trace for tracking requests.
-
-    This function initializes a Langfuse trace object based on the request headers
-    and vector name.
-
-    Args:
-        request (Request): The Flask request object.
-        vector_name (str): The name of the vector for the request.
-
-    Returns:
-        Langfuse.Trace: The Langfuse trace object.
-
-    Example:
-        trace = create_langfuse_trace(request, "example_vector")
-    """
-    try:
-        from langfuse import Langfuse
-        langfuse = Langfuse()
-    except ImportError as err:
-        print(f"No langfuse installed for agents.flask.register_qna_routes, install via `pip install sunholo[http]` - {str(err)}")
-
-        return None
-
-    user_id = request.headers.get("X-User-ID")
-    session_id = request.headers.get("X-Session-ID")
-    message_source = request.headers.get("X-Message-Source")
-
-    package_version = sunholo_version()
-    tags = [package_version]
-    if message_source:
-        tags.append(message_source)
-
-    return langfuse.trace(
-        name = f"/vac/{vector_name}",
-        user_id = user_id,
-        session_id = session_id,
-        tags = tags,
-        release = f"sunholo-v{package_version}"
-    )
-
-def prep_vac(request, vector_name):
-    """
-    Prepare the input data for a VAC request.
-
-    This function processes the incoming request data, extracts relevant
-    information, and prepares the data for VAC processing.
-
-    Args:
-        request (Request): The Flask request object.
-        vector_name (str): The name of the vector for the request.
-
-    Returns:
-        dict: A dictionary containing prepared input data and metadata.
-
-    Example:
-        prep_data = prep_vac(request, "example_vector")
-    """
-    #trace = create_langfuse_trace(request, vector_name)
-    trace = None
-    span = None
-
-    if request.content_type.startswith('application/json'):
-        data = request.get_json()
-    elif request.content_type.startswith('multipart/form-data'):
-        data = request.form.to_dict()
-        if 'file' in request.files:
-            file = request.files['file']
-            if file.filename != '':
-                log.info(f"Found file: {file.filename} to upload to GCS")
-                try:
-                    image_uri, mime_type = handle_file_upload(file, vector_name)
-                    data["image_uri"] = image_uri
-                    data["image_url"] = image_uri
-                    data["mime"] = mime_type
-                except Exception as e:
-                    log.error(f"Error uploading file: {str(e)}")
-            else:
-                log.info("No file selected to upload to GCS")
-    else:
-        log.warning(f"Error uploading file: Unsupported content type {request.content_type}")
-
-    log.info(f"vac/{vector_name} got data: {data}")
-
-    config, _ = load_config("config/llm_config.yaml")
-    vac_configs = config.get("vac")
-    if vac_configs:
-        vac_config = vac_configs.get(vector_name)
-        if not vac_config:
-            log.warning("Not a local configured VAC, may be a remote config not synced yet")
-
-    if trace and vac_config:
-        trace.update(input=data, metadata=vac_config)
-
-    user_input = data.pop('user_input').strip()
-    stream_wait_time = data.pop('stream_wait_time', 7)
-    stream_timeout = data.pop('stream_timeout', 120)
-    chat_history = data.pop('chat_history', None)
-    vector_name = data.pop('vector_name', vector_name)
-
-    log.info("Turning chat_history into paired tuples")
-    paired_messages = extract_chat_history(chat_history)
-
-    all_input = {'user_input': user_input,
-                 'vector_name': vector_name,
-                 'chat_history': paired_messages,
-                 'stream_wait_time': stream_wait_time,
-                 'stream_timeout': stream_timeout,
-                 'kwargs': data}
-
-    if trace:
-        span = trace.span(
-            name="VAC",
-            metadata=vac_config,
-            input = all_input
-        )
-    command_response = handle_special_commands(user_input, vector_name, paired_messages)
-    if command_response is not None:
-        if trace:
-            trace.update(output=jsonify(command_response))
-
-    return {
-        "trace": trace,
-        "span": span,
-        "command_response": command_response,
-        "all_input": all_input,
-        "vac_config": vac_config
-    }
-
-
-def handle_file_upload(file, vector_name):
-    """
-    Handle file upload and store the file in Google Cloud Storage.
-
-    This function saves the uploaded file locally, uploads it to Google Cloud Storage,
-    and then removes the local copy.
-
-    Args:
-        file (FileStorage): The uploaded file.
-        vector_name (str): The name of the vector for the request.
-
-    Returns:
-        tuple: A tuple containing the URI of the uploaded file and its MIME type.
-
-    Raises:
-        Exception: If the file upload fails.
-
-    Example:
-        uri, mime_type = handle_file_upload(file, "example_vector")
-    """
-    try:
-        file.save(file.filename)
-        image_uri = add_file_to_gcs(file.filename, vector_name)
-        os.remove(file.filename)  # Clean up the saved file
-        return image_uri, file.mimetype
-    except Exception as e:
-        raise Exception(f'File upload failed: {str(e)}')
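With qna_routes.py removed, register_qna_routes() and the /openai/v1/chat/completions endpoints it registered are gone from the Flask helpers. A hedged migration sketch, assuming the class-based VACRoutes kept in this release is the replacement for the basic VAC routes; the interpreters below are stand-ins for whatever callables were previously passed to register_qna_routes():

```python
from flask import Flask
from sunholo.agents import VACRoutes

app = Flask(__name__)

def stream_interpreter(question, vector_name, chat_history, **kwargs):
    yield {"answer": question}   # stand-in for the real streaming interpreter

def vac_interpreter(question, vector_name, chat_history, **kwargs):
    return {"answer": question}  # stand-in for the real static interpreter

# 0.140.2 (removed in this diff):
#   from sunholo.agents.flask import register_qna_routes
#   register_qna_routes(app, stream_interpreter, vac_interpreter)

# 0.140.5:
VACRoutes(app, stream_interpreter, vac_interpreter)
```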
File without changes
File without changes
File without changes
File without changes