sunholo 0.67.10__py3-none-any.whl → 0.68.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sunholo/agents/chat_history.py CHANGED
@@ -103,7 +103,7 @@ def create_message_element(message: dict):
      if 'text' in message:  # This is a Slack or Google Chat message
          log.info(f"Found text element - {message['text']}")
          return message['text']
-     elif 'content' in message:  # Discord message
+     elif 'content' in message:  # Discord or OpenAI history message
          log.info(f"Found content element - {message['content']}")
          return message['content']
      else:
@@ -130,6 +130,8 @@ def is_human(message: dict):
          return message["name"] == "Human"
      elif 'sender' in message:  # Google Chat
          return message['sender']['type'] == 'HUMAN'
+     elif 'role' in message:  # OpenAI history message
+         return message['role'] == 'user'
      else:
          # Slack: Check for the 'user' field and absence of 'bot_id' field
          return 'user' in message and 'bot_id' not in message
@@ -174,5 +176,7 @@ def is_ai(message: dict):
          return message["name"] == "AI"
      elif 'sender' in message:  # Google Chat
          return message['sender']['type'] == 'BOT'
+     elif 'role' in message:  # OpenAI history message
+         return message['role'] == 'assistant'
      else:
          return 'bot_id' in message  # Slack
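
Taken together, the three chat_history.py hunks extend the existing Slack, Google Chat and Discord message parsing to OpenAI-style history entries. A minimal sketch of the new path (the import path follows the RECORD listing below; the message values are illustrative):

```python
# Hypothetical usage of the updated helpers; the message dicts are examples.
from sunholo.agents.chat_history import create_message_element, is_ai, is_human

history = [
    {"role": "user", "content": "What changed in 0.68.1?"},
    {"role": "assistant", "content": "OpenAI-style histories are now parsed."},
]

for msg in history:
    who = "human" if is_human(msg) else "ai" if is_ai(msg) else "unknown"
    print(who, "-", create_message_element(msg))  # hits the new 'content'/'role' branches
```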
sunholo/agents/flask/qna_routes.py CHANGED
@@ -258,7 +258,12 @@ def register_qna_routes(app, stream_interpreter, vac_interpreter):
          # the header forwarded
          auth_header = request.headers.get('X-Forwarded-Authorization')
          if auth_header:
-             api_key = auth_header.split(' ')[1]  # Assuming "Bearer <api_key>"
+
+             if auth_header.startswith('Bearer '):
+                 api_key = auth_header.split(' ')[1]  # Assuming "Bearer <api_key>"
+             else:
+                 return jsonify({'error': f'Invalid authorization header, does not start with "Bearer " - got: {auth_header}'}), 401
+
              endpoints_host = os.getenv('_ENDPOINTS_HOST')
              if not endpoints_host:
                  return jsonify({'error': '_ENDPOINTS_HOST environment variable not found'}), 401
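
With this change, a header that is not of the form `Bearer <api_key>` is rejected with a 401 instead of raising an `IndexError` from `auth_header.split(' ')[1]`. A hedged sketch of a conforming call (host and key are placeholders):

```python
import requests

# Illustrative only: host and API key are placeholders.
resp = requests.post(
    "https://your-agent-host/openai/v1/chat/completions",
    headers={"X-Forwarded-Authorization": "Bearer your-api-key"},
    json={"model": "my-vac", "messages": [{"role": "user", "content": "hello"}]},
)
print(resp.status_code)  # 401 if the header does not start with "Bearer "
```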
@@ -425,8 +430,9 @@ def register_qna_routes(app, stream_interpreter, vac_interpreter):
                  return make_openai_response(user_message, vector_name, 'ERROR: could not find an answer')

          except Exception as err:
-             log.error(f"OpenAI response error: {err}")
-             return jsonify({'error': f'QNA_ERROR: An error occurred: {str(err)} traceback: {traceback.format_exc()}'}), 500
+             log.error(f"OpenAI response error: {str(err)} traceback: {traceback.format_exc()}")
+
+             return make_openai_response(user_message, vector_name, f'ERROR: {str(err)}')


  def create_langfuse_trace(request, vector_name):
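
This second qna_routes.py hunk changes error behaviour for the OpenAI-compatible endpoint: instead of an HTTP 500 with a bare error JSON, failures now come back as a normal chat completion whose content carries the error text, so OpenAI-style clients can still parse the reply. Roughly, with illustrative field values:

```python
# Sketch of the reply shape on error after this change; values are examples.
reply = {
    "object": "chat.completion",
    "model": "my-vac",
    "choices": [{
        "index": 0,
        "message": {"role": "assistant", "content": "ERROR: example failure"},
        "finish_reason": "stop",
    }],
}
content = reply["choices"][0]["message"]["content"]
assert content.startswith("ERROR:")  # clients can detect failures this way
```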
sunholo/agents/flask/vac_routes.py ADDED
@@ -0,0 +1,501 @@
+ import json
+ import traceback
+ import uuid
+
+ from ...agents import extract_chat_history, handle_special_commands
+ from ...qna.parsers import parse_output
+ from ...streaming import start_streaming_chat
+ from ...archive import archive_qa
+ from ...logging import log
+ from ...utils.config import load_config
+ from ...utils.version import sunholo_version
+ import os
+ from ...gcs.add_file import add_file_to_gcs, handle_base64_image
+ from ..swagger import validate_api_key
+ from datetime import datetime, timedelta
+
+ try:
+     from flask import request, jsonify, Response
+ except ImportError:
+     pass
+
+ try:
+     from langfuse.decorators import langfuse_context, observe
+ except ImportError:
+     pass
+
+ # Cache dictionary to store validated API keys
+ api_key_cache = {}
+ cache_duration = timedelta(minutes=5)  # Cache duration
+
+ class VACRoutes:
+     """
+     **Usage Example:**
+
+     ```python
+     from flask import Flask
+     from sunholo.agents.flask import VACRoutes
+
+     app = Flask(__name__)
+
+     def stream_interpreter(question, vector_name, chat_history, **kwargs):
+         # Implement your streaming logic
+         ...
+
+     def vac_interpreter(question, vector_name, chat_history, **kwargs):
+         # Implement your static VAC logic
+         ...
+
+     vac_routes = VACRoutes(app, stream_interpreter, vac_interpreter)
+
+     if __name__ == "__main__":
+         app.run(debug=True)
+     ```
+
+     """
+     def __init__(self, app, stream_interpreter, vac_interpreter):
+         self.app = app
+         self.stream_interpreter = stream_interpreter
+         self.vac_interpreter = vac_interpreter
+         self.register_routes()
+
+     def register_routes(self):
+         """
+         Registers all the VAC routes for the Flask application.
+         """
+         # Basic routes
+         self.app.route("/", methods=['GET'])(self.home)
+         self.app.route("/health", methods=['GET'])(self.health)
+
+         # Streaming VAC
+         self.app.route('/vac/streaming/<vector_name>', methods=['POST'])(self.handle_stream_vac)
+
+         # Static VAC
+         self.app.route('/vac/<vector_name>', methods=['POST'])(self.handle_process_vac)
+
+         # Authentication middleware
+         self.app.before_request(self.check_authentication)
+
+         # OpenAI health endpoint
+         self.app.route('/openai/health', methods=['GET', 'POST'])(self.openai_health_endpoint)
+
+         # OpenAI compatible endpoint
+         self.app.route('/openai/v1/chat/completions', methods=['POST'])(self.handle_openai_compatible_endpoint)
+         self.app.route('/openai/v1/chat/completions/<vector_name>', methods=['POST'])(self.handle_openai_compatible_endpoint)
+
+     def home(self):
+         return jsonify("OK")
+
+     def health(self):
+         return jsonify({"status": "healthy"})
+
+     def make_openai_response(self, user_message, vector_name, answer):
+         response_id = str(uuid.uuid4())
+         log.info(f"openai response: Q: {user_message} to VECTOR_NAME: {vector_name} - A: {answer}")
+         openai_response = {
+             "id": response_id,
+             "object": "chat.completion",
+             "created": str(int(datetime.now().timestamp())),
+             "model": vector_name,
+             "system_fingerprint": sunholo_version(),
+             "choices": [{
+                 "index": 0,
+                 "message": {
+                     "role": "assistant",
+                     "content": answer,
+                 },
+                 "logprobs": None,
+                 "finish_reason": "stop"
+             }],
+             "usage": {
+                 "prompt_tokens": len(user_message.split()),
+                 "completion_tokens": len(answer.split()),
+                 "total_tokens": len(user_message.split()) + len(answer.split())
+             }
+         }
+
+         log.info(f"OpenAI response: {openai_response}")
+         return jsonify(openai_response)
+
+
+     def handle_stream_vac(self, vector_name):
+         observed_stream_interpreter = observe()(self.stream_interpreter)
+         prep = self.prep_vac(request, vector_name)
+         log.debug(f"Processing prep: {prep}")
+         trace = prep["trace"]
+         span = prep["span"]
+         command_response = prep["command_response"]
+         vac_config = prep["vac_config"]
+         all_input = prep["all_input"]
+
+         if command_response:
+             return jsonify(command_response)
+
+         log.info(f'Streaming data with: {all_input}')
+         if span:
+             generation = span.generation(
+                 name="start_streaming_chat",
+                 metadata=vac_config,
+                 input=all_input,
+                 completion_start_time=datetime.now(),
+                 model=vac_config.get("model") or vac_config.get("llm")
+             )
+
+         def generate_response_content():
+
+             for chunk in start_streaming_chat(question=all_input["user_input"],
+                                               vector_name=vector_name,
+                                               qna_func=observed_stream_interpreter,
+                                               chat_history=all_input["chat_history"],
+                                               wait_time=all_input["stream_wait_time"],
+                                               timeout=all_input["stream_timeout"],
+                                               # kwargs
+                                               **all_input["kwargs"]
+                                               ):
+                 if isinstance(chunk, dict) and 'answer' in chunk:
+                     # When we encounter the dictionary, we yield it as a JSON string
+                     # and stop the generator.
+                     if trace:
+                         chunk["trace"] = trace.id
+                         chunk["trace_url"] = trace.get_trace_url()
+                     archive_qa(chunk, vector_name)
+                     if trace:
+                         generation.end(output=json.dumps(chunk))
+                         span.end(output=json.dumps(chunk))
+                         trace.update(output=json.dumps(chunk))
+
+                     yield json.dumps(chunk)
+                     return
+                 else:
+                     # Otherwise, we yield the plain text chunks as they come in.
+                     yield chunk
+
+         # Here, the generator function will handle streaming the content to the client.
+         response = Response(generate_response_content(), content_type='text/plain; charset=utf-8')
+         response.headers['Transfer-Encoding'] = 'chunked'
+
+         log.debug(f"streaming response: {response}")
+         if trace:
+             generation.end(output=response)
+             span.end(output=response)
+             trace.update(output=response)
+
+         return response
+
+     def handle_process_vac(self, vector_name):
+         observed_vac_interpreter = observe()(self.vac_interpreter)
+         prep = self.prep_vac(request, vector_name)
+         log.debug(f"Processing prep: {prep}")
+         trace = prep["trace"]
+         span = prep["span"]
+         command_response = prep["command_response"]
+         vac_config = prep["vac_config"]
+         all_input = prep["all_input"]
+
+         if command_response:
+             return jsonify(command_response)
+
+         try:
+             if span:
+                 generation = span.generation(
+                     name="vac_interpreter",
+                     metadata=vac_config,
+                     input=all_input,
+                     model=vac_config.get("model") or vac_config.get("llm")
+                 )
+             bot_output = observed_vac_interpreter(
+                 question=all_input["user_input"],
+                 vector_name=vector_name,
+                 chat_history=all_input["chat_history"],
+                 **all_input["kwargs"]
+             )
+             if span:
+                 generation.end(output=bot_output)
+             # {"answer": "The answer", "source_documents": [{"page_content": "The page content", "metadata": "The metadata"}]}
+             bot_output = parse_output(bot_output)
+             if trace:
+                 bot_output["trace"] = trace.id
+                 bot_output["trace_url"] = trace.get_trace_url()
+             archive_qa(bot_output, vector_name)
+             log.info(f'==LLM Q:{all_input["user_input"]} - A:{bot_output}')
+
+
+         except Exception as err:
+             bot_output = {'answer': f'QNA_ERROR: An error occurred while processing /vac/{vector_name}: {str(err)} traceback: {traceback.format_exc()}'}
+
+         if trace:
+             span.end(output=jsonify(bot_output))
+             trace.update(output=jsonify(bot_output))
+
+         # {'answer': 'output'}
+         return jsonify(bot_output)
+
+     def check_authentication(self):
+         if request.path.startswith('/openai/'):
+             log.debug(f'Request headers: {request.headers}')
+             # the header forwarded
+             auth_header = request.headers.get('X-Forwarded-Authorization')
+             if auth_header:
+
+                 if auth_header.startswith('Bearer '):
+                     api_key = auth_header.split(' ')[1]  # Assuming "Bearer <api_key>"
+                 else:
+                     return jsonify({'error': f'Invalid authorization header, does not start with "Bearer " - got: {auth_header}'}), 401
+
+                 endpoints_host = os.getenv('_ENDPOINTS_HOST')
+                 if not endpoints_host:
+                     return jsonify({'error': '_ENDPOINTS_HOST environment variable not found'}), 401
+
+                 # Check cache first
+                 current_time = datetime.now()
+                 if api_key in api_key_cache:
+                     cached_result, cache_time = api_key_cache[api_key]
+                     if current_time - cache_time < cache_duration:
+                         if not cached_result:
+                             return jsonify({'error': 'Invalid cached API key'}), 401
+                         else:
+                             return  # Valid API key, continue to the endpoint
+                     else:
+                         # Cache expired, remove from cache
+                         del api_key_cache[api_key]
+
+                 # Validate API key
+                 is_valid = validate_api_key(api_key, endpoints_host)
+                 # Update cache
+                 api_key_cache[api_key] = (is_valid, current_time)
+
+                 if not is_valid:
+                     return jsonify({'error': 'Invalid API key'}), 401
+             else:
+                 return jsonify({'error': 'Missing Authorization header'}), 401
+
+     def openai_health_endpoint(self):
+         return jsonify({'message': 'Success'})
+
+     def handle_openai_compatible_endpoint(self, vector_name=None):
+         data = request.get_json()
+         log.info(f'openai_compatible_endpoint got data: {data} for vector: {vector_name}')
+
+         vector_name = vector_name or data.pop('model', None)
+         messages = data.pop('messages', None)
+         chat_history = data.pop('chat_history', None)
+         stream = data.pop('stream', False)
+
+         if not messages:
+             return jsonify({"error": "No messages provided"}), 400
+
+         user_message = None
+         image_uri = None
+         mime_type = None
+
+         for msg in reversed(messages):
+             if msg['role'] == 'user':
+                 if isinstance(msg['content'], list):
+                     for content_item in msg['content']:
+                         if content_item['type'] == 'text':
+                             user_message = content_item['text']
+                         elif content_item['type'] == 'image_url':
+                             base64_data = content_item['image_url']['url']
+                             image_uri, mime_type = handle_base64_image(base64_data, vector_name)
+                 else:
+                     user_message = msg['content']
+                 break
+
+         if not user_message:
+             return jsonify({"error": "No user message provided"}), 400
+         else:
+             log.info(f"User message: {user_message}")
+
+         paired_messages = extract_chat_history(chat_history)
+         command_response = handle_special_commands(user_message, vector_name, paired_messages)
+
+         if command_response is not None:
+
+             return self.make_openai_response(user_message, vector_name, command_response)
+
+         if image_uri:
+             data["image_uri"] = image_uri
+             data["mime"] = mime_type
+
+         all_input = {
+             "user_input": user_message,
+             "chat_history": chat_history,
+             "kwargs": data
+         }
+
+         observed_stream_interpreter = observe()(self.stream_interpreter)
+
+         response_id = str(uuid.uuid4())
+
+         def generate_response_content():
+             for chunk in start_streaming_chat(question=user_message,
+                                               vector_name=vector_name,
+                                               qna_func=observed_stream_interpreter,
+                                               chat_history=all_input["chat_history"],
+                                               wait_time=all_input.get("stream_wait_time", 1),
+                                               timeout=all_input.get("stream_timeout", 60),
+                                               **all_input["kwargs"]
+                                               ):
+                 if isinstance(chunk, dict) and 'answer' in chunk:
+                     openai_chunk = {
+                         "id": response_id,
+                         "object": "chat.completion.chunk",
+                         "created": str(int(datetime.now().timestamp())),
+                         "model": vector_name,
+                         "system_fingerprint": sunholo_version(),
+                         "choices": [{
+                             "index": 0,
+                             "delta": {"content": chunk['answer']},
+                             "logprobs": None,
+                             "finish_reason": None
+                         }]
+                     }
+                     yield json.dumps(openai_chunk) + "\n"
+                 else:
+                     log.info(f"Unknown chunk: {chunk}")
+
+             final_chunk = {
+                 "id": response_id,
+                 "object": "chat.completion.chunk",
+                 "created": str(int(datetime.now().timestamp())),
+                 "model": vector_name,
+                 "system_fingerprint": sunholo_version(),
+                 "choices": [{
+                     "index": 0,
+                     "delta": {},
+                     "logprobs": None,
+                     "finish_reason": "stop"
+                 }]
+             }
+             yield json.dumps(final_chunk) + "\n"
+
+         if stream:
+             log.info("Streaming openai chunks")
+             return Response(generate_response_content(), content_type='text/plain; charset=utf-8')
+
+         try:
+             observed_vac_interpreter = observe()(self.vac_interpreter)
+             bot_output = observed_vac_interpreter(
+                 question=user_message,
+                 vector_name=vector_name,
+                 chat_history=all_input["chat_history"],
+                 **all_input["kwargs"]
+             )
+             bot_output = parse_output(bot_output)
+
+             log.info(f"Bot output: {bot_output}")
+             if bot_output:
+                 return self.make_openai_response(user_message, vector_name, bot_output.get('answer', ''))
+             else:
+                 return self.make_openai_response(user_message, vector_name, 'ERROR: could not find an answer')
+
+         except Exception as err:
+             log.error(f"OpenAI response error: {str(err)} traceback: {traceback.format_exc()}")
+
+             return self.make_openai_response(user_message, vector_name, f'ERROR: {str(err)}')
+
+
+     def create_langfuse_trace(self, request, vector_name):
+         try:
+             from langfuse import Langfuse
+             langfuse = Langfuse()
+         except ImportError as err:
+             print(f"No langfuse installed for agents.flask.register_qna_routes, install via `pip install sunholo[http]` - {str(err)}")
+
+             return None
+
+         user_id = request.headers.get("X-User-ID")
+         session_id = request.headers.get("X-Session-ID")
+         message_source = request.headers.get("X-Message-Source")
+
+         package_version = sunholo_version()
+         tags = [package_version]
+         if message_source:
+             tags.append(message_source)
+
+         return langfuse.trace(
+             name=f"/vac/{vector_name}",
+             user_id=user_id,
+             session_id=session_id,
+             tags=tags,
+             release=f"sunholo-v{package_version}"
+         )
+
+     def prep_vac(self, request, vector_name):
+         trace = self.create_langfuse_trace(request, vector_name)
+         span = None
+
+         if request.content_type.startswith('application/json'):
+             data = request.get_json()
+         elif request.content_type.startswith('multipart/form-data'):
+             data = request.form.to_dict()
+             if 'file' in request.files:
+                 file = request.files['file']
+                 if file.filename != '':
+                     log.info(f"Found file: {file.filename} to upload to GCS")
+                     try:
+                         image_uri, mime_type = self.handle_file_upload(file, vector_name)
+                         data["image_uri"] = image_uri
+                         data["mime"] = mime_type
+                     except Exception as e:
+                         return jsonify({'error': str(e), 'traceback': traceback.format_exc()}), 500
+                 else:
+                     return jsonify({"error": "No file selected"}), 400
+         else:
+             return jsonify({"error": "Unsupported content type"}), 400
+
+         log.info(f"vac/{vector_name} got data: {data}")
+
+         config, _ = load_config("config/llm_config.yaml")
+         vac_configs = config.get("vac")
+         if vac_configs:
+             vac_config = vac_configs[vector_name]
+
+         if trace:
+             trace.update(input=data, metadata=vac_config)
+
+         user_input = data.pop('user_input').strip()
+         stream_wait_time = data.pop('stream_wait_time', 7)
+         stream_timeout = data.pop('stream_timeout', 120)
+         chat_history = data.pop('chat_history', None)
+         vector_name = data.pop('vector_name', vector_name)
+
+         paired_messages = extract_chat_history(chat_history)
+
+         all_input = {'user_input': user_input,
+                      'vector_name': vector_name,
+                      'chat_history': paired_messages,
+                      'stream_wait_time': stream_wait_time,
+                      'stream_timeout': stream_timeout,
+                      'kwargs': data}
+
+         if trace:
+             span = trace.span(
+                 name="VAC",
+                 metadata=vac_config,
+                 input=all_input
+             )
+         command_response = handle_special_commands(user_input, vector_name, paired_messages)
+         if command_response is not None:
+             if trace:
+                 trace.update(output=jsonify(command_response))
+
+         return {
+             "trace": trace,
+             "span": span,
+             "command_response": command_response,
+             "all_input": all_input,
+             "vac_config": vac_config
+         }
+
+
+     def handle_file_upload(self, file, vector_name):
+         try:
+             file.save(file.filename)
+             image_uri = add_file_to_gcs(file.filename, vector_name)
+             os.remove(file.filename)  # Clean up the saved file
+             return image_uri, file.mimetype
+         except Exception as e:
+             raise Exception(f'File upload failed: {str(e)}')
+
+
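
The new vac_routes.py packages the qna_routes.py handlers into a reusable class. Its `prep_vac` expects a JSON body with `user_input` plus optional `stream_wait_time`, `stream_timeout`, `chat_history` and `vector_name` keys; anything left over is forwarded to the interpreter as kwargs, and the `X-User-ID`/`X-Session-ID` headers feed the Langfuse trace. A hedged client sketch (host and values are placeholders):

```python
import requests

# Placeholder host; the payload keys mirror what prep_vac pops from the body.
resp = requests.post(
    "https://your-agent-host/vac/streaming/my-vac",
    headers={"X-User-ID": "u-123", "X-Session-ID": "s-456"},  # picked up by create_langfuse_trace
    json={
        "user_input": "Summarise the latest release",
        "stream_wait_time": 7,   # defaults shown in prep_vac
        "stream_timeout": 120,
        "chat_history": [],
        "temperature": 0.2,      # any extra keys travel to the interpreter via kwargs
    },
    stream=True,
)
for line in resp.iter_lines():
    print(line.decode("utf-8"))  # plain-text chunks, then the final answer as JSON
```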
sunholo/components/retriever.py CHANGED
@@ -54,10 +54,15 @@ def pick_retriever(vector_name, embeddings=None):

          embeddings = embeddings or get_embeddings(vector_name)
          read_only = value.get('read_only')
-         vectorstore = pick_vectorstore(vectorstore,
-                                        vector_name=vector_name,
-                                        embeddings=embeddings,
-                                        read_only=read_only)
+         try:
+             vectorstore = pick_vectorstore(vectorstore,
+                                            vector_name=vector_name,
+                                            embeddings=embeddings,
+                                            read_only=read_only)
+         except Exception as e:
+             log.error(f"Failed to pick_vectorstore {vectorstore} for {vector_name} - {str(e)} - skipping")
+             continue
+
          k_override = value.get('k', 3)
          vs_retriever = vectorstore.as_retriever(search_kwargs=dict(k=k_override))
          retriever_list.append(vs_retriever)
sunholo/database/alloydb.py CHANGED
@@ -1,10 +1,7 @@
  import os
  try:
-     import pg8000
-     import sqlalchemy
      from sqlalchemy.exc import DatabaseError, ProgrammingError
      from asyncpg.exceptions import DuplicateTableError
-     from google.cloud.alloydb.connector import Connector
      from langchain_google_alloydb_pg import AlloyDBEngine, Column, AlloyDBLoader, AlloyDBDocumentSaver
      from google.cloud.alloydb.connector import IPTypes
  except ImportError:
@@ -12,9 +9,12 @@ except ImportError:
      pass

  from .database import get_vector_size
+ from .alloydb_client import AlloyDBClient
+
  from ..logging import log
  from ..utils.config import load_config_key

+
  def create_alloydb_engine(vector_name):

      if not AlloyDBEngine:
@@ -49,183 +49,6 @@ def create_alloydb_engine(vector_name):

      return engine

- class AlloyDBClient:
-     """
-     A class to manage interactions with an AlloyDB instance.
-
-     Example Usage:
-
-     ```python
-     client = AlloyDBClient(
-         project_id="your-project-id",
-         region="your-region",
-         cluster_name="your-cluster-name",
-         instance_name="your-instance-name",
-         user="your-db-user",
-         password="your-db-password"
-     )
-
-     # Create a database
-     client.execute_sql("CREATE DATABASE my_database")
-
-     # Execute other SQL statements
-     client.execute_sql("CREATE TABLE my_table (id INT, name VARCHAR(50))")
-     ```
-     """
-
-     def __init__(self,
-                  project_id: str,
-                  region: str,
-                  cluster_name: str,
-                  instance_name: str,
-                  user: str,
-                  password=None,
-                  db="postgres"):
-         """Initializes the AlloyDB client.
-         - project_id (str): GCP project ID where the AlloyDB instance resides.
-         - region (str): The region where the AlloyDB instance is located.
-         - cluster_name (str): The name of the AlloyDB cluster.
-         - instance_name (str): The name of the AlloyDB instance.
-         - user (str): The database user name.
-         - password (str): The database user's password.
-         - db_name (str): The name of the database.
-         """
-         self.connector = Connector()
-         self.inst_uri = self._build_instance_uri(project_id, region, cluster_name, instance_name)
-         self.engine = self._create_engine(self.inst_uri, user, password, db)
-
-     def _build_instance_uri(self, project_id, region, cluster_name, instance_name):
-         return f"projects/{project_id}/locations/{region}/clusters/{cluster_name}/instances/{instance_name}"
-
-     def _create_engine(self, inst_uri, user, password, db):
-         def getconn() -> pg8000.dbapi.Connection:
-             conn = self.connector.connect(
-                 inst_uri,
-                 "pg8000",
-                 user=user,
-                 password=password,
-                 db=db,
-                 enable_iam_auth=True,
-             )
-             return conn
-
-         engine = sqlalchemy.create_engine(
-             "postgresql+pg8000://",
-             isolation_level="AUTOCOMMIT",
-             creator=getconn
-         )
-         engine.dialect.description_encoding = None
-         log.info(f"Created AlloyDB engine for {inst_uri} and user: {user}")
-         return engine
-
-     def execute_sql(self, sql_statement):
-         """Executes a given SQL statement with error handling.
-
-         - sql_statement (str): The SQL statement to execute.
-         - Returns: The result of the execution, if any.
-         """
-         sql_ = sqlalchemy.text(sql_statement)
-         result = None
-         with self.engine.connect() as conn:
-             try:
-                 log.info(f"Executing SQL statement: {sql_}")
-                 result = conn.execute(sql_)
-             except DatabaseError as e:
-                 if "already exists" in str(e):
-                     log.warning(f"Error ignored: {str(e)}. Assuming object already exists.")
-                 else:
-                     raise
-             finally:
-                 conn.close()
-
-         return result
-
-     @staticmethod
-     def _and_or_ilike(sources, search_type="OR", operator="ILIKE"):
-         unique_sources = set(sources)
-         # Choose the delimiter based on the search_type argument
-         delimiter = ' AND ' if search_type.upper() == "AND" else ' OR '
-
-         # Build the conditional expressions based on the chosen delimiter
-         conditions = delimiter.join(f"TRIM(source) {operator} '%{source}%'" for source in unique_sources)
-         if not conditions:
-             log.warning("Alloydb doc query found no like_patterns")
-             return []
-
-         return conditions
-
-     def delete_sources_from_alloydb(self, sources, vector_name):
-         """
-         Deletes from both vectorstore and docstore
-         """
-
-         vector_length = get_vector_size(vector_name)
-
-         conditions = self._and_or_ilike(sources, operator="=")
-
-         if not conditions:
-             log.warning("No conditions were specified, not deleting whole table!")
-             return False
-
-         query = f"""
-         DELETE FROM {vector_name}_docstore
-         WHERE {conditions};
-         DELETE FROM {vector_name}_vectorstore_{vector_length}
-         WHERE {conditions}
-         """
-
-         return self.execute_sql(query)
-
-     def create_database(self, database_name):
-         self.execute_sql(f'CREATE DATABASE "{database_name}"')
-
-     def fetch_owners(self):
-         owners = self.execute_sql('SELECT table_schema, table_name, privilege_type FROM information_schema.table_privileges')
-         for row in owners:
-             print(f"Schema: {row[0]}, Table: {row[1]}, Privilege: {row[2]}")
-         return owners
-
-     def create_schema(self, schema_name="public"):
-         self.execute_sql(f'CREATE SCHEMA IF NOT EXISTS {schema_name};')
-
-     def grant_permissions(self, schema_name, users):
-         for user in users:
-             self.execute_sql(f'GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA {schema_name} TO "{user}";')
-             self.execute_sql(f'GRANT USAGE, CREATE ON SCHEMA {schema_name} TO "{user}";')
-             self.execute_sql(f'ALTER DEFAULT PRIVILEGES IN SCHEMA {schema_name} GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO "{user}";')
-             self.execute_sql(f'GRANT USAGE ON SCHEMA information_schema TO "{user}";')
-             self.execute_sql(f'GRANT SELECT ON information_schema.columns TO "{user}";')
-
-     def create_docstore_tables(self, vector_names, users):
-         for vector_name in vector_names:
-             table_name = f"{vector_name}_docstore"
-             sql = f'''
-             CREATE TABLE IF NOT EXISTS "{table_name}"
-             (page_content TEXT, doc_id UUID, source TEXT, images_gsurls JSONB, chunk_metadata JSONB, langchain_metadata JSONB)
-             '''
-             self.execute_sql(sql)
-
-             for user in users:
-                 self.execute_sql(f'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE "{table_name}" TO "{user}";')
-
-             vectorstore_id = f"{vector_name}_vectorstore_1536"
-             sql = f'''
-             CREATE TABLE IF NOT EXISTS "{vectorstore_id}" (
-                 langchain_id UUID NOT NULL,
-                 content TEXT NOT NULL,
-                 embedding vector NOT NULL,
-                 source TEXT,
-                 langchain_metadata JSONB,
-                 docstore_doc_id UUID,
-                 eventTime TIMESTAMPTZ
-             );
-             '''
-             self.execute_sql(sql)
-
-             for user in users:
-                 self.execute_sql(f'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE {vectorstore_id} TO "{user}";')
-
-
  alloydb_table_cache = {}  # Our cache, initially empty # noqa: F841
  def create_alloydb_table(vector_name, engine, type = "vectorstore", alloydb_config=None, username=None):
      global alloydb_table_cache
@@ -240,9 +63,6 @@ def create_alloydb_table(vector_name, engine, type = "vectorstore", alloydb_conf

          return table_name

-         alloydb_table_cache[table_name] = True
-         return table_name
-
      log.info(f"# Creating AlloyDB table {table_name}")
      try:
          engine.init_vectorstore_table(
sunholo/database/alloydb_client.py ADDED
@@ -0,0 +1,196 @@
+ try:
+     import pg8000
+     import sqlalchemy
+     from sqlalchemy.exc import DatabaseError, ProgrammingError
+     from google.cloud.alloydb.connector import Connector
+ except ImportError:
+     AlloyDBEngine = None
+     pass
+
+ from .database import get_vector_size
+ from ..logging import log
+
+ class AlloyDBClient:
+     """
+     A class to manage interactions with an AlloyDB instance.
+
+     Example Usage:
+
+     ```python
+     client = AlloyDBClient(
+         project_id="your-project-id",
+         region="your-region",
+         cluster_name="your-cluster-name",
+         instance_name="your-instance-name",
+         user="your-db-user",
+         password="your-db-password"
+     )
+
+     # Create a database
+     client.execute_sql("CREATE DATABASE my_database")
+
+     # Execute other SQL statements
+     client.execute_sql("CREATE TABLE my_table (id INT, name VARCHAR(50))")
+     ```
+     """
+
+     def __init__(self,
+                  project_id: str,
+                  region: str,
+                  cluster_name: str,
+                  instance_name: str,
+                  user: str,
+                  password=None,
+                  db="postgres"):
+         """Initializes the AlloyDB client.
+         - project_id (str): GCP project ID where the AlloyDB instance resides.
+         - region (str): The region where the AlloyDB instance is located.
+         - cluster_name (str): The name of the AlloyDB cluster.
+         - instance_name (str): The name of the AlloyDB instance.
+         - user (str): The database user name.
+         - password (str): The database user's password.
+         - db_name (str): The name of the database.
+         """
+         self.connector = Connector()
+         self.inst_uri = self._build_instance_uri(project_id, region, cluster_name, instance_name)
+         self.engine = self._create_engine(self.inst_uri, user, password, db)
+
+     def _build_instance_uri(self, project_id, region, cluster_name, instance_name):
+         return f"projects/{project_id}/locations/{region}/clusters/{cluster_name}/instances/{instance_name}"
+
+     def _create_engine(self, inst_uri, user, password, db):
+         def getconn() -> pg8000.dbapi.Connection:
+             conn = self.connector.connect(
+                 inst_uri,
+                 "pg8000",
+                 user=user,
+                 password=password,
+                 db=db,
+                 enable_iam_auth=True,
+             )
+             return conn
+
+         engine = sqlalchemy.create_engine(
+             "postgresql+pg8000://",
+             isolation_level="AUTOCOMMIT",
+             creator=getconn
+         )
+         engine.dialect.description_encoding = None
+         log.info(f"Created AlloyDB engine for {inst_uri} and user: {user}")
+         return engine
+
+     def execute_sql(self, sql_statement):
+         """Executes a given SQL statement with error handling.
+
+         - sql_statement (str): The SQL statement to execute.
+         - Returns: The result of the execution, if any.
+         """
+         sql_ = sqlalchemy.text(sql_statement)
+         result = None
+         with self.engine.connect() as conn:
+             try:
+                 log.info(f"Executing SQL statement: {sql_}")
+                 result = conn.execute(sql_)
+             except DatabaseError as e:
+                 if "already exists" in str(e):
+                     log.warning(f"Error ignored: {str(e)}. Assuming object already exists.")
+                 else:
+                     raise
+             finally:
+                 conn.close()
+
+         return result
+
+     @staticmethod
+     def _and_or_ilike(sources, search_type="OR", operator="ILIKE"):
+         unique_sources = set(sources)
+         # Choose the delimiter based on the search_type argument
+         delimiter = ' AND ' if search_type.upper() == "AND" else ' OR '
+
+         # Build the conditional expressions based on the chosen delimiter
+         conditions = delimiter.join(f"TRIM(source) {operator} '%{source}%'" for source in unique_sources)
+         if not conditions:
+             log.warning("Alloydb doc query found no like_patterns")
+             return []
+
+         return conditions
+
+     def delete_sources_from_alloydb(self, sources, vector_name):
+         """
+         Deletes from both vectorstore and docstore
+         """
+
+         vector_length = get_vector_size(vector_name)
+
+         conditions = self._and_or_ilike(sources, operator="=")
+
+         if not conditions:
+             log.warning("No conditions were specified, not deleting whole table!")
+             return False
+
+         query = f"""
+         DELETE FROM {vector_name}_docstore
+         WHERE {conditions};
+         DELETE FROM {vector_name}_vectorstore_{vector_length}
+         WHERE {conditions}
+         """
+
+         return self.execute_sql(query)
+
+     def create_database(self, database_name):
+         self.execute_sql(f'CREATE DATABASE "{database_name}"')
+
+     def fetch_owners(self):
+         owners = self.execute_sql('SELECT table_schema, table_name, privilege_type FROM information_schema.table_privileges')
+         for row in owners:
+             print(f"Schema: {row[0]}, Table: {row[1]}, Privilege: {row[2]}")
+         return owners
+
+     def create_schema(self, schema_name="public"):
+         self.execute_sql(f'CREATE SCHEMA IF NOT EXISTS {schema_name};')
+
+     def grant_schema_permissions(self, schema_name, users):
+         for user in users:
+             self.execute_sql(f'GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA {schema_name} TO "{user}";')
+             self.execute_sql(f'GRANT USAGE, CREATE ON SCHEMA {schema_name} TO "{user}";')
+             self.execute_sql(f'ALTER DEFAULT PRIVILEGES IN SCHEMA {schema_name} GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO "{user}";')
+             self.execute_sql(f'GRANT USAGE ON SCHEMA information_schema TO "{user}";')
+             self.execute_sql(f'GRANT SELECT ON information_schema.columns TO "{user}";')
+
+     def grant_table_permissions(self, table_name, users):
+         for user in users:
+             self.execute_sql(f'GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE "{table_name}" TO "{user}";')
+
+     def create_tables(self, vector_name, users):
+         self.create_docstore_table(vector_name, users)
+         self.create_vectorstore_table(vector_name, users)
+
+     def create_docstore_table(self, vector_name: str, users):
+         table_name = f"{vector_name}_docstore"
+         sql = f'''
+         CREATE TABLE IF NOT EXISTS "{table_name}"
+         (page_content TEXT, doc_id UUID, source TEXT, images_gsurls JSONB, chunk_metadata JSONB, langchain_metadata JSONB)
+         '''
+         self.execute_sql(sql)
+
+         self.grant_table_permissions(table_name, users)
+
+     def create_vectorstore_table(self, vector_name: str, users):
+         from .database import get_vector_size
+         vector_size = get_vector_size(vector_name)
+         vectorstore_id = f"{vector_name}_vectorstore_{vector_size}"
+
+         sql = f'''
+         CREATE TABLE IF NOT EXISTS "{vectorstore_id}" (
+             langchain_id UUID NOT NULL,
+             content TEXT NOT NULL,
+             embedding vector NOT NULL,
+             source TEXT,
+             langchain_metadata JSONB,
+             docstore_doc_id UUID,
+             eventTime TIMESTAMPTZ
+         );
+         '''
+         self.execute_sql(sql)
+
+         self.grant_table_permissions(vectorstore_id, users)
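
Beyond the docstring example, the split into grant_schema_permissions, grant_table_permissions and create_tables makes per-VAC provisioning a short script. A hedged sketch (all identifiers are placeholders; create_tables assumes the VAC's embedding size is resolvable via get_vector_size from config):

```python
# Placeholder identifiers throughout; import path follows the RECORD listing.
from sunholo.database.alloydb_client import AlloyDBClient

client = AlloyDBClient(
    project_id="my-project",
    region="europe-west1",
    cluster_name="my-cluster",
    instance_name="my-instance",
    user="provisioner",
)

client.create_schema("public")
client.grant_schema_permissions("public", users=["vac-runtime"])
client.create_tables("my_vac", users=["vac-runtime"])  # docstore + vectorstore tables
```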
sunholo/gcs/add_file.py CHANGED
@@ -26,6 +26,10 @@ from ..utils.config import load_config_key


  def handle_base64_image(base64_data, vector_name):
+     model = load_config_key("llm", vector_name, "vacConfig")
+     if model.startswith("openai"):  # pass it to gpt directly
+         return base64_data, base64_data.split(",", 1)[0]
+
      try:
          header, encoded = base64_data.split(",", 1)
          data = base64.b64decode(encoded)
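
For reference, the base64_data argument is an OpenAI-style data URI, which is why both branches split on the first comma: everything before it is the header carrying the mime type, everything after it is the encoded payload. Illustrative value (payload truncated):

```python
# Truncated payload, for illustration only.
base64_data = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."

header, encoded = base64_data.split(",", 1)
print(header)                    # data:image/png;base64
print(header.split(";")[0][5:])  # image/png, the mime type embedded in the header
```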
{sunholo-0.67.10.dist-info → sunholo-0.68.1.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: sunholo
- Version: 0.67.10
+ Version: 0.68.1
  Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
  Home-page: https://github.com/sunholo-data/sunholo-py
- Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.67.10.tar.gz
+ Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.68.1.tar.gz
  Author: Holosun ApS
  Author-email: multivac@sunholo.com
  License: Apache License, Version 2.0
{sunholo-0.67.10.dist-info → sunholo-0.68.1.dist-info}/RECORD CHANGED
@@ -1,7 +1,7 @@
  sunholo/__init__.py,sha256=0CdpufyRKWyZe7J7UKigL6j_qOorM-p0OjHIAuf9M38,864
  sunholo/logging.py,sha256=00VGGArfWHbJuHHSJ4kXhHTggWnRfbVYMcZNOYIsqnA,11787
  sunholo/agents/__init__.py,sha256=Hb4NXy2rN-83Z0-UDRwX-LXv2R29lcbSFPf8G6q4fZg,380
- sunholo/agents/chat_history.py,sha256=bkII7PNEbGCaobu2Rnr2rM9dim3BCK0kM-tiWhoI1tw,5219
+ sunholo/agents/chat_history.py,sha256=8iX1bgvRW6fdp6r_DQR_caPHYrZ_9QJJgPxCiSDf3q8,5380
  sunholo/agents/dispatch_to_qa.py,sha256=nFNdxhkr7rVYuUwVoBCBNYBI2Dke6-_z_ZApBEWb_cU,8291
  sunholo/agents/langserve.py,sha256=FdhQjorAY2bMn2rpuabNT6bU3uqSKWrl8DjpH3L_V7k,4375
  sunholo/agents/pubsub.py,sha256=5hbbhbBGyVWRpt2sAGC5FEheYH1mCCwVUhZEB1S7vGg,1337
@@ -13,7 +13,8 @@ sunholo/agents/fastapi/base.py,sha256=clk76cHbUAvU0OYJrRfCWX_5f0ACbhDsIzYBhI3wyo
  sunholo/agents/fastapi/qna_routes.py,sha256=DgK4Btu5XriOC1JaRQ4G_nWEjJfnQ0J5pyLanF6eF1g,3857
  sunholo/agents/flask/__init__.py,sha256=uqfHNw2Ru3EJ4dJEcbp86h_lkquBQPMxZbjhV_xe3rs,72
  sunholo/agents/flask/base.py,sha256=FgSaCODyoTtlstJtsqlLPScdgRUtv9_plxftdzHdVFo,809
- sunholo/agents/flask/qna_routes.py,sha256=tEsJRnnGoz_cOEaPtPpCCD920jZpLypLK58UzIxe1Ac,21659
+ sunholo/agents/flask/qna_routes.py,sha256=oDZzI0FllRD5GZI_C8EbKvvBSrgRlvmpwQc7lp54Krs,21926
+ sunholo/agents/flask/vac_routes.py,sha256=l2-w7x437F0Uu3QvwNueEYPtnKuIee6bHJ7LUMt_tkY,19520
  sunholo/archive/__init__.py,sha256=qNHWm5rGPVOlxZBZCpA1wTYPbalizRT7f8X4rs2t290,31
  sunholo/archive/archive.py,sha256=C-UhG5x-XtZ8VheQp92IYJqgD0V3NFQjniqlit94t18,1197
  sunholo/auth/__init__.py,sha256=4owDjSaWYkbTlPK47UHTOC0gCWbZsqn4ZIEw5NWZTlg,28
@@ -44,10 +45,11 @@ sunholo/cli/sun_rich.py,sha256=UpMqeJ0C8i0pkue1AHnnyyX0bFJ9zZeJ7HBR6yhuA8A,54
  sunholo/cli/swagger.py,sha256=absYKAU-7Yd2eiVNUY-g_WLl2zJfeRUNdWQ0oH8M_HM,1564
  sunholo/components/__init__.py,sha256=IDoylb74zFKo6NIS3RQqUl0PDFBGVxM1dfUmO7OJ44U,176
  sunholo/components/llm.py,sha256=T4we3tGmqUj4tPwxQr9M6AXv_BALqZV_dRSvINan-oU,10374
- sunholo/components/retriever.py,sha256=SKb19WxYSdOmy3hAnIai_UnSj-B-Q5S2s7g5xsTAq4g,6149
+ sunholo/components/retriever.py,sha256=jltG91N5r2P9RWKPW8A8tU3ilghciBczxapauW83Ir8,6377
  sunholo/components/vectorstore.py,sha256=BxtMF_wX8Zrexr67P07OTSJPjucTewmcPM5OQwIXHPM,5630
  sunholo/database/__init__.py,sha256=Zz0Shcq-CtStf9rJGIYB_Ybzb8rY_Q9mfSj-nviM490,241
- sunholo/database/alloydb.py,sha256=UeWbk_DAqivquMGibX_tz8v1Jza9qnf4SWThNBG2Dh4,17327
+ sunholo/database/alloydb.py,sha256=d9W0pbZB0jTVIGF5OVaQ6kXHo-X3-6e9NpWNmV5e9UY,10464
+ sunholo/database/alloydb_client.py,sha256=AYA0SSaBy-1XEfeZI97sMGehfrwnfbwZ8sE0exzI2E0,7254
  sunholo/database/database.py,sha256=UDHkceiEvJmS3esQX2LYEjEMrHcogN_JHuJXoVWCH3M,7354
  sunholo/database/lancedb.py,sha256=2rAbJVusMrm5TPtVTsUtmwn0z1iZ_wvbKhc6eyT6ClE,708
  sunholo/database/static_dbs.py,sha256=aOyU3AJ-Dzz3qSNjbuN2293cfYw5PhkcQuQxdwPMJ4w,435
@@ -61,7 +63,7 @@ sunholo/database/sql/sb/setup.sql,sha256=CvoFvZQev2uWjmFa3aj3m3iuPFzAAJZ0S7Qi3L3
  sunholo/embedder/__init__.py,sha256=sI4N_CqgEVcrMDxXgxKp1FsfsB4FpjoXgPGkl4N_u4I,44
  sunholo/embedder/embed_chunk.py,sha256=P744zUQJgqrjILunzaqtTerB9AwoXFU6tXBtz4rjWgQ,6673
  sunholo/gcs/__init__.py,sha256=DtVw_AZwQn-IguR5BJuIi2XJeF_FQXizhJikzRNrXiE,50
- sunholo/gcs/add_file.py,sha256=ILU3Nq-rYjL0Ini9op6jBYtXCDl3iTO61ZA1q2zykJQ,5537
+ sunholo/gcs/add_file.py,sha256=y2s7ZZBiJD3pu1TqRAy0s1SRGvHvzJqSDaYD-MFFh1c,5717
  sunholo/gcs/download_url.py,sha256=8XSEf8byfubqs5CMQeF_tn9wxqwUTq3n9mo5mLNIUTA,4801
  sunholo/gcs/metadata.py,sha256=C9sMPsHsq1ETetdQCqB3EBs3Kws8b8QHS9L7ei_v5aw,891
  sunholo/langfuse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -105,9 +107,9 @@ sunholo/vertex/__init__.py,sha256=JvHcGFuv6R_nAhY2AdoqqhMpJ5ugeWPZ_svGhWrObBk,13
  sunholo/vertex/init.py,sha256=JDMUaBRdednzbKF-5p33qqLit2LMsvgvWW-NRz0AqO0,1801
  sunholo/vertex/memory_tools.py,sha256=8F1iTWnqEK9mX4W5RzCVKIjydIcNp6OFxjn_dtQ3GXo,5379
  sunholo/vertex/safety.py,sha256=3meAX0HyGZYrH7rXPUAHxtI_3w_zoy_RX7Shtkoa660,1275
- sunholo-0.67.10.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
- sunholo-0.67.10.dist-info/METADATA,sha256=N4lheJkc7di64bOevGMEgiA2K5-1DdIxT1ZNx-ie38o,6157
- sunholo-0.67.10.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
- sunholo-0.67.10.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
- sunholo-0.67.10.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
- sunholo-0.67.10.dist-info/RECORD,,
+ sunholo-0.68.1.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+ sunholo-0.68.1.dist-info/METADATA,sha256=q-b64zlvui5obJWngf1tAHgH56I3d9Hr_LgkOSNtbcE,6155
+ sunholo-0.68.1.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+ sunholo-0.68.1.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+ sunholo-0.68.1.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+ sunholo-0.68.1.dist-info/RECORD,,