sunholo-0.116.2-py3-none-any.whl → sunholo-0.118.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
sunholo/cli/cli_init.py CHANGED
@@ -200,7 +200,7 @@ def write_vac_config(project_dir: str, service_name: str):
 
     # Write the YAML configuration to the file
     with open(vac_config_path, 'w') as file:
-        yaml.dump(vac_config_content, file, default_flow_style=False)
+        yaml.dump(vac_config_content, file)
 
     print(f"{vac_config_path} written successfully with service name '{service_name}'.")
 
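Note: dropping default_flow_style=False should not change the emitted YAML on current PyYAML releases (5.1 and later already default to block style), so the call can simply be shortened. A minimal sketch, using a hypothetical config dict for illustration only:

import yaml

# hypothetical VAC config content, not the real structure written by cli_init
vac_config_content = {"my_service": {"llm": "vertex", "model": "gemini-1.5-pro"}}

with open("vac_config.yaml", "w") as file:
    # PyYAML >= 5.1 emits block (non-flow) style by default
    yaml.dump(vac_config_content, file)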
sunholo/genai/__init__.py CHANGED
@@ -1,4 +1,5 @@
 from .process_funcs_cls import GenAIFunctionProcessor
 from .safety import genai_safety
 from .init import init_genai
-from .file_handling import download_gcs_upload_genai, construct_file_content
+from .file_handling import download_gcs_upload_genai, construct_file_content
+from .genaiv2 import GoogleAI, GoogleAIConfig
sunholo/genai/genaiv2.py ADDED
@@ -0,0 +1,542 @@
+from typing import Optional, List, Union, Dict, Any, TypedDict, TYPE_CHECKING, Generator
+
+import enum
+import json
+from pydantic import BaseModel
+import time
+try:
+    from google import genai
+    from google.genai import types
+except ImportError:
+    genai = None
+
+try:
+    import sounddevice as sd
+except ImportError:
+    sd = None
+except OSError:
+    sd = None
+
+try:
+    import numpy as np
+except ImportError:
+    np = None
+
+try:
+    import cv2 as cv2
+except ImportError:
+    cv2 = None
+
+if TYPE_CHECKING:
+    from google import genai
+    from google.genai import types
+    from google.genai.types import Tool, GenerateContentConfig, EmbedContentConfig
+else:
+    genai = None
+
+class GoogleAIConfig(BaseModel):
+    """Configuration class for GoogleAI client initialization.
+    See https://ai.google.dev/gemini-api/docs/models/gemini-v2
+    """
+    api_key: Optional[str] = None
+    project_id: Optional[str] = None
+    location: str = "us-central1"
+    use_vertex: bool = False
+
+class GoogleAI:
+    """A wrapper class for Google's v2 Generative AI APIs.
+    See https://ai.google.dev/gemini-api/docs/models/gemini-v2
+    """
+
+    def __init__(self, config: GoogleAIConfig):
+        """Initialize the GoogleAI client.
+
+        Args:
+            config (GoogleAIConfig): Configuration for client initialization
+        """
+        if genai is None:
+            raise ImportError("GoogleAI requires google-genai to be installed, try sunholo[gcp]")
+        if config.use_vertex:
+            if not config.project_id:
+                raise ValueError("project_id is required for Vertex AI")
+            self.client = genai.Client(
+                vertexai=True,
+                project=config.project_id,
+                location=config.location
+            )
+        else:
+            if not config.api_key:
+                raise ValueError("api_key is required for Google AI API")
+            self.client = genai.Client(api_key=config.api_key)
+
+        self.default_model = "gemini-2.0-flash-exp"
+
+    def google_search_tool(self) -> "types.Tool":
+        from google.genai.types import Tool, GoogleSearch
+        return Tool(
+            google_search = GoogleSearch()
+        )
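A minimal usage sketch for the wrapper class added above (the API key is a placeholder, and google-genai must be installed, for example via the gcp extra):

from sunholo.genai import GoogleAI, GoogleAIConfig

# Developer API key path; for Vertex AI pass use_vertex=True plus project_id instead
config = GoogleAIConfig(api_key="YOUR_API_KEY")
ai = GoogleAI(config)

# Optional Google Search grounding tool exposed by the wrapper
search_tool = ai.google_search_tool()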
+    def generate_text(
+        self,
+        prompt: str,
+        model: Optional[str] = None,
+        temperature: float = 0.7,
+        max_output_tokens: int = 1024,
+        top_p: float = 0.95,
+        top_k: int = 20,
+        stop_sequences: Optional[List[str]] = None,
+        system_prompt: Optional[str] = None,
+        tools: Optional[List["types.Tool"]] = None
+    ) -> str:
+        """Generate text using the specified model.
+
+        Args:
+            prompt (str): The input prompt
+            model (Optional[str]): Model name to use
+            temperature (float): Controls randomness (0.0-1.0)
+            max_output_tokens (int): Maximum number of tokens to generate
+            top_p (float): Nucleus sampling parameter
+            top_k (int): Top-k sampling parameter
+            stop_sequences (Optional[List[str]]): Sequences that stop generation
+            system_prompt (Optional[str]): System-level instruction
+            tools: list of python functions or Tool objects
+
+        Returns:
+            str: Generated text response
+        """
+        model = model or self.default_model
+
+        config = types.GenerateContentConfig(
+            temperature=temperature,
+            max_output_tokens=max_output_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            stop_sequences=stop_sequences or [],
+            tools=tools or []
+        )
+
+        if system_prompt:
+            config.system_instruction = system_prompt
+
+        response = self.client.models.generate_content(
+            model=model,
+            contents=prompt,
+            config=config
+        )
+
+        return response.text
+
+    async def generate_text_async(
+        self,
+        prompt: str,
+        model: Optional[str] = None,
+        **kwargs
+    ) -> str:
+        """Async version of generate_text."""
+        model = model or self.default_model
+        response = await self.client.aio.models.generate_content(
+            model=model,
+            contents=prompt,
+            config=types.GenerateContentConfig(**kwargs)
+        )
+        return response.text
+
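With a client in hand, plain text generation looks roughly like this (a sketch; ai is the instance from the earlier example):

text = ai.generate_text(
    "Summarise the Apache 2.0 license in one sentence.",
    temperature=0.2,
    max_output_tokens=256,
)
print(text)

# The async variant forwards any extra kwargs straight into GenerateContentConfig:
#   text = await ai.generate_text_async("Hello", temperature=0.2)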
+    async def _record_audio(
+        self,
+        duration: float = 5.0,
+        sample_rate: int = 16000
+    ) -> bytes:
+        """Internal method to record audio."""
+        if sd is None or np is None:
+            raise ImportError("sounddevice and numpy are required for audio. Install with pip install sunholo[tts]")
+
+        print(f"Recording for {duration} seconds...")
+        audio_data = sd.rec(
+            int(duration * sample_rate),
+            samplerate=sample_rate,
+            channels=1,
+            dtype=np.int16
+        )
+        sd.wait()
+        print("Recording complete")
+        return audio_data.tobytes()
+
+    async def _record_video(
+        self,
+        duration: float = 5.0
+    ) -> List[bytes]:
+        """Internal method to record video frames."""
+        import cv2
+
+        frames = []
+        screen = cv2.VideoCapture(0)
+        start_time = time.time()
+
+        try:
+            while time.time() - start_time < duration:
+                ret, frame = screen.read()
+                if ret:
+                    _, buffer = cv2.imencode('.jpg', frame)
+                    frames.append(buffer.tobytes())
+                time.sleep(0.1)  # Limit frame rate
+        finally:
+            screen.release()
+
+        return frames
+
+    async def _process_responses(self, session) -> List[str]:
+        """Internal method to process session responses."""
+        responses = []
+        i = 1
+        async for response in session.receive():
+            model_turn = response.server_content.model_turn
+            if model_turn is None:
+                continue
+            for part in model_turn.parts:
+                text = part.text
+                print(f"[{i}] {text}")
+                i += 1
+                responses.append(text)
+        return responses
+
+    async def live_async(
+        self,
+        prompt: Optional[Union[str, List[Union[str, bytes]]]] = None,
+        input_type: str = "text",  # "text", "audio", or "video"
+        duration: Optional[float] = None,  # For audio/video recording duration
+        model: Optional[str] = None,
+        **kwargs
+    ) -> str:
+        """Live Multimodal API with support for text, audio, and video inputs.
+
+        Args:
+            input_type: Type of input ("text", "audio", or "video")
+            prompt: Text prompt or list of text/binary chunks
+            duration: Recording duration for audio/video in seconds
+            model: Optional model name
+            **kwargs: Additional configuration parameters
+
+        Returns:
+            str: Generated response text
+        """
+        client = genai.Client(
+            http_options={
+                'api_version': 'v1alpha',
+                'url': 'generativelanguage.googleapis.com',
+            }
+        )
+
+        config = {
+            "generation_config": {"response_modalities": ["TEXT"]}
+        }
+
+        async with client.aio.live.connect(model=self.default_model, config=config) as session:
+            # Handle different input types
+            if input_type == "text":
+                message = {
+                    "client_content": {
+                        "turns": [
+                            {
+                                "parts": [{"text": prompt}],
+                                "role": "user"
+                            }
+                        ],
+                        "turn_complete": True
+                    }
+                }
+                await session.send(json.dumps(message), end_of_turn=True)
+
+            elif input_type == "audio":
+                audio_data = await self._record_audio(duration=duration or 5.0)
+                message = {"media_chunks": [audio_data]}
+                await session.send(message)
+                await session.send(json.dumps({"turn_complete": True}), end_of_turn=True)
+
+            elif input_type == "video":
+                frames = await self._record_video(duration=duration or 5.0)
+                for frame in frames:
+                    message = {"media_chunks": [frame]}
+                    await session.send(message)
+                await session.send(json.dumps({"turn_complete": True}), end_of_turn=True)
+
+            else:
+                raise ValueError(f"Unsupported input_type: {input_type}")
+
+            # Process responses
+            responses = await self._process_responses(session)
+            return "OK"
+
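Because the live API is asynchronous, a caller would drive it with asyncio; audio and video input additionally need the sounddevice/numpy and opencv-python optional dependencies. A sketch for the text path, reusing the ai instance from above:

import asyncio

async def main():
    # Streams the model's turns to stdout via _process_responses and returns "OK"
    await ai.live_async("Describe the Sunholo project in one line.", input_type="text")

asyncio.run(main())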
+    def gs_uri(self, uri, mime_type=None):
+
+        if mime_type is None:
+            from ..utils.mime import guess_mime_type
+            mime_type = guess_mime_type(uri)
+
+        return types.Part.from_uri(
+            file_uri=uri,
+            mime_type=mime_type,
+        )
+
+    def local_file(self, filename, mime_type=None):
+        if mime_type is None:
+            from ..utils.mime import guess_mime_type
+            mime_type = guess_mime_type(filename)
+
+        with open(filename, 'rb') as f:
+            image_bytes = f.read()
+
+        if image_bytes and mime_type:
+            return types.Part.from_bytes(
+                data=image_bytes,
+                mime_type=mime_type,
+            )
+        else:
+            raise ValueError(f"Could not read bytes or mime_type for {filename=} - {mime_type=}")
+
+    def stream_text(
+        self,
+        prompt: str,
+        model: Optional[str] = None,
+        **kwargs
+    ) -> "Generator[str, None, None]":
+        """Stream text generation responses.
+
+        Args:
+            prompt (str): The input prompt
+            model (Optional[str]): Model name to use
+            **kwargs: Additional configuration parameters
+
+        Yields:
+            str: Chunks of generated text
+        """
+        model = model or self.default_model
+        for chunk in self.client.models.generate_content_stream(
+            model=model,
+            contents=prompt,
+            config=types.GenerateContentConfig(**kwargs)
+        ):
+            yield chunk.text
+
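A streaming sketch; extra kwargs are passed through to GenerateContentConfig:

for chunk in ai.stream_text("Write a haiku about cloud deployments.", temperature=0.7):
    print(chunk, end="", flush=True)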
+    def get_embedding(
+        self,
+        text: Union[str, List[str]],
+        model: str = "text-embedding-004",
+        output_dim: Optional[int] = None
+    ) -> Union[List[float], List[List[float]]]:
+        """Get text embeddings.
+
+        Args:
+            text (Union[str, List[str]]): Text to embed
+            model (str): Embedding model to use
+            output_dim (Optional[int]): Desired embedding dimension
+
+        Returns:
+            Union[List[float], List[List[float]]]: Embeddings
+        """
+        config = {}
+        if output_dim:
+            config["output_dimensionality"] = output_dim
+
+        response = self.client.models.embed_content(
+            model=model,
+            contents=text,
+            config=types.EmbedContentConfig(**config) if config else None
+        )
+
+        if isinstance(text, str):
+            return response.embeddings[0].values
+        return [emb.values for emb in response.embeddings]
+
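Embedding usage is symmetric for single strings and lists (a sketch):

# A single string returns one vector; a list returns a list of vectors
vector = ai.get_embedding("Large Language Model DevOps", output_dim=256)
vectors = ai.get_embedding(["first document", "second document"])
print(len(vector), len(vectors))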
+    def structured_output(
+        self,
+        prompt: str,
+        schema: Union[BaseModel, Dict, type, TypedDict],
+        model: Optional[str] = None,
+        is_list: bool = False
+    ) -> Dict:
+        """Generate structured output according to a schema.
+
+        Args:
+            prompt (str): Input prompt
+            schema (Union[BaseModel, Dict, type]): Schema definition (Pydantic model, TypedDict, or raw schema)
+            model (Optional[str]): Model to use
+            is_list (bool): Whether to wrap the schema in a list
+
+        Returns:
+            Dict: Structured response matching schema
+        """
+        model = model or self.default_model
+
+        # Handle TypedDict differently than Pydantic models
+        if isinstance(schema, type):
+            if hasattr(schema, '__origin__') and schema.__origin__ is list:
+                # Handle list[TypedDict] case
+                inner_type = schema.__args__[0]
+                config = types.GenerateContentConfig(
+                    response_mime_type='application/json',
+                    response_schema=schema
+                )
+            elif hasattr(schema, '__annotations__'):  # TypedDict check
+                # Create API-compatible schema
+                schema_dict = {
+                    'type': 'OBJECT',
+                    'properties': {},
+                    'required': list(schema.__annotations__.keys())  # TypedDict fields are required by default
+                }
+
+                for field_name, field_type in schema.__annotations__.items():
+                    if hasattr(field_type, '__base__') and field_type.__base__ == enum.Enum:
+                        schema_dict['properties'][field_name] = {
+                            'type': 'STRING',
+                            'enum': [e.value for e in field_type]
+                        }
+                    elif field_type is str:
+                        schema_dict['properties'][field_name] = {'type': 'STRING'}
+                    elif field_type is int:
+                        schema_dict['properties'][field_name] = {'type': 'INTEGER'}
+                    elif field_type is float:
+                        schema_dict['properties'][field_name] = {'type': 'NUMBER'}
+                    elif field_type is bool:
+                        schema_dict['properties'][field_name] = {'type': 'BOOLEAN'}
+                    else:
+                        schema_dict['properties'][field_name] = {'type': 'STRING'}
+
+                if is_list:
+                    schema_dict = {
+                        'type': 'ARRAY',
+                        'items': schema_dict
+                    }
+
+                config = types.GenerateContentConfig(
+                    response_mime_type='application/json',
+                    response_schema=schema_dict
+                )
+        elif hasattr(schema, '__origin__') and schema.__origin__ is list:
+            # Handle List[TypeDict] case
+            inner_type = schema.__args__[0]
+            if hasattr(inner_type, '__annotations__'):  # Check if inner type is TypedDict
+                # Create API-compatible schema for the inner TypedDict
+                schema_dict = {
+                    'type': 'OBJECT',
+                    'properties': {},
+                    'required': list(inner_type.__annotations__.keys())
+                }
+
+                for field_name, field_type in inner_type.__annotations__.items():
+                    if hasattr(field_type, '__base__') and field_type.__base__ == enum.Enum:
+                        schema_dict['properties'][field_name] = {
+                            'type': 'STRING',
+                            'enum': [e.value for e in field_type]
+                        }
+                    elif field_type is str:
+                        schema_dict['properties'][field_name] = {'type': 'STRING'}
+                    elif field_type is int:
+                        schema_dict['properties'][field_name] = {'type': 'INTEGER'}
+                    elif field_type is float:
+                        schema_dict['properties'][field_name] = {'type': 'NUMBER'}
+                    elif field_type is bool:
+                        schema_dict['properties'][field_name] = {'type': 'BOOLEAN'}
+                    else:
+                        schema_dict['properties'][field_name] = {'type': 'STRING'}
+
+                # Wrap in array type
+                array_schema = {
+                    'type': 'ARRAY',
+                    'items': schema_dict
+                }
+
+                config = types.GenerateContentConfig(
+                    response_mime_type='application/json',
+                    response_schema=array_schema
+                )
+        elif issubclass(schema, BaseModel):
+            # Convert Pydantic model to Google's schema format
+            schema_dict = {
+                'type': 'OBJECT',
+                'properties': {},
+                'required': []
+            }
+
+            for field_name, field_info in schema.model_fields.items():
+                # Handle different field types including enums
+                if isinstance(field_info.annotation, type) and issubclass(field_info.annotation, enum.Enum):
+                    field_type = {
+                        'type': 'STRING',
+                        'enum': [e.value for e in field_info.annotation]
+                    }
+                elif field_info.annotation is str:
+                    field_type = {'type': 'STRING'}
+                elif field_info.annotation is int:
+                    field_type = {'type': 'INTEGER'}
+                elif field_info.annotation is float:
+                    field_type = {'type': 'NUMBER'}
+                elif field_info.annotation is bool:
+                    field_type = {'type': 'BOOLEAN'}
+                elif field_info.annotation is list or (
+                    hasattr(field_info.annotation, '__origin__') and
+                    field_info.annotation.__origin__ is list
+                ):
+                    # Handle typed lists
+                    if hasattr(field_info.annotation, '__args__'):
+                        inner_type = field_info.annotation.__args__[0]
+                        if inner_type is str:
+                            item_type = 'STRING'
+                        elif inner_type is int:
+                            item_type = 'INTEGER'
+                        elif inner_type is float:
+                            item_type = 'NUMBER'
+                        elif inner_type is bool:
+                            item_type = 'BOOLEAN'
+                        else:
+                            item_type = 'STRING'
+                    else:
+                        item_type = 'STRING'
+                    field_type = {'type': 'ARRAY', 'items': {'type': item_type}}
+                else:
+                    field_type = {'type': 'STRING'}
+
+                schema_dict['properties'][field_name] = field_type
+                if field_info.is_required:
+                    schema_dict['required'].append(field_name)
+
+            if is_list:
+                schema_dict = {
+                    'type': 'ARRAY',
+                    'items': schema_dict
+                }
+
+            config = types.GenerateContentConfig(
+                response_mime_type='application/json',
+                response_schema=schema_dict
+            )
+        else:
+            # Handle raw schema dict
+            config = types.GenerateContentConfig(
+                response_mime_type='application/json',
+                response_schema=schema
+            )
+
+        response = self.client.models.generate_content(
+            model=model,
+            contents=prompt,
+            config=config
+        )
+
+        return response.text
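structured_output accepts a Pydantic model, a TypedDict, or a raw schema dict and returns the model's JSON text. A sketch with a hypothetical Pydantic model that uses only scalar fields:

from pydantic import BaseModel

class Sentiment(BaseModel):
    label: str
    score: float

raw = ai.structured_output("Classify the sentiment of: 'I love this wheel.'", schema=Sentiment)
result = Sentiment.model_validate_json(raw)  # parse the JSON text back into the model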
+    def count_tokens(self, text: str, model: Optional[str] = None) -> int:
+        """Count the number of tokens in the text.
+
+        Args:
+            text (str): Input text
+            model (Optional[str]): Model to use for tokenization
+
+        Returns:
+            int: Number of tokens
+        """
+        model = model or self.default_model
+        response = self.client.models.count_tokens(
+            model=model,
+            contents=text
+        )
+        return response.total_tokens
+
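Token counting is a one-liner against the same client (a sketch):

n_tokens = ai.count_tokens("How many tokens is this sentence?")
print(n_tokens)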
sunholo/langfuse/evals.py CHANGED
@@ -2,7 +2,7 @@ import os
 import json
 
 from ..pubsub import decode_pubsub_message
-from langfuse import Langfuse
+
 import traceback
 
 
@@ -55,6 +55,7 @@ def direct_langfuse_evals(data, eval_funcs: list=[eval_length]):
 
 
 def do_evals(trace_id, eval_funcs: list=[eval_length], **kwargs) -> dict:
+    from langfuse import Langfuse
     # Initialize Langfuse with environment variables
     langfuse = Langfuse(
         secret_key=os.environ["LANGFUSE_SECRET_KEY"],
sunholo/mcp/cli.py CHANGED
@@ -3,7 +3,6 @@ import asyncio
 from typing import Any, Sequence
 from functools import lru_cache
 import subprocess
-from ..utils.version import sunholo_version
 
 try:
     from mcp.server import Server
@@ -68,7 +67,6 @@ class SunholoMCPServer:
        async def read_resource(uri: AnyUrl) -> str:
            """Read Sunholo resources based on URI"""
            logger.info(f"{uri} available")
-           console.print(f"{uri} available")
            if str(uri) == "sunholo://vacs/list":
                try:
                    # Execute sunholo vac list command
sunholo/utils/big_context.py CHANGED
@@ -43,7 +43,7 @@ def load_gitignore_patterns(gitignore_path):
     """
     with open(gitignore_path, 'r') as f:
        patterns = [line.strip() for line in f if line.strip() and not line.startswith('#')]
-    patterns.extend(["*.git/*", "*.terraform/*"])
+    patterns.extend([".git/", ".terraform/"])  # More precise pattern matching
     return patterns
 
 def should_ignore(file_path, patterns):
@@ -62,11 +62,18 @@ def should_ignore(file_path, patterns):
        True
     """
     rel_path = os.path.relpath(file_path)
-    
+
     for pattern in patterns:
-        if fnmatch(rel_path, pattern) or fnmatch(os.path.basename(rel_path), pattern):
+        # Handle directory patterns ending with /
+        if pattern.endswith('/'):
+            if any(part == pattern[:-1] for part in rel_path.split(os.sep)):
+                print(f"Ignoring {rel_path}")
+                return True
+        # Handle file patterns
+        elif fnmatch(rel_path, pattern):
+            print(f"Ignoring {rel_path}")
             return True
-    
+
     return False
 
 
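The new directory-style patterns ignore any path that contains the named directory component, while file patterns still go through fnmatch against the relative path. A sketch of the intended behaviour (the import path is an assumption based on the RECORD further below):

from sunholo.utils.big_context import should_ignore  # assumed module location

patterns = [".git/", ".terraform/", "*.pyc"]

should_ignore("repo/.git/config", patterns)             # True - '.git' path component
should_ignore("infra/.terraform/plugins/x", patterns)   # True - '.terraform' path component
should_ignore("app/module.pyc", patterns)               # True - fnmatch on '*.pyc'
should_ignore("app/module.py", patterns)                # False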
sunholo-0.116.2.dist-info/METADATA → sunholo-0.118.0.dist-info/METADATA RENAMED
@@ -1,9 +1,9 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: sunholo
-Version: 0.116.2
+Version: 0.118.0
 Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
 Home-page: https://github.com/sunholo-data/sunholo-py
-Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.116.2.tar.gz
+Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.118.0.tar.gz
 Author: Holosun ApS
 Author-email: multivac@sunholo.com
 License: Apache License, Version 2.0
@@ -47,6 +47,7 @@ Requires-Dist: google-cloud-pubsub; extra == "all"
 Requires-Dist: google-cloud-discoveryengine; extra == "all"
 Requires-Dist: google-cloud-texttospeech; extra == "all"
 Requires-Dist: google-generativeai>=0.7.1; extra == "all"
+Requires-Dist: google-genai; extra == "all"
 Requires-Dist: gunicorn; extra == "all"
 Requires-Dist: httpcore; extra == "all"
 Requires-Dist: httpx; extra == "all"
@@ -64,6 +65,7 @@ Requires-Dist: langchain-unstructured; extra == "all"
 Requires-Dist: langfuse; extra == "all"
 Requires-Dist: mcp; extra == "all"
 Requires-Dist: numpy; extra == "all"
+Requires-Dist: opencv-python; extra == "all"
 Requires-Dist: pg8000; extra == "all"
 Requires-Dist: pgvector; extra == "all"
 Requires-Dist: pillow; extra == "all"
@@ -118,9 +120,9 @@ Requires-Dist: unstructured[all-docs,local-inference]; extra == "pipeline"
 Provides-Extra: gcp
 Requires-Dist: anthropic[vertex]; extra == "gcp"
 Requires-Dist: google-api-python-client; extra == "gcp"
-Requires-Dist: google-cloud-alloydb-connector[pg8000]; extra == "gcp"
 Requires-Dist: google-auth-httplib2; extra == "gcp"
 Requires-Dist: google-auth-oauthlib; extra == "gcp"
+Requires-Dist: google-cloud-alloydb-connector[pg8000]; extra == "gcp"
 Requires-Dist: google-cloud-aiplatform>=1.58.0; extra == "gcp"
 Requires-Dist: google-cloud-bigquery; extra == "gcp"
 Requires-Dist: google-cloud-build; extra == "gcp"
@@ -130,6 +132,7 @@ Requires-Dist: google-cloud-logging; extra == "gcp"
 Requires-Dist: google-cloud-pubsub; extra == "gcp"
 Requires-Dist: google-cloud-discoveryengine; extra == "gcp"
 Requires-Dist: google-cloud-texttospeech; extra == "gcp"
+Requires-Dist: google-genai; extra == "gcp"
 Requires-Dist: google-generativeai>=0.8.3; extra == "gcp"
 Requires-Dist: langchain-google-genai>=2.0.0; extra == "gcp"
 Requires-Dist: langchain_google_alloydb_pg>=0.2.2; extra == "gcp"
@@ -164,6 +167,20 @@ Provides-Extra: tts
 Requires-Dist: google-cloud-texttospeech; extra == "tts"
 Requires-Dist: numpy; extra == "tts"
 Requires-Dist: sounddevice; extra == "tts"
+Provides-Extra: video
+Requires-Dist: opencv-python; extra == "video"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: download-url
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: summary
 
 [![PyPi Version](https://img.shields.io/pypi/v/sunholo.svg)](https://pypi.python.org/pypi/sunholo/)
 
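The new google-genai requirement ships with both the gcp and all extras, and the new video extra pulls in opencv-python; with standard pip extras syntax, installs such as pip install "sunholo[gcp]" or pip install "sunholo[video]" should therefore bring in the new client library and the OpenCV dependency respectively.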
sunholo-0.116.2.dist-info/RECORD → sunholo-0.118.0.dist-info/RECORD RENAMED
@@ -45,7 +45,7 @@ sunholo/chunker/splitter.py,sha256=RfekLPkjhCcNd1PFXIj_FxusJMJ8_3cyWl7bsYvtQ0g,7
 sunholo/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sunholo/cli/chat_vac.py,sha256=sYPzUDwwwebJvIobv3GRW_xbQQ4BTy9G-WHdarGCHB0,23705
 sunholo/cli/cli.py,sha256=WiWyLywKwuKR46H7a-mBLO0c7jMW-PNl8th2Mj7ioMs,4606
-sunholo/cli/cli_init.py,sha256=u6BZFtUyFMOKrXZ46-DfET0IpH3Tl2PlOz386rADtrw,8549
+sunholo/cli/cli_init.py,sha256=Nz-VvM-35K2FEsijpHeGWjT3qid5NWI1mPqE-3r_FFk,8523
 sunholo/cli/configs.py,sha256=QUM9DvKOdZmEQRM5uI3Nh887T0YDiSMr7O240zTLqws,4546
 sunholo/cli/deploy.py,sha256=zxdwUsRTRMC8U5vyRv0JiKBLFn84Ug_Tc88-_h9hJSs,1609
 sunholo/cli/embedder.py,sha256=v-FKiSPHaQzB6ctClclYueIf3bf3CqYtC1oRgPfT4dY,5566
@@ -86,8 +86,9 @@ sunholo/gcs/download_folder.py,sha256=ijJTnS595JqZhBH8iHFErQilMbkuKgL-bnTCMLGuvl
 sunholo/gcs/download_url.py,sha256=Ul81n1rklr8WogPsuxWWD1Nr8RHU451LzHPMJNhAKzw,6416
 sunholo/gcs/extract_and_sign.py,sha256=paRrTCvCN5vkQwCB7OSkxWi-pfOgOtZ0bwdXE08c3Ps,1546
 sunholo/gcs/metadata.py,sha256=oQLcXi4brsZ74aegWyC1JZmhlaEV270HS5_UWtAYYWE,898
-sunholo/genai/__init__.py,sha256=6SWK7uV5F625J-P3xQoD6WKL59a9RSaidj-Guslyt8Q,192
+sunholo/genai/__init__.py,sha256=TV3PYHWoR4cChdmCOaYB0PtAEQ86qol9XYYEtb1JmSA,239
 sunholo/genai/file_handling.py,sha256=Z3E7TR1DnP9WnneeEGC8LcT6k-9GFxwXDPaVZWw8HLE,8366
+sunholo/genai/genaiv2.py,sha256=uqWcfvlsPVPyQo-W_cP9h2TTzyYfzj4lyJlyqPyKTkI,20269
 sunholo/genai/images.py,sha256=EyjsDqt6XQw99pZUQamomCpMOoIah9bp3XY94WPU7Ms,1678
 sunholo/genai/init.py,sha256=yG8E67TduFCTQPELo83OJuWfjwTnGZsyACospahyEaY,687
 sunholo/genai/process_funcs_cls.py,sha256=D6eNrc3vtTZzwdkacZNOSfit499N_o0C5AHspyUJiYE,33690
@@ -98,7 +99,7 @@ sunholo/invoke/direct_vac_func.py,sha256=dACx3Zh7uZnuWLIFYiyLoyXUhh5-eUpd2RatDUd
 sunholo/invoke/invoke_vac_utils.py,sha256=sJc1edHTHMzMGXjji1N67c3iUaP7BmAL5nj82Qof63M,2053
 sunholo/langfuse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sunholo/langfuse/callback.py,sha256=jl0SZsFS53uMW9DGeM9SOL_EsRZsba0wwFGLqKzu9_U,1684
-sunholo/langfuse/evals.py,sha256=fQBaC0dBTYfgCzyfv9QBRvUfc9f42lbwQAeZmynaHO8,3841
+sunholo/langfuse/evals.py,sha256=P6bMK22ujySYcuH1qVdnUNAmV3p1KBshYUYpfdbMWF4,3846
 sunholo/langfuse/prompts.py,sha256=E3ZBd51k8NWkeO7K-uYkUPJhbqJWXee8X0N79pVCaIA,2744
 sunholo/llamaindex/__init__.py,sha256=DlY_cHWCsVEV1C5WBgDdHRgOMlJc8pDoCRukUJ8PT9w,88
 sunholo/llamaindex/get_files.py,sha256=6rhXCDqQ_lrIapISQ_OYQDjiSATXvS_9m3qq53-oIl0,781
@@ -108,7 +109,7 @@ sunholo/llamaindex/user_history.py,sha256=ZtkecWuF9ORduyGB8kF8gP66bm9DdvCI-ZiK6K
 sunholo/lookup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sunholo/lookup/model_lookup.yaml,sha256=O7o-jP53MLA06C8pI-ILwERShO-xf6z_258wtpZBv6A,739
 sunholo/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sunholo/mcp/cli.py,sha256=D9IFMSffCsRO6vFayzukiVX9s3CvoFFyJUcQ0S6db1s,10660
+sunholo/mcp/cli.py,sha256=d24nnVzhZYz4AWgTqmN-qjKG4rPbf8RhdmEOHZkBHy8,10570
 sunholo/pubsub/__init__.py,sha256=DfTEk4zmCfqn6gFxRrqDO0pOrvXTDqH-medpgYO4PGw,117
 sunholo/pubsub/process_pubsub.py,sha256=rN2N4WM6PZkMKDrdT8pnEfTvsXACRyJFqIHJQCbuxLs,3088
 sunholo/pubsub/pubsub_manager.py,sha256=19w_N0LiG-wgVWvgJ13b8BUeN8ZzgSPXAhPmL1HRRSI,6966
@@ -130,7 +131,7 @@ sunholo/tools/__init__.py,sha256=5NuYpwwTX81qGUWvgwfItoSLXteNnp7KjgD7IPZUFjI,53
 sunholo/tools/web_browser.py,sha256=8Gdf02F4zCOeSnijnfaL6jzk4oaSI0cj48o-esoWzwE,29086
 sunholo/utils/__init__.py,sha256=Hv02T5L2zYWvCso5hzzwm8FQogwBq0OgtUbN_7Quzqc,89
 sunholo/utils/api_key.py,sha256=Ct4bIAQZxzPEw14hP586LpVxBAVi_W9Serpy0BK-7KI,244
-sunholo/utils/big_context.py,sha256=gJIP7_ZL-YSLhOMq8jmFTMqH1wq8eB1NK7oKPeZAq2s,5578
+sunholo/utils/big_context.py,sha256=iJRbJlpgEMR8BSpSiOw35wRtZ-mjChVlei8m0lf6tIY,5876
 sunholo/utils/config.py,sha256=bz0ODJyqnoHQIsk4pmNpVxxq5WvwS0SfOq4cnCjQPJk,9105
 sunholo/utils/config_class.py,sha256=Z4sGzEkuxlCAJ8b-65_yzLmybnunywwOD9eXL8an5Wg,9619
 sunholo/utils/config_schema.py,sha256=Wv-ncitzljOhgbDaq9qnFqH5LCuxNv59dTGDWgd1qdk,4189
@@ -149,9 +150,9 @@ sunholo/vertex/init.py,sha256=1OQwcPBKZYBTDPdyU7IM4X4OmiXLdsNV30C-fee2scQ,2875
 sunholo/vertex/memory_tools.py,sha256=tBZxqVZ4InTmdBvLlOYwoSEWu4-kGquc-gxDwZCC4FA,7667
 sunholo/vertex/safety.py,sha256=S9PgQT1O_BQAkcqauWncRJaydiP8Q_Jzmu9gxYfy1VA,2482
 sunholo/vertex/type_dict_to_json.py,sha256=uTzL4o9tJRao4u-gJOFcACgWGkBOtqACmb6ihvCErL8,4694
-sunholo-0.116.2.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
-sunholo-0.116.2.dist-info/METADATA,sha256=YUoz-Kw42oDa68pBZ6Qd1gygiihKtR36SqJ0OnXl-6I,9297
-sunholo-0.116.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-sunholo-0.116.2.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
-sunholo-0.116.2.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
-sunholo-0.116.2.dist-info/RECORD,,
+sunholo-0.118.0.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+sunholo-0.118.0.dist-info/METADATA,sha256=b8HJvu2GA9AJqTfFcqKBRCw8ArVt71zvQwDUAKuKK3A,9752
+sunholo-0.118.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+sunholo-0.118.0.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+sunholo-0.118.0.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+sunholo-0.118.0.dist-info/RECORD,,
sunholo-0.116.2.dist-info/WHEEL → sunholo-0.118.0.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.6.0)
+Generator: setuptools (75.8.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 