sunholo 0.72.0__py3-none-any.whl → 0.73.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,151 @@
1
+ import json
2
+ import requests
3
+
4
+ from pathlib import Path
5
+
6
+ from ..logging import log
7
+ from ..agents import send_to_qa
8
+ from ..qna.parsers import parse_output
9
+ from ..streaming import generate_proxy_stream
10
+
11
def invoke_vac_qa(vac_input: dict, vac_name: str, chat_history=None, stream=False):
    """
    This lets VACs call other VAC Q&A endpoints within their code.

    Args:
        vac_input (dict): Must contain at least a "user_input" key.
            Optional keys read here: "user_id", "session_id",
            "image_url" / "image_uri".
        vac_name (str): Name of the target VAC, passed as vector_name.
        chat_history (list, optional): Prior conversation turns; appended to
            in place when supplied. Defaults to a fresh list per call.
        stream (bool): When False, returns the parsed answer dict (with a
            "chat_history" key added). When True, returns a generator that
            yields streamed tokens.

    Raises:
        ValueError: If vac_input has no "user_input" key.
    """
    if 'user_input' not in vac_input:
        # was a plain string literal before - the f-prefix is required for
        # {vac_input} to be interpolated into the error message
        raise ValueError(f'vac_input must contain at least "user_input" key - got {vac_input}')

    # avoid a mutable default argument: a shared [] would leak conversation
    # turns between unrelated calls
    if chat_history is None:
        chat_history = []

    user_id = vac_input.get('user_id')
    session_id = vac_input.get('session_id')
    image_uri = vac_input.get('image_url') or vac_input.get('image_uri')

    if not stream:
        log.info(f'Batch invoke_vac_qa with {vac_input=}')
        vac_response = send_to_qa(
            vac_input["user_input"],
            vector_name=vac_name,
            chat_history=chat_history,
            message_author=user_id,
            #TODO: populate these
            image_url=image_uri,
            source_filters=None,
            search_kwargs=None,
            private_docs=None,
            whole_document=False,
            source_filters_and_or=False,
            # system kwargs
            configurable={
                "vector_name": vac_name,
            },
            user_id=user_id,
            session_id=session_id,
            message_source="sunholo.invoke_vac_qa.invoke")

        # ensures {'answer': answer}
        answer = parse_output(vac_response)
        chat_history.append({"name": "Human", "content": vac_input})
        chat_history.append({"name": "AI", "content": answer})
        answer["chat_history"] = chat_history

        return answer

    log.info(f"Streaming invoke_vac_qa with {vac_input=}")

    # The streaming work lives in a nested generator so this outer function
    # is NOT itself a generator. In the original, a bare `yield` in the
    # function body made the whole function a generator, so the batch
    # branch's `return answer` became StopIteration and batch callers never
    # received the answer dict. Streaming callers still get a generator to
    # iterate, as before.
    def _stream_tokens():
        def stream_response():
            generate = generate_proxy_stream(
                send_to_qa,
                vac_input["user_input"],
                vector_name=vac_name,
                chat_history=chat_history,
                generate_f_output=lambda x: x,  # Replace with actual processing function
                stream_wait_time=0.5,
                stream_timeout=120,
                message_author=user_id,
                #TODO: populate these
                image_url=image_uri,
                source_filters=None,
                search_kwargs=None,
                private_docs=None,
                whole_document=False,
                source_filters_and_or=False,
                # system kwargs
                configurable={
                    "vector_name": vac_name,
                },
                user_id=user_id,
                session_id=session_id,
                message_source="sunholo.invoke_vac_qa.stream"
            )
            for part in generate():
                yield part

        answer = ""
        for token in stream_response():
            if isinstance(token, bytes):
                token = token.decode('utf-8')
            yield token
            if isinstance(token, dict):
                # dict tokens (e.g. metadata) are passed through to the
                # consumer but not accumulated into the final answer string
                pass
            elif isinstance(token, str):
                answer += token

        if answer:
            chat_history.append({"name": "Human", "content": vac_input})
            chat_history.append({"name": "AI", "content": answer})

        return chat_history

    return _stream_tokens()
99
+
100
def invoke_vac(service_url, data, vector_name=None, metadata=None, is_file=False):
    """
    This lets a VAC be invoked by directly calling its URL, used for file uploads.

    Args:
        service_url (str): The VAC endpoint URL to POST to.
        data: A pathlib.Path to a file when is_file=True, otherwise a dict or
            JSON string payload.
        vector_name (str, optional): Sent as multipart form data on file uploads.
        metadata (dict, optional): JSON-encoded into the upload form data.
        is_file (bool): Whether 'data' is a file to upload.

    Returns:
        dict: The JSON-decoded response body from the VAC.

    Raises:
        ValueError: If is_file is True but 'data' is not a valid file Path.
        json.JSONDecodeError: If a string payload is not valid JSON.
        requests.exceptions.RequestException: On HTTP-level failure.
    """
    try:
        if is_file:
            log.info("Uploading file...")
            # Handle file upload
            if not isinstance(data, Path) or not data.is_file():
                raise ValueError("For file uploads, 'data' must be a Path object pointing to a valid file.")

            # context manager ensures the handle is closed even if the POST
            # raises (the original left the file open)
            with open(data, 'rb') as file_obj:
                files = {
                    'file': (data.name, file_obj),
                }
                form_data = {
                    'vector_name': vector_name,
                    'metadata': json.dumps(metadata) if metadata else '',
                }

                response = requests.post(service_url, files=files, data=form_data)
        else:
            log.info("Uploading JSON...")
            try:
                if isinstance(data, dict):
                    json_data = data
                else:
                    json_data = json.loads(data)
            except json.JSONDecodeError as err:
                log.error(f"[bold red]ERROR: invalid JSON: {str(err)} [/bold red]")
                raise err
            except Exception as err:
                log.error(f"[bold red]ERROR: could not parse JSON: {str(err)} [/bold red]")
                raise err

            log.debug(f"Sending data: {data} or json_data: {json.dumps(json_data)}")
            # Handle JSON data
            headers = {"Content-Type": "application/json"}
            response = requests.post(service_url, headers=headers, data=json.dumps(json_data))

        response.raise_for_status()

        the_data = response.json()
        log.info(the_data)

        return the_data

    except requests.exceptions.RequestException as e:
        log.error(f"[bold red]ERROR: Failed to invoke VAC: {e}[/bold red]")
        raise e
    except Exception as e:
        log.error(f"[bold red]ERROR: An unexpected error occurred: {e}[/bold red]")
        raise e
@@ -1,8 +1,8 @@
1
1
  from ..logging import log
2
- from ..utils import load_config_key
2
+ from ..utils import ConfigManager
3
3
 
4
4
  # Load the YAML file
5
- def load_prompt_from_yaml(key, prefix="sunholo", file_path=None):
5
+ def load_prompt_from_yaml(key, prefix="sunholo", load_from_file=False):
6
6
  """
7
7
  Returns a string you can use with Langfuse PromptTemplate.from_template()
8
8
 
@@ -17,6 +17,12 @@ def load_prompt_from_yaml(key, prefix="sunholo", file_path=None):
17
17
  from langchain_core.prompts import PromptTemplate
18
18
 
19
19
  """
20
+ config = ConfigManager(prefix)
21
+ if load_from_file:
22
+
23
+ return config.promptConfig(key)
24
+
25
+
20
26
  from langfuse import Langfuse
21
27
 
22
28
  # Initialize Langfuse client
@@ -35,4 +41,4 @@ def load_prompt_from_yaml(key, prefix="sunholo", file_path=None):
35
41
  except Exception as err:
36
42
  log.warning(f"Could not find langfuse template: {langfuse_template} - {str(err)} - attempting to load from promptConfig")
37
43
 
38
- return load_config_key(key, vector_name=prefix, kind="promptConfig")
44
+ return config.promptConfig(key)
@@ -4,7 +4,7 @@ except ImportError:
4
4
  rag = None
5
5
 
6
6
  from ..logging import log
7
- from ..utils.config import load_config_key
7
+ from ..utils import ConfigManager
8
8
  from ..vertex import init_vertex
9
9
  from .get_files import fetch_corpus
10
10
  from ..components import load_memories
@@ -41,7 +41,8 @@ def do_llamaindex(message_data, metadata, vector_name):
41
41
  if not rag:
42
42
  raise ValueError("Need to install vertexai module via `pip install sunholo[gcp]`")
43
43
 
44
- gcp_config = load_config_key("gcp_config", vector_name=vector_name, kind="vacConfig")
44
+ config = ConfigManager(vector_name)
45
+ gcp_config = config.vacConfig("gcp_config")
45
46
  if not gcp_config:
46
47
  raise ValueError(f"Need config.{vector_name}.gcp_config to configure llamaindex on VertexAI")
47
48
 
@@ -81,7 +82,7 @@ def do_llamaindex(message_data, metadata, vector_name):
81
82
 
82
83
  corpuses.append(corpus)
83
84
  if not corpuses:
84
- log.warning("Could not find a Vertex Llamaindex RAG corpus to import data to despite being in config")
85
+ log.info("No Vertex Llamaindex RAG corpus to import data")
85
86
  return None
86
87
 
87
88
  try:
@@ -93,8 +94,7 @@ def do_llamaindex(message_data, metadata, vector_name):
93
94
  log.info(f"Found llamaindex corpus: {corpuses}")
94
95
 
95
96
  # native support for cloud storage and drive links
96
- chunker_config = load_config_key("chunker", vector_name=vector_name, kind="vacConfig")
97
-
97
+ chunker_config = config.vacConfig("chunker")
98
98
 
99
99
  if message_data.startswith("gs://") or message_data.startswith("https://drive.google.com"):
100
100
  log.info(f"rag.import_files for {message_data}")
@@ -115,7 +115,8 @@ def do_llamaindex(message_data, metadata, vector_name):
115
115
 
116
116
 
117
117
  def check_llamaindex_in_memory(vector_name):
118
- memories = load_config_key("memory", vector_name=vector_name, kind="vacConfig")
118
+ memories = ConfigManager(vector_name).vacConfig("memory")
119
+
119
120
  for memory in memories: # Iterate over the list
120
121
  for key, value in memory.items(): # Now iterate over the dictionary
121
122
  log.info(f"Found memory {key}")
@@ -130,7 +131,7 @@ def check_llamaindex_in_memory(vector_name):
130
131
 
131
132
  def llamaindex_chunker_check(message_data, metadata, vector_name):
132
133
  # llamaindex handles its own chunking/embedding
133
- memories = load_config_key("memory", vector_name=vector_name, kind="vacConfig")
134
+ memories = ConfigManager(vector_name).vacConfig("memory")
134
135
  total_memories = len(memories)
135
136
  llama = None
136
137
  if check_llamaindex_in_memory(vector_name):
@@ -158,11 +158,14 @@ def parse_json_data(json_data: dict):
158
158
  try:
159
159
  if isinstance(json_data, dict):
160
160
  content = json_data.get('content', None)
161
+ metadata = json_data.get('metadata', None)
161
162
  if content is not None: # content can be '' empty string
162
163
  #log.debug(f'Yield content: {content}')
163
164
  yield content
165
+ elif metadata is not None:
166
+ log.debug('Metadata:', metadata)
164
167
  else:
165
- log.debug(f'No "content" key found, yielding all json data dict: {json_data}')
168
+ log.debug(f'No "content" or "metadata" key found, yielding all json data dict: {json_data}')
166
169
  yield json_data # Yielding all JSON data
167
170
  elif isinstance(json_data, str):
168
171
  yield json_data
sunholo/utils/config.py CHANGED
@@ -57,7 +57,7 @@ def load_all_configs():
57
57
  """
58
58
  from ..logging import log
59
59
 
60
- if not os.getenv("_CONFIG_FOLDER", None):
60
+ if not os.getenv("_CONFIG_FOLDER"):
61
61
  log.debug("_CONFIG_FOLDER is not set, using os.getcwd() instead")
62
62
  else:
63
63
  log.debug(f"_CONFIG_FOLDER set to: {os.getenv('_CONFIG_FOLDER')}")
@@ -9,6 +9,9 @@ class ConfigManager:
9
9
  def __init__(self, vector_name: str):
10
10
  """
11
11
  Initialize the ConfigManager with a vector name.
12
+ Requires a local config/ folder holding your configuration files or the env var VAC_CONFIG_FOLDER to be set.
13
+
14
+ Read more at: https://dev.sunholo.com/docs/config
12
15
 
13
16
  Args:
14
17
  vector_name (str): The name of the vector in the configuration files.
@@ -16,14 +19,22 @@ class ConfigManager:
16
19
  Example:
17
20
  ```python
18
21
  # Usage example:
19
- config = ConfigManager("myVector")
22
+ config = ConfigManager("my_vac")
20
23
  agent = config.vacConfig("agent")
21
24
  ```
22
25
  """
26
+ local_config_folder = os.path.join(os.getcwd(), "config")
27
+ if os.path.isdir(local_config_folder):
28
+ print(f"Found local config folder {local_config_folder} - will overwrite any global configurations")
29
+ else:
30
+ local_config_folder = None
31
+ if os.getenv("VAC_CONFIG_FOLDER") is None and local_config_folder is None:
32
+ raise ValueError(f"Must have either a local config/ folder in this dir ({os.getcwd()}/config/) or a folder specified via the VAC_CONFIG_FOLDER environment variable, or both.")
33
+
23
34
  self.vector_name = vector_name
24
35
  self.config_cache = {}
25
36
  self.config_folder = os.getenv("VAC_CONFIG_FOLDER", os.getcwd())
26
- self.local_config_folder = os.path.join(os.getcwd(), "config")
37
+ self.local_config_folder = local_config_folder
27
38
  self.configs_by_kind = self.load_all_configs()
28
39
 
29
40
  def load_all_configs(self):
@@ -38,14 +49,15 @@ class ConfigManager:
38
49
 
39
50
  log.debug(f"Loading all configs from folder: {self.config_folder} and local folder: {self.local_config_folder}")
40
51
  global_configs_by_kind = self._load_configs_from_folder(self.config_folder)
41
- local_configs_by_kind = self._load_configs_from_folder(self.local_config_folder)
42
52
 
43
- # Merge local configs into global configs
44
- for kind, local_config in local_configs_by_kind.items():
45
- if kind in global_configs_by_kind:
46
- global_configs_by_kind[kind] = self._merge_dicts(global_configs_by_kind[kind], local_config)
47
- else:
48
- global_configs_by_kind[kind] = local_config
53
+ if self.local_config_folder:
54
+ local_configs_by_kind = self._load_configs_from_folder(self.local_config_folder)
55
+ # Merge local configs into global configs
56
+ for kind, local_config in local_configs_by_kind.items():
57
+ if kind in global_configs_by_kind:
58
+ global_configs_by_kind[kind] = self._merge_dicts(global_configs_by_kind[kind], local_config)
59
+ else:
60
+ global_configs_by_kind[kind] = local_config
49
61
 
50
62
  return global_configs_by_kind
51
63
 
@@ -10,8 +10,46 @@ from ..utils.parsers import validate_extension_id
10
10
  import base64
11
11
  import json
12
12
  from io import StringIO
13
+ import os
13
14
 
14
15
  class VertexAIExtensions:
16
+ """
17
+ Example
18
+
19
+ ```python
20
+ from sunholo.vertex import VertexAIExtensions
21
+ vex = VertexAIExtensions()
22
+ vex.list_extensions()
23
+ # [{'resource_name': 'projects/374404277595/locations/us-central1/extensions/770924776838397952',
24
+ # 'display_name': 'Code Interpreter',
25
+ # 'description': 'N/A'}]
26
+ ```
27
+
28
+ Creating an extension example as per:
29
+ https://cloud.google.com/vertex-ai/generative-ai/docs/extensions/create-extension
30
+
31
+ ```python
32
+ ## validates before upload
33
+ vex.upload_openapi_file("your-extension-name.yaml")
34
+ vex.openapi_file_gcs
35
+ # 'gs://your-extensions-bucket/your-extension-name.yaml'
36
+
37
+ ## load in examples to be used by creation later
38
+ vex.load_tool_use_examples('your-examples.yaml')
39
+
40
+ vex.create_extension(
41
+ "My New Extension",
42
+ description="Querying the VAC above my database",
43
+ service_account='sa-serviceaccount@my-project.iam.gserviceaccount.com')
44
+ ```
45
+
46
+ Call the extension
47
+ ```python
48
+ operation_params = {"input": {"question":"This needs to be in same schema as your openapi spec"}
49
+ vex.execute_extension("an_operation_id_from_your_openai_spec",
50
+ operation_params = operation_params)
51
+ ```
52
+ """
15
53
  def __init__(self):
16
54
  if extensions is None:
17
55
  raise ImportError("VertexAIExtensions needs vertexai.previewextensions to be installed. Install via `pip install sunholo[gcp]`")
@@ -27,6 +65,10 @@ class VertexAIExtensions:
27
65
  """
28
66
  self.IMAGE_FILE_EXTENSIONS = set(["jpg", "jpeg", "png"])
29
67
  self.location = "us-central1"
68
+ self.openapi_file_gcs = None
69
+ self.tool_use_examples = None
70
+ self.manifest = {}
71
+ self.created_extensions = []
30
72
 
31
73
  def list_extensions(self):
32
74
  the_list = extensions.Extension.list()
@@ -40,92 +82,165 @@ class VertexAIExtensions:
40
82
  })
41
83
 
42
84
  return extensions_list
85
+
86
+ def validate_openapi(self, filename):
87
+ try:
88
+ from openapi_spec_validator import validate
89
+ from openapi_spec_validator.readers import read_from_filename
90
+ except ImportError:
91
+ raise ImportError("Must have openapi-spec-validator installed - install via `pip install sunholo[tools]`")
92
+
93
+ spec_dict, spec_url = read_from_filename(filename)
94
+ validate(spec_dict)
95
+
96
+ def upload_to_gcs(self, filename):
97
+ if not os.getenv('EXTENSIONS_BUCKET'):
98
+ raise ValueError('Please specify env var EXTENSIONS_BUCKET for location to upload openapi spec')
43
99
 
100
+ from ..gcs.add_file import add_file_to_gcs
101
+ file_base = os.path.basename(filename)
44
102
 
45
- def get_extension_import_config(self, display_name: str, description: str,
46
- api_spec_gcs: dict, service_account_name: dict, tool_use_examples: list):
47
- tool_use_examples = [
48
- {
49
- "extensionOperation": {
50
- "operationId": "say_hello",
51
- },
52
- "displayName": "Say hello in the requested language",
53
- "query": "Say hello in French",
54
- "requestParams": {
55
- "fields": [
56
- {
57
- "key": "apiServicePrompt",
58
- "value": {
59
- "string_value": "French",
60
- }
61
- }
62
- ]
63
- },
64
- "responseParams": {
65
- "fields": [
66
- {
67
- "key": "apiServiceOutput",
68
- "value": {
69
- "string_value": "bonjour",
70
- },
71
- }
72
- ],
73
- },
74
- "responseSummary": "Bonjour"
75
- }
76
- ]
77
-
78
- return {
79
- "displayName": display_name,
80
- "description": description,
81
- "manifest": {
82
- "name": "EXTENSION_NAME_LLM",
83
- "description": "DESCRIPTION_LLM",
103
+ self_uri = add_file_to_gcs(file_base, bucket_filepath=file_base)
104
+
105
+ return self_uri
106
+
107
+ def upload_openapi_file(self, filename: str):
108
+ self.validate_openapi(filename)
109
+
110
+ self.openapi_file_gcs = self.upload_to_gcs(filename)
111
+
112
+ def load_tool_use_examples(self, filename: str):
113
+ import yaml
114
+
115
+ with open(filename, 'r') as file:
116
+ self.tool_use_examples = yaml.safe_load(file)
117
+
118
+ # google.cloud.aiplatform_v1beta1.types.ToolUseExample
119
+ return self.tool_use_examples
120
+
121
+
122
+ def update_tool_use_examples_via_patch(self):
123
+ import requests
124
+ import json
125
+ from google.auth import default
126
+ from google.auth.transport.requests import Request
127
+
128
+ extension = self.created_extension
129
+ if extension is None:
130
+ raise ValueError("Need to create the extension first")
131
+
132
+ # Get the access token using Google authentication
133
+ credentials, project_id = default()
134
+ credentials.refresh(Request())
135
+ access_token = credentials.token
136
+
137
+ ENDPOINT=f"{self.location}-aiplatform.googleapis.com"
138
+ URL=f"https://{ENDPOINT}/v1beta1"
139
+
140
+ extension_id = self.created_extension.resource_name
141
+
142
+ # Define the URL and extension ID
143
+ url = f"{URL}/{extension_id}"
144
+ log.info(f"PATCH {url}")
145
+ headers = {
146
+ "Authorization": f"Bearer {access_token}",
147
+ "Content-Type": "application/json"
148
+ }
149
+
150
+ # Define the payload
151
+ payload = {
152
+ "toolUseExamples": self.tool_use_examples['tool_use_examples']
153
+ }
154
+
155
+ # Make the PATCH request
156
+ response = requests.patch(
157
+ url,
158
+ headers=headers,
159
+ params={"update_mask": "toolUseExamples"},
160
+ data=json.dumps(payload)
161
+ )
162
+
163
+ # Check the response
164
+ if response.status_code == 200:
165
+ log.info("Tool use examples updated successfully.")
166
+ else:
167
+ log.info(f"Failed to update tool use examples. Status code: {response.status_code}, Response: {response.text}")
168
+
169
+
170
+ def create_extension_manifest(self,
171
+ display_name,
172
+ description,
173
+ open_api_gcs_uri: str,
174
+ service_account: str):
175
+
176
+ self.manifest = {
177
+ "name": display_name,
178
+ "description": description,
84
179
  "apiSpec": {
85
- "openApiGcsUri": api_spec_gcs,
180
+ "openApiGcsUri": open_api_gcs_uri,
86
181
  },
87
182
  "authConfig": {
88
183
  "authType": "OAUTH",
89
- "oauthConfig": {"service_account": service_account_name}
184
+ "oauthConfig": {"service_account": service_account}
90
185
  }
91
- },
92
- "toolUseExamples": tool_use_examples,
93
186
  }
94
187
 
95
- def create_extension_instance(self, display_name: str, description: str, open_api_gcs_uri: str,
96
- llm_name: str = None, llm_description: str = None, runtime_config: dict = None, service_account: str = None):
188
+ return self.manifest
189
+
190
+ def create_extension(self,
191
+ display_name: str,
192
+ description: str,
193
+ open_api_file: str = None,
194
+ tool_example_file: str = None,
195
+ runtime_config: dict = None,
196
+ service_account: str = None):
197
+
97
198
  project_id = get_gcp_project()
98
199
  extension_name = f"projects/{project_id}/locations/us-central1/extensions/{validate_extension_id(display_name)}"
99
200
 
201
+ if open_api_file:
202
+ self.upload_openapi_file(open_api_file)
203
+
204
+ manifest = self.create_extension_manifest(
205
+ display_name,
206
+ description,
207
+ open_api_gcs_uri = self.openapi_file_gcs,
208
+ service_account = service_account,
209
+ )
210
+
211
+ if tool_example_file:
212
+ self.load_tool_use_examples(tool_example_file)
213
+
100
214
  extension = extensions.Extension.create(
101
215
  extension_name=extension_name,
102
216
  display_name=display_name,
103
217
  description=description,
104
- runtime_config=runtime_config or None,
105
- manifest={
106
- "name": llm_name or display_name,
107
- "description": llm_description or description,
108
- "api_spec": {
109
- "open_api_gcs_uri": open_api_gcs_uri
110
- },
111
- "auth_config": {
112
- "auth_type": "GOOGLE_SERVICE_ACCOUNT_AUTH",
113
- "google_service_account_config": service_account or {},
114
- },
115
- },
218
+ runtime_config=runtime_config or None, # sets things like what bucket will be used
219
+ manifest=manifest,
220
+ #tool_use_examples=self.tool_use_examples
116
221
  )
117
222
  log.info(f"Created Vertex Extension: {extension_name}")
223
+
224
+ self.created_extension = extension
225
+
226
+ if tool_example_file:
227
+ self.update_tool_use_examples_via_patch()
118
228
 
119
- return extension
229
+ return extension.resource_name
120
230
 
121
- def execute_extension(self, operation_id: str, operation_params: dict, extension_id: str):
231
+ def execute_extension(self, operation_id: str, operation_params: dict, extension_id: str=None):
122
232
  init_vertex(location=self.location)
123
233
 
124
- if not extension_id.startswith("projects/"):
125
- project_id = get_gcp_project()
126
- extension_name = f"projects/{project_id}/locations/{self.location}/extensions/{extension_id}"
127
- else:
128
- extension_name = extension_id
234
+ if not extension_id:
235
+ extension_name = self.created_extension.resource_name
236
+ if extension_name is None:
237
+ raise ValueError("Must specify extension_id or init one with class")
238
+ else:
239
+ if not extension_id.startswith("projects/"):
240
+ project_id = get_gcp_project()
241
+ extension_name = f"projects/{project_id}/locations/{self.location}/extensions/{extension_id}"
242
+ else:
243
+ extension_name = extension_id
129
244
 
130
245
  extension = extensions.Extension(extension_name)
131
246
 
@@ -1,6 +1,6 @@
1
1
  try:
2
2
  from vertexai.preview import rag
3
- from vertexai.preview.generative_models import Tool, grounding, GenerationResponse
3
+ from vertexai.preview.generative_models import Tool, grounding
4
4
  except ImportError:
5
5
  rag = None
6
6
 
@@ -1,9 +1,9 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sunholo
3
- Version: 0.72.0
3
+ Version: 0.73.3
4
4
  Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
5
5
  Home-page: https://github.com/sunholo-data/sunholo-py
6
- Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.72.0.tar.gz
6
+ Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.73.3.tar.gz
7
7
  Author: Holosun ApS
8
8
  Author-email: multivac@sunholo.com
9
9
  License: Apache License, Version 2.0
@@ -87,7 +87,7 @@ Requires-Dist: google-api-python-client ; extra == 'gcp'
87
87
  Requires-Dist: google-cloud-alloydb-connector[pg8000] ; extra == 'gcp'
88
88
  Requires-Dist: google-auth-httplib2 ; extra == 'gcp'
89
89
  Requires-Dist: google-auth-oauthlib ; extra == 'gcp'
90
- Requires-Dist: google-cloud-aiplatform ; extra == 'gcp'
90
+ Requires-Dist: google-cloud-aiplatform >=1.58.0 ; extra == 'gcp'
91
91
  Requires-Dist: google-cloud-bigquery ; extra == 'gcp'
92
92
  Requires-Dist: google-cloud-build ; extra == 'gcp'
93
93
  Requires-Dist: google-cloud-service-control ; extra == 'gcp'
@@ -119,6 +119,7 @@ Requires-Dist: pytesseract ; extra == 'pipeline'
119
119
  Requires-Dist: tabulate ; extra == 'pipeline'
120
120
  Requires-Dist: unstructured[local-inference] ; extra == 'pipeline'
121
121
  Provides-Extra: tools
122
+ Requires-Dist: openapi-spec-validator ; extra == 'tools'
122
123
  Requires-Dist: playwright ; extra == 'tools'
123
124
 
124
125
  ## Introduction