sunholo-0.70.5-py3-none-any.whl → sunholo-0.70.6-py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- sunholo/vertex/extensions.py +190 -2
- sunholo/vertex/extensions_class.py +241 -0
- sunholo/vertex/memory_tools.py +2 -2
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/METADATA +2 -2
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/RECORD +9 -8
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/LICENSE.txt +0 -0
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/WHEEL +0 -0
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/entry_points.txt +0 -0
- {sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/top_level.txt +0 -0
sunholo/vertex/extensions.py
CHANGED

@@ -6,6 +6,16 @@ from ..logging import log
 from ..utils.gcp_project import get_gcp_project
 from ..utils.parsers import validate_extension_id
 
+# https://github.com/GoogleCloudPlatform/applied-ai-engineering-samples/blob/main/genai-on-vertex-ai/vertex_ai_extensions/notebooks/pandas_code_interpreter.ipynb
+import base64
+import json
+import pprint
+import pandas
+from io import StringIO
+
+global CODE_INTERPRETER_WRITTEN_FILES
+CODE_INTERPRETER_WRITTEN_FILES = []
+
 def get_extension_import_config(
         display_name: str,
         description: str,

@@ -102,7 +112,7 @@ def create_extension_instance(
            },
        },
    )
-    log.info(f"
+    log.info(f"Created Vertex Extension: {extension_name}")
 
    return extension
 

@@ -125,14 +135,192 @@ def create_extension_code_interpreter(
        }
    }
 
+    llm_description="""
+        Tool to generate and execute valid Python code from a natural
+        language description, or to execute custom Python code.
+        Use this tool to:
+        - generate and/or execute code for various tasks:
+        - perform a wide variety of mathematical calculations, for example, add,
+          subtract, multiply, divide, average, power, factorial, quotient,
+          formulae, logarithms, random numbers, trigonometric functions, and
+          equations;
+        - sort, filter, select top results, and otherwise analyze data (including
+          data acquired from other tools and Extensions);
+        - create visualizations, plot charts, draw graphs, shapes, print results,
+          etc.
+        - execute custom code and get results and output files.
+        """
+
    code_extension = create_extension_instance(
        display_name="Code Interpreter",
        description="This extension generates and executes code in the specified language",
        open_api_gcs_uri="gs://vertex-extension-public/code_interpreter.yaml",
        llm_name="code_interpreter_tool",
-        llm_description=
+        llm_description=llm_description,
        runtime_config=runtime_config
    )
    log.info(f"Created code extension: {code_extension=}")
 
    return code_extension
+
+def execute_extension(operation_id: str,
+                      operation_params: dict,
+                      extension_id: str):
+
+    # only us-central for now
+    location = "us-central1"
+    init_vertex(location=location)
+
+    if not extension_id.startswith("projects/"):
+        project_id=get_gcp_project()
+        extension_name = f"projects/{project_id}/locations/{location}/extensions/{extension_id}"
+    else:
+        extension_name=extension_id
+
+    extension = extensions.Extension(extension_name)
+
+    response = extension.execute(
+        operation_id=operation_id,
+        # {"query": "find the max value in the list: [1,2,3,4,-5]"}
+        operation_params=operation_params,
+    )
+
+    return response
+
+def execute_code_extension(query:str, filenames: list[str]=None, gcs_files: list[str]=None):
+
+    if filenames and gcs_files:
+        raise ValueError("Can't specify both filenames and gcs_files")
+
+    extension_code_interpreter = extensions.Extension.from_hub("code_interpreter")
+
+    file_arr=None
+    if filenames:
+        file_arr = [
+            {
+                "name": filename,
+                "contents": base64.b64encode(open(filename, "rb").read()).decode()
+            }
+            for filename in filenames
+        ]
+
+    response = extension_code_interpreter.execute(
+        operation_id = "generate_and_execute",
+        operation_params={
+            "query": query,
+            "files": file_arr,
+            "file_gcs_uris": gcs_files
+        })
+
+    CODE_INTERPRETER_WRITTEN_FILES.extend(
+        [item['name'] for item in response['output_files']])
+
+    if response.get('execution_error'):
+        log.error(f"Code Execution Response failed with: {response.get('execution_error')} - maybe retry?")
+
+    return response
+
+css_styles = """
+<style>
+  .main_summary {
+    font-weight: bold;
+    font-size: 14px; color: #4285F4;
+    background-color:rgba(221, 221, 221, 0.5); padding:8px;}
+</style>
+"""
+
+# Parser to visualise the content of returned files as HTML.
+def parse_files_to_html(outputFiles, save_files_locally = True):
+    IMAGE_FILE_EXTENSIONS = set(["jpg", "jpeg", "png"])
+    file_list = []
+    details_tml = """<details><summary>{name}</summary><div>{html_content}</div></details>"""
+
+    if not outputFiles:
+        return "No Files generated from the code"
+    # Sort output_files so images are displayed before other files such as JSON.
+    for output_file in sorted(
+        outputFiles,
+        key=lambda x: x["name"].split(".")[-1] not in IMAGE_FILE_EXTENSIONS,
+    ):
+        file_name = output_file.get("name")
+        file_contents = base64.b64decode(output_file.get("contents"))
+        if save_files_locally:
+            open(file_name,"wb").write(file_contents)
+
+        if file_name.split(".")[-1] in IMAGE_FILE_EXTENSIONS:
+            # Render Image
+            file_html_content = ('<img src="data:image/png;base64, '
+                                 f'{output_file.get("contents")}" />')
+        elif file_name.endswith(".json"):
+            # Pretty print JSON
+            json_pp = pprint.pformat(
+                json.loads(file_contents.decode()),
+                compact=False,
+                width=160)
+            file_html_content = (f'<span>{json_pp}</span>')
+        elif file_name.endswith(".csv"):
+            # CSV
+            csv_md = pandas.read_csv(
+                StringIO(file_contents.decode())).to_markdown(index=False)
+            file_html_content = f'<span>{csv_md}</span>'
+        elif file_name.endswith(".pkl"):
+            # PKL
+            file_html_content = f'<span>Preview N/A</span>'
+        else:
+            file_html_content = f"<span>{file_contents.decode()}</span>"
+
+        file_list.append({'name': file_name, "html_content": file_html_content})
+
+    buffer_html = [ details_tml.format(**_file) for _file in file_list ]
+    return "".join(buffer_html)
+
+# Processing code interpreter response to html visualization.
+def process_response(response: dict, save_files_locally = None) -> None:
+
+    result_template = """
+    <details open>
+        <summary class='main_summary'>{summary}:</summary>
+        <div><pre>{content}</pre></div>
+    </details>
+    """
+
+    result = ""
+    code = response.get('generated_code')
+    if 'execution_result' in response and response['execution_result']!="":
+        result = result_template.format(
+            summary="Executed Code Output",
+            content=response.get('execution_result'))
+    else:
+        result = result_template.format(
+            summary="Executed Code Output",
+            content="Code does not produce printable output.")
+
+    if response.get('execution_error', None):
+        result += result_template.format(
+            summary="Generated Code Raised a (Possibly Non-Fatal) Exception",
+            content=response.get('execution_error', None))
+
+    result += result_template.format(
+        summary="Files Created <u>(Click on filename to view content)</u>",
+        content=parse_files_to_html(
+            response.get('output_files', []),
+            save_files_locally = True))
+
+    html_content = f"""
+    {css_styles}
+    <div id='main'>
+        <div id="right">
+            <h3>Generated Code by Code Interpreter</h3>
+            <pre><code>{code}</code></pre>
+        </div>
+        <div id="left">
+            <h3>Code Execution Results</h3>
+            {result}
+        </div>
+    </div>
+    """
+    if save_files_locally:
+        # write to local file
+        pass
+
+    return html_content
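The new module-level helpers above can be driven end to end roughly as sketched below. This is a usage sketch rather than code from the package: it assumes Vertex AI extensions are enabled for the configured GCP project in us-central1, and "sales.csv" and "report.html" are hypothetical file names.

# Usage sketch for the new helpers (assumptions noted above; not part of the wheel).
from sunholo.vertex.extensions import execute_code_extension, process_response

# Ask the hub code_interpreter extension to generate and run Python over a local CSV.
# filenames and gcs_files are mutually exclusive; "sales.csv" is a hypothetical file.
response = execute_code_extension(
    query="Plot total sales per month as a bar chart",
    filenames=["sales.csv"],
)

# Turn the generated code, execution output and any output files into an HTML report.
html = process_response(response, save_files_locally=True)
with open("report.html", "w") as f:
    f.write(html)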
sunholo/vertex/extensions_class.py
ADDED

@@ -0,0 +1,241 @@
+from vertexai.preview import extensions
+from .init import init_vertex
+from ..logging import log
+from ..utils.gcp_project import get_gcp_project
+from ..utils.parsers import validate_extension_id
+import base64
+import json
+from io import StringIO
+
+class VertexAIExtensions:
+    def __init__(self):
+        self.CODE_INTERPRETER_WRITTEN_FILES = []
+        self.css_styles = """
+        <style>
+          .main_summary {
+            font-weight: bold;
+            font-size: 14px; color: #4285F4;
+            background-color:rgba(221, 221, 221, 0.5); padding:8px;}
+        </style>
+        """
+        self.IMAGE_FILE_EXTENSIONS = set(["jpg", "jpeg", "png"])
+        self.location = "us-central1"
+
+    def get_extension_import_config(self, display_name: str, description: str,
+                                    api_spec_gcs: dict, service_account_name: dict, tool_use_examples: list):
+        tool_use_examples = [
+            {
+                "extensionOperation": {
+                    "operationId": "say_hello",
+                },
+                "displayName": "Say hello in the requested language",
+                "query": "Say hello in French",
+                "requestParams": {
+                    "fields": [
+                        {
+                            "key": "apiServicePrompt",
+                            "value": {
+                                "string_value": "French",
+                            }
+                        }
+                    ]
+                },
+                "responseParams": {
+                    "fields": [
+                        {
+                            "key": "apiServiceOutput",
+                            "value": {
+                                "string_value": "bonjour",
+                            },
+                        }
+                    ],
+                },
+                "responseSummary": "Bonjour"
+            }
+        ]
+
+        return {
+            "displayName": display_name,
+            "description": description,
+            "manifest": {
+                "name": "EXTENSION_NAME_LLM",
+                "description": "DESCRIPTION_LLM",
+                "apiSpec": {
+                    "openApiGcsUri": api_spec_gcs,
+                },
+                "authConfig": {
+                    "authType": "OAUTH",
+                    "oauthConfig": {"service_account": service_account_name}
+                }
+            },
+            "toolUseExamples": tool_use_examples,
+        }
+
+    def create_extension_instance(self, display_name: str, description: str, open_api_gcs_uri: str,
+                                  llm_name: str = None, llm_description: str = None, runtime_config: dict = None, service_account: str = None):
+        project_id = get_gcp_project()
+        extension_name = f"projects/{project_id}/locations/us-central1/extensions/{validate_extension_id(display_name)}"
+
+        extension = extensions.Extension.create(
+            extension_name=extension_name,
+            display_name=display_name,
+            description=description,
+            runtime_config=runtime_config or None,
+            manifest={
+                "name": llm_name or display_name,
+                "description": llm_description or description,
+                "api_spec": {
+                    "open_api_gcs_uri": open_api_gcs_uri
+                },
+                "auth_config": {
+                    "auth_type": "GOOGLE_SERVICE_ACCOUNT_AUTH",
+                    "google_service_account_config": service_account or {},
+                },
+            },
+        )
+        log.info(f"Created Vertex Extension: {extension_name}")
+
+        return extension
+
+    def execute_extension(self, operation_id: str, operation_params: dict, extension_id: str):
+        init_vertex(location=self.location)
+
+        if not extension_id.startswith("projects/"):
+            project_id = get_gcp_project()
+            extension_name = f"projects/{project_id}/locations/{self.location}/extensions/{extension_id}"
+        else:
+            extension_name = extension_id
+
+        extension = extensions.Extension(extension_name)
+
+        response = extension.execute(
+            operation_id=operation_id,
+            operation_params=operation_params,
+        )
+
+        return response
+
+    def execute_code_extension(self, query: str, filenames: list[str] = None, gcs_files: list[str] = None):
+        if filenames and gcs_files:
+            raise ValueError("Can't specify both filenames and gcs_files")
+
+        extension_code_interpreter = extensions.Extension.from_hub("code_interpreter")
+
+        file_arr = None
+        if filenames:
+            file_arr = [
+                {
+                    "name": filename,
+                    "contents": base64.b64encode(open(filename, "rb").read()).decode()
+                }
+                for filename in filenames
+            ]
+
+        response = extension_code_interpreter.execute(
+            operation_id="generate_and_execute",
+            operation_params={
+                "query": query,
+                "files": file_arr,
+                "file_gcs_uris": gcs_files
+            })
+
+        self.CODE_INTERPRETER_WRITTEN_FILES.extend(
+            [item['name'] for item in response['output_files']])
+
+        if response.get('execution_error'):
+            log.error(f"Code Execution Response failed with: {response.get('execution_error')} - maybe retry?")
+
+        return response
+
+    def parse_files_to_html(self, outputFiles, save_files_locally=True):
+        file_list = []
+        details_tml = """<details><summary>{name}</summary><div>{html_content}</div></details>"""
+
+        if not outputFiles:
+            return "No Files generated from the code"
+        # Sort output_files so images are displayed before other files such as JSON.
+        for output_file in sorted(
+            outputFiles,
+            key=lambda x: x["name"].split(".")[-1] not in self.IMAGE_FILE_EXTENSIONS,
+        ):
+            file_name = output_file.get("name")
+            file_contents = base64.b64decode(output_file.get("contents"))
+            if save_files_locally:
+                open(file_name, "wb").write(file_contents)
+
+            if file_name.split(".")[-1] in self.IMAGE_FILE_EXTENSIONS:
+                # Render Image
+                file_html_content = ('<img src="data:image/png;base64, '
+                                     f'{output_file.get("contents")}" />')
+            elif file_name.endswith(".json"):
+                import pprint
+                # Pretty print JSON
+                json_pp = pprint.pformat(
+                    json.loads(file_contents.decode()),
+                    compact=False,
+                    width=160)
+                file_html_content = (f'<span>{json_pp}</span>')
+            elif file_name.endswith(".csv"):
+                # CSV
+                try:
+                    import pandas
+                except ImportError:
+                    log.error("Need pandas for csv processing")
+                csv_md = pandas.read_csv(
+                    StringIO(file_contents.decode())).to_markdown(index=False)
+                file_html_content = f'<span>{csv_md}</span>'
+            elif file_name.endswith(".pkl"):
+                # PKL
+                file_html_content = f'<span>Preview N/A</span>'
+            else:
+                file_html_content = f"<span>{file_contents.decode()}</span>"
+
+            file_list.append({'name': file_name, "html_content": file_html_content})
+
+        buffer_html = [details_tml.format(**_file) for _file in file_list]
+        return "".join(buffer_html)
+
+    def process_response(self, response: dict, save_files_locally=None) -> str:
+        result_template = """
+        <details open>
+            <summary class='main_summary'>{summary}:</summary>
+            <div><pre>{content}</pre></div>
+        </details>
+        """
+
+        result = ""
+        code = response.get('generated_code')
+        if 'execution_result' in response and response['execution_result'] != "":
+            result = result_template.format(
+                summary="Executed Code Output",
+                content=response.get('execution_result'))
+        else:
+            result = result_template.format(
+                summary="Executed Code Output",
+                content="Code does not produce printable output.")
+
+        if response.get('execution_error', None):
+            result += result_template.format(
+                summary="Generated Code Raised a (Possibly Non-Fatal) Exception",
+                content=response.get('execution_error', None))
+
+        result += result_template.format(
+            summary="Files Created <u>(Click on filename to view content)</u>",
+            content=self.parse_files_to_html(
+                response.get('output_files', []),
+                save_files_locally=True))
+
+        html_content = f"""
+        {self.css_styles}
+        <div id='main'>
+            <h3>Generated Code by Code Interpreter</h3>
+            <pre><code>{code}</code></pre>
+            <h3>Code Execution Results</h3>
+            {result}
+        </div>
+        """
+        if save_files_locally:
+            with open('code_execution_results.html', 'w') as file:
+                file.write(html_content)
+
+        return html_content
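extensions_class.py wraps the same functionality in a class so that written-file state and defaults live on an instance. A minimal usage sketch, not from the package, assuming the same us-central1 defaults the class hard-codes and a hypothetical query:

# Usage sketch for the new VertexAIExtensions class (not part of the wheel).
from sunholo.vertex.extensions_class import VertexAIExtensions

vex = VertexAIExtensions()

# Run the hub code interpreter; written files are tracked on the instance
# rather than in a module-level global.
response = vex.execute_code_extension(
    query="Find the max value in the list [1, 2, 3, 4, -5]"
)
print(vex.CODE_INTERPRETER_WRITTEN_FILES)

# Unlike the module version, process_response() here also writes
# code_execution_results.html when save_files_locally is truthy.
html = vex.process_response(response, save_files_locally=True)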
sunholo/vertex/memory_tools.py
CHANGED

@@ -90,13 +90,13 @@ def get_vertex_memories(vector_name):
             de = DiscoveryEngineClient(vector_name, project_id=get_gcp_project())
             log.info(f"Found vectorstore {vectorstore}")
 
-            data_store_path = de.data_store_path()
+            data_store_path = f"{de.data_store_path()}/dataStores/{vector_name}"
             corpus_tool = Tool.from_retrieval(
                 grounding.Retrieval(grounding.VertexAISearch(datastore=data_store_path))
             )
             tools.append(corpus_tool)
         except Exception as err:
-            log.error(f"Failed to fetch DiscoveryEngine
+            log.error(f"Failed to fetch DiscoveryEngine grounding - {str(err)} - skipping")
             continue
 
 
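The memory_tools.py change passes a full data store resource name to grounding.VertexAISearch instead of the parent path alone. A hedged illustration of the resulting string, assuming data_store_path() returns the collection parent (the exact prefix is an assumption, and the project and vector names are hypothetical):

# Illustration only: the shape of the datastore path after the fix.
parent = "projects/my-project/locations/eu/collections/default_collection"  # assumed return of de.data_store_path()
vector_name = "my_vector"  # hypothetical
data_store_path = f"{parent}/dataStores/{vector_name}"
# -> "projects/my-project/locations/eu/collections/default_collection/dataStores/my_vector"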
{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/METADATA
CHANGED

@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: sunholo
-Version: 0.70.
+Version: 0.70.6
 Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
 Home-page: https://github.com/sunholo-data/sunholo-py
-Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.70.
+Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.70.6.tar.gz
 Author: Holosun ApS
 Author-email: multivac@sunholo.com
 License: Apache License, Version 2.0
{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/RECORD
CHANGED

@@ -108,13 +108,14 @@ sunholo/utils/timedelta.py,sha256=BbLabEx7_rbErj_YbNM0MBcaFN76DC4PTe4zD2ucezg,49
 sunholo/utils/user_ids.py,sha256=SQd5_H7FE7vcTZp9AQuQDWBXd4FEEd7TeVMQe1H4Ny8,292
 sunholo/utils/version.py,sha256=jjU_4anXBikJxPg0Wur0X-B7-ec1tC7jToykAnAG9Dg,108
 sunholo/vertex/__init__.py,sha256=JvHcGFuv6R_nAhY2AdoqqhMpJ5ugeWPZ_svGhWrObBk,136
-sunholo/vertex/extensions.py,sha256=
+sunholo/vertex/extensions.py,sha256=d-Ikt9gHFf-jUMPmyU-xHwYe22QtEyr90Ua1LDKgTws,11026
+sunholo/vertex/extensions_class.py,sha256=0-XMrMvfhMN380ZdGXl11Mt7R9kCu9rB4Vduiflk8QA,9202
 sunholo/vertex/init.py,sha256=RLjQppTUwubWgwf2PoAke-EtcwlVkFPaPMYvUsMw1KQ,2029
-sunholo/vertex/memory_tools.py,sha256=
+sunholo/vertex/memory_tools.py,sha256=hy_UhKl05Bka5zK7KWoO8ZPVM-p8AQkcgFVLHUg9jZs,5748
 sunholo/vertex/safety.py,sha256=3meAX0HyGZYrH7rXPUAHxtI_3w_zoy_RX7Shtkoa660,1275
-sunholo-0.70.
-sunholo-0.70.
-sunholo-0.70.
-sunholo-0.70.
-sunholo-0.70.
-sunholo-0.70.
+sunholo-0.70.6.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+sunholo-0.70.6.dist-info/METADATA,sha256=LhA1lHY2mU6i7YHXkJtxgbqwqac6lU0p75reOatR2bk,6240
+sunholo-0.70.6.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+sunholo-0.70.6.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+sunholo-0.70.6.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+sunholo-0.70.6.dist-info/RECORD,,
{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/LICENSE.txt
File without changes

{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/WHEEL
File without changes

{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/entry_points.txt
File without changes

{sunholo-0.70.5.dist-info → sunholo-0.70.6.dist-info}/top_level.txt
File without changes