sunholo 0.78.4__py3-none-any.whl → 0.78.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sunholo/components/llm.py CHANGED
@@ -16,15 +16,22 @@ from ..utils import load_config_key, ConfigManager
  
  import os
  
- def pick_llm(vector_name):
+ def pick_llm(vector_name:str=None, config:ConfigManager=None):
+
+     if config is None:
+         if vector_name is None:
+             raise ValueError("config and vector_name was None")
+         config = ConfigManager(vector_name)
+
      log.debug('Picking llm')
  
-     llm_str = load_config_key("llm", vector_name, kind="vacConfig")
+     llm_str = config.vacConfig("llm")
  
      if llm_str == 'openai':
-         llm_chat = get_llm_chat(vector_name)
-         llm = get_llm_chat(vector_name, model="gpt-3.5-turbo-16k") # TODO: fix it needs llm_chat and not llm
-         embeddings = get_embeddings(vector_name)
+         llm_chat = get_llm_chat(config=config)
+         llm = get_llm_chat(model="gpt-3.5-turbo-16k", config=config) # TODO: fix it needs llm_chat and not llm
+         embeddings = get_embeddings(config=config)
+
          log.debug("Chose OpenAI")
      elif llm_str == 'vertex':
          llm = get_llm_chat(vector_name) # TODO: fix it needs llm_chat and not llm
@@ -62,7 +69,9 @@ def llm_str_to_llm(llm_str, model=None, vector_name=None, config=None):
      if llm_str is None:
          raise NotImplementedError("llm_str was None")
  
-     if vector_name:
+     if config is None:
+         if vector_name is None:
+             raise ValueError("vector_name and config was None")
          config = ConfigManager(vector_name)
  
      if llm_str == 'openai':
@@ -125,10 +134,16 @@ def get_llm(vector_name=None, model=None, config=None):
      log.debug(f"Chose LLM: {llm_str}")
      return llm_str_to_llm(llm_str, model=model, config=config)
  
- def get_llm_chat(vector_name, model=None):
-     llm_str = load_config_key("llm", vector_name, kind="vacConfig")
+ def get_llm_chat(vector_name:str=None, model=None, config:ConfigManager=None):
+
+     if config is None:
+         if vector_name is None:
+             raise ValueError("config and vector_name was None")
+         config = ConfigManager(vector_name)
+
+     llm_str = config.vacConfig("llm")
      if not model:
-         model = load_config_key("model", vector_name, kind="vacConfig")
+         model = config.vacConfig("model")
  
      log.debug(f"Chose LLM: {llm_str}")
      # Configure LLMs based on llm_str
@@ -167,7 +182,7 @@ def get_llm_chat(vector_name, model=None):
          return ChatAnthropic(model_name = model, temperature=0)
      elif llm_str == 'azure':
          from langchain_openai import AzureChatOpenAI
-         azure_config = load_config_key("azure", vector_name, kind="vacConfig")
+         azure_config = config.vacConfig("azure")
          if not azure_config:
              raise ValueError("Need to configure azure.config if llm='azure'")
  
@@ -209,22 +224,37 @@ def get_llm_chat(vector_name, model=None):
      if llm_str is None:
          raise NotImplementedError(f'No llm implemented for {llm_str}')
  
- def get_embeddings(vector_name):
+ def get_embeddings(vector_name=None, config:ConfigManager=None):
+
+     if not config:
+         if not vector_name:
+             raise ValueError(f"config and vector_name was None: {vector_name}")
+         config = ConfigManager(vector_name)
+
  
      llm_str = None
-     embed_dict = load_config_key("embedder", vector_name, kind="vacConfig")
+     embed_dict = config.vacConfig("embedder")
  
      if embed_dict:
          llm_str = embed_dict.get('llm')
  
      if llm_str is None:
-         llm_str = load_config_key("llm", vector_name, kind="vacConfig")
+         llm_str = config.vacConfig("llm")
+
+     if llm_str is None:
+         raise ValueError(f"llm_str was None: {llm_str}")
  
-     return pick_embedding(llm_str, vector_name=vector_name)
+     return pick_embedding(llm_str, config=config)
  
  
  #TODO: specify model
- def pick_embedding(llm_str: str, vector_name: str=None):
+ def pick_embedding(llm_str: str, vector_name: str=None, config: ConfigManager=None):
+
+     if not config:
+         if not vector_name:
+             raise ValueError(f"config and vector_name was None {vector_name}")
+         config = ConfigManager(vector_name)
+
      # get embedding directly from llm_str
      # Configure embeddings based on llm_str
      if llm_str == 'openai':
@@ -244,7 +274,7 @@ def pick_embedding(llm_str: str, vector_name: str=None):
      elif llm_str == 'azure':
          from langchain_openai import AzureOpenAIEmbeddings
  
-         azure_config = load_config_key("azure", vector_name, kind="vacConfig")
+         azure_config = config.vacConfig("azure")
          if not azure_config:
              raise ValueError("Need to configure azure.config if llm='azure'")
  
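The hunks above move the llm.py helpers from per-call load_config_key lookups to an optional shared ConfigManager, with vector_name kept as a fallback. A minimal usage sketch under those assumptions; the vector name "my_vac" and the variable names are placeholders, not part of the package:

    from sunholo.utils import ConfigManager
    from sunholo.components.llm import get_llm_chat, get_embeddings

    # Build the config once; "my_vac" is a placeholder VAC/vector name.
    config = ConfigManager("my_vac")

    # New style: pass the ConfigManager directly.
    llm_chat = get_llm_chat(config=config)
    embeddings = get_embeddings(config=config)

    # Old style still works: the helpers construct a ConfigManager internally.
    llm_chat = get_llm_chat(vector_name="my_vac")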
 
sunholo/components/retriever.py CHANGED
@@ -26,8 +26,14 @@ from langchain.retrievers import ContextualCompressionRetriever
  
  
  
- def load_memories(vector_name):
-     memories = ConfigManager(vector_name).vacConfig("memory")
+ def load_memories(vector_name:str=None, config:ConfigManager=None):
+     if config is None:
+         if vector_name is None:
+             raise ValueError("vector_name and config were none")
+         config = ConfigManager(vector_name)
+
+     memories = config.vacConfig("memory")
+
      log.info(f"Found memory settings for {vector_name}: {memories}")
      if not memories or len(memories) == 0:
          log.info(f"No memory settings found for {vector_name}")
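load_memories gains the same optional config parameter. A brief sketch, with a placeholder vector name:

    from sunholo.utils import ConfigManager
    from sunholo.components import load_memories

    config = ConfigManager("my_vac")          # placeholder name
    memories = load_memories(config=config)   # reads the "memory" block of the vacConfig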
sunholo/embedder/embed_chunk.py CHANGED
@@ -22,6 +22,7 @@ from langchain.schema import Document
  from ..components import get_embeddings, pick_vectorstore, load_memories, pick_embedding
  from ..logging import log
  from ..database.uuid import generate_uuid_from_object_id
+ from ..utils import ConfigManager
  
  def embed_pubsub_chunk(data: dict):
      """Triggered from a message on a Cloud Pub/Sub topic "embed_chunk" topic
@@ -63,6 +64,9 @@ def embed_pubsub_chunk(data: dict):
          log.error(msg)
          return msg
  
+     config = ConfigManager(vector_name)
+     log.info(f"{config=}")
+
      log.info(f"Embedding: {vector_name} page_content: {page_content[:30]}...[{len(page_content)}] - {metadata}")
  
      if 'eventTime' not in metadata:
@@ -102,9 +106,9 @@ def embed_pubsub_chunk(data: dict):
      doc = Document(page_content=page_content, metadata=metadata)
  
      # init embedding and vector store
-     embeddings = get_embeddings(vector_name)
+     embeddings = get_embeddings(config=config)
  
-     memories = load_memories(vector_name)
+     memories = load_memories(config=config)
      vectorstore_list = []
      for memory in memories: # Iterate over the list
          for key, value in memory.items():
@@ -114,7 +118,7 @@ def embed_pubsub_chunk(data: dict):
              # check if vectorstore specific embedding is available
              embed_llm = value.get('llm')
              if embed_llm:
-                 embeddings = pick_embedding(embed_llm)
+                 embeddings = pick_embedding(embed_llm, config=config)
              # check if read only
              read_only = value.get('read_only')
              if read_only:
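embed_pubsub_chunk now builds one ConfigManager per message and threads it through the embedding helpers, including the per-vectorstore embedding override. A rough standalone sketch of that flow; names not shown in this diff, such as "my_vac", are illustrative:

    from sunholo.utils import ConfigManager
    from sunholo.components import get_embeddings, load_memories, pick_embedding

    config = ConfigManager("my_vac")   # placeholder vector name

    embeddings = get_embeddings(config=config)
    memories = load_memories(config=config)

    for memory in memories or []:
        for key, value in memory.items():
            embed_llm = value.get('llm')
            if embed_llm:
                # vectorstore-specific embedding override, as in embed_pubsub_chunk
                embeddings = pick_embedding(embed_llm, config=config)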
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: sunholo
- Version: 0.78.4
+ Version: 0.78.5
  Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
  Home-page: https://github.com/sunholo-data/sunholo-py
- Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.78.4.tar.gz
+ Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.78.5.tar.gz
  Author: Holosun ApS
  Author-email: multivac@sunholo.com
  License: Apache License, Version 2.0
@@ -54,8 +54,8 @@ sunholo/cli/sun_rich.py,sha256=UpMqeJ0C8i0pkue1AHnnyyX0bFJ9zZeJ7HBR6yhuA8A,54
  sunholo/cli/swagger.py,sha256=absYKAU-7Yd2eiVNUY-g_WLl2zJfeRUNdWQ0oH8M_HM,1564
  sunholo/cli/vertex.py,sha256=8130YCarxHL1UC3aqblNmUwGZTXbkdL4Y_FOnZJsWiI,2056
  sunholo/components/__init__.py,sha256=IDoylb74zFKo6NIS3RQqUl0PDFBGVxM1dfUmO7OJ44U,176
- sunholo/components/llm.py,sha256=QTTpqUhfj7u9Ty9-E-XL8dpg4fp19z64FdRC1zbTHVo,10698
- sunholo/components/retriever.py,sha256=BFUw_6turT3CQJZWv_uXylmH5fHdb0gKfKJrQ_j6MGY,6533
+ sunholo/components/llm.py,sha256=XhSFuvthK35LDirX-zUbeLrLU8ccLSGxdJOOQovBGEM,11481
+ sunholo/components/retriever.py,sha256=F-wgZMpGJ8mGxJMAHA7HNgDwEhnvq1Pd6EGnTuBFlY8,6719
  sunholo/components/vectorstore.py,sha256=zUJ90L1S4IyxLB0JUWopeuwVjcsSqdhj1QreEfsJhsE,5548
  sunholo/database/__init__.py,sha256=Zz0Shcq-CtStf9rJGIYB_Ybzb8rY_Q9mfSj-nviM490,241
  sunholo/database/alloydb.py,sha256=d9W0pbZB0jTVIGF5OVaQ6kXHo-X3-6e9NpWNmV5e9UY,10464
@@ -75,7 +75,7 @@ sunholo/discovery_engine/chunker_handler.py,sha256=fDqvXeXr58s6TB75MMIGKKEg42T21
  sunholo/discovery_engine/create_new.py,sha256=7oZG78T6lW0EspRzlo7-qRyXFSuFxDn2dfSAVEaqlqY,978
  sunholo/discovery_engine/discovery_engine_client.py,sha256=YYsFeaW41l8jmWCruQnYxJGKEYBZ7dduTBDhdxI63hQ,17719
  sunholo/embedder/__init__.py,sha256=sI4N_CqgEVcrMDxXgxKp1FsfsB4FpjoXgPGkl4N_u4I,44
- sunholo/embedder/embed_chunk.py,sha256=8BJ90tR0_JbCcsVCzrtPdZn6sVys0OhXSxLszlve_ko,6819
+ sunholo/embedder/embed_chunk.py,sha256=FFr5pDvFCsWNS5JnTjuf1aCpg4Qlut83wqndneavnj8,6944
  sunholo/gcs/__init__.py,sha256=SZvbsMFDko40sIRHTHppA37IijvJTae54vrhooEF5-4,90
  sunholo/gcs/add_file.py,sha256=m-iQeYAmdXxy2EJ1uMmM3gx-eKbTcNpfsAyRd4sL_hA,7120
  sunholo/gcs/download_folder.py,sha256=mfntDA3Gl-7quMK9_eSTWvUOY1330jF--1cb62C0K1E,1607
@@ -132,9 +132,9 @@ sunholo/vertex/init.py,sha256=uyg76EqS39jWJ2gxMqXOLWP6MQ2hc81wFdwgG86ZoCM,2868
  sunholo/vertex/memory_tools.py,sha256=pomHrDKqvY8MZxfUqoEwhdlpCvSGP6KmFJMVKOimXjs,6842
  sunholo/vertex/safety.py,sha256=S9PgQT1O_BQAkcqauWncRJaydiP8Q_Jzmu9gxYfy1VA,2482
  sunholo/vertex/type_dict_to_json.py,sha256=uTzL4o9tJRao4u-gJOFcACgWGkBOtqACmb6ihvCErL8,4694
- sunholo-0.78.4.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
- sunholo-0.78.4.dist-info/METADATA,sha256=Ic8NbVh3Y5f0OZxVu18aB9QaI5PcIamIG3DDPzH6_6o,7348
- sunholo-0.78.4.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
- sunholo-0.78.4.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
- sunholo-0.78.4.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
- sunholo-0.78.4.dist-info/RECORD,,
+ sunholo-0.78.5.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+ sunholo-0.78.5.dist-info/METADATA,sha256=seWt-IjHlh20kmVCIeYAftXTj7OPKwqVvdOZhZ4J7KM,7348
+ sunholo-0.78.5.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
+ sunholo-0.78.5.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+ sunholo-0.78.5.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+ sunholo-0.78.5.dist-info/RECORD,,