sunholo 0.118.0__py3-none-any.whl → 0.118.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
sunholo/chunker/message_data.py CHANGED
@@ -29,7 +29,7 @@ try:
 except ImportError:
     BlobServiceClient = None
 
-from ..types import Document
+from ..langchain_types import Document
 
 from .splitter import chunk_doc_to_docs
 from .pdfs import split_pdf_to_pages
sunholo/chunker/pdfs.py CHANGED
@@ -14,7 +14,7 @@
 import os
 import pathlib
 from ..custom_logging import log
-from ..types import Document
+from ..langchain_types import Document
 
 def split_pdf_to_pages(pdf_path, temp_dir):
 
sunholo/chunker/publish.py CHANGED
@@ -2,7 +2,7 @@ from ..custom_logging import log
 from ..pubsub import PubSubManager
 from ..utils.parsers import contains_url, extract_urls
 from ..utils.gcp_project import get_gcp_project
-from ..types import Document
+from ..langchain_types import Document
 
 def publish_if_urls(the_content, vector_name):
     """
sunholo/types.py → sunholo/langchain_types.py RENAMED
@@ -1,10 +1,9 @@
-from typing import Any, Dict, Optional, TYPE_CHECKING, Union
 from dataclasses import dataclass, asdict
 import json
+from typing import Dict, Any
 
-if TYPE_CHECKING:
-    from langchain.schema import Document as LangchainDocument
-
+# Note: Moved TYPE_CHECKING to only be used where needed
+from typing import TYPE_CHECKING
 
 @dataclass
 class Document:
@@ -36,7 +35,8 @@ class Document:
         """Convert to JSON string - for compatibility with LangChain's Document."""
         return json.dumps(self.to_dict())
 
-def convert_to_langchain_doc(doc: Document) -> Union[Any, "LangchainDocument"]:
+# Move the type checking import and annotation inside the function
+def convert_to_langchain_doc(doc: Document) -> Any:  # Remove Union and LangchainDocument from return type
     """Convert our Document to a LangChain Document.
 
     Returns Any when LangChain isn't available to avoid type errors.
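
For downstream code, the practical effect of this rename is that the lightweight Document dataclass (kept compatible with LangChain's Document) now imports from sunholo.langchain_types instead of sunholo.types. A minimal usage sketch, assuming the dataclass keeps LangChain-compatible page_content/metadata fields (those fields are not shown in this hunk):

    # Hypothetical usage; field names are assumed from the LangChain-compatibility note.
    from sunholo.langchain_types import Document, convert_to_langchain_doc

    doc = Document(page_content="hello", metadata={"source": "example.txt"})
    lc_doc = convert_to_langchain_doc(doc)  # returns Any when LangChain isn't installed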
sunholo/templates/agent/__init__.py ADDED (empty file)
sunholo/templates/agent/agent_service.py ADDED
@@ -0,0 +1,157 @@
+
+from sunholo.utils import ConfigManager
+from sunholo.vertex import (
+    init_genai,
+)
+
+from tools.your_agent import get_quarto, quarto_content, QuartoProcessor
+
+from my_log import log
+
+init_genai()
+
+# kwargs supports - image_uri, mime
+def vac_stream(question: str, vector_name: str, chat_history=[], callback=None, **kwargs):
+
+    config = ConfigManager(vector_name)
+    processor = QuartoProcessor(config)
+
+    orchestrator = get_quarto(config, processor)
+    if not orchestrator:
+        msg = f"No quarto model could be configured for {vector_name}"
+        log.error(msg)
+        callback.on_llm_end(response=msg)
+        return {"answer": msg}
+
+    chat = orchestrator.start_chat()
+
+    guardrail = 0
+    guardrail_max = kwargs.get('max_steps', 10)
+    big_text = ""
+    usage_metadata = None
+    functions_called = []
+    result = None
+    last_responses = None
+    while guardrail < guardrail_max:
+
+        content = quarto_content(question, chat_history)
+        log.info(f"# Loop [{guardrail}] - {content=}")
+        response = chat.send_message(content, stream=True)
+        this_text = ""  # reset for this loop
+        log.debug(f"[{guardrail}] {response}")
+
+        for chunk in response:
+            try:
+                log.debug(f"[{guardrail}] {chunk=}")
+                # Check if 'text' is an attribute of chunk and if it's a string
+                if hasattr(chunk, 'text') and isinstance(chunk.text, str):
+                    token = chunk.text
+                else:
+                    function_names = []
+                    try:
+                        for part in chunk.candidates[0].content.parts:
+                            if fn := part.function_call:
+                                params = {key: val for key, val in fn.args.items()}
+                                func_args = ",".join(f"{key}={value}" for key, value in params.items())
+                                log.info(f"Found function call: {fn.name}({func_args})")
+                                function_names.append(f"{fn.name}({func_args})")
+                                functions_called.append(f"{fn.name}({func_args})")
+                    except Exception as err:
+                        log.warning(f"{str(err)}")
+
+                    token = ""  # Handle the case where 'text' is not available
+
+                if processor.last_api_requests_and_responses:
+                    if processor.last_api_requests_and_responses != last_responses:
+                        last_responses = processor.last_api_requests_and_responses
+                        for last_response in last_responses:
+                            result = None  # reset for this function response
+                            if last_response:
+                                log.info(f"[{guardrail}] {last_response=}")
+
+                                # Convert the last_response to a string by extracting relevant information
+                                function_name = last_response[0]
+                                arguments = last_response[1]
+                                result = last_response[2]
+                                func_args = ",".join(f"{key}={value}" for key, value in arguments.items())
+
+                                if f"{function_name}({func_args})" not in function_names:
+                                    log.warning(f"skipping {function_name}({func_args}) as not in execution list")
+                                    continue
+
+                                token = f"\n## Loop [{guardrail}] Function call: {function_name}({func_args}):\n"
+
+                                if function_name == "decide_to_go_on":
+                                    token += f"# go_on={result}\n"
+                                else:
+                                    log.info(f"Adding result for: {function_name}")
+                                    token += result
+
+                callback.on_llm_new_token(token=token)
+                big_text += token
+                this_text += token
+
+                if not usage_metadata:
+                    chunk_metadata = chunk.usage_metadata
+                    usage_metadata = {
+                        "prompt_token_count": chunk_metadata.prompt_token_count,
+                        "candidates_token_count": chunk_metadata.candidates_token_count,
+                        "total_token_count": chunk_metadata.total_token_count,
+                    }
+
+            except ValueError as err:
+                callback.on_llm_new_token(token=str(err))
+
+        # change response to one with executed functions
+        response = processor.process_funcs(response)
+
+        if this_text:
+            chat_history.append(("<waiting for ai>", this_text))
+            log.info(f"[{guardrail}] Updated chat_history: {chat_history}")
+
+        go_on_check = processor.check_function_result("decide_to_go_on", False)
+        if go_on_check:
+            log.info("Breaking agent loop")
+            break
+
+        guardrail += 1
+        if guardrail > guardrail_max:
+            log.warning(f"Guardrail kicked in, more than {guardrail_max} loops")
+            break
+
+    callback.on_llm_end(response=big_text)
+    log.info(f"orchestrator.response: {big_text}")
+
+    metadata = {
+        "question": question,
+        "chat_history": chat_history,
+        "usage_metadata": usage_metadata,
+        "functions_called": functions_called
+    }
+
+    return {"answer": big_text or "No answer was given", "metadata": metadata}
+
+
+def vac(question: str, vector_name: str, chat_history=[], **kwargs):
+    # Create a callback that does nothing for streaming if you don't want intermediate outputs
+    class NoOpCallback:
+        def on_llm_new_token(self, token):
+            pass
+        def on_llm_end(self, response):
+            pass
+
+    # Use the NoOpCallback for non-streaming behavior
+    callback = NoOpCallback()
+
+    # Pass all arguments to vac_stream and use the final return
+    result = vac_stream(
+        question=question,
+        vector_name=vector_name,
+        chat_history=chat_history,
+        callback=callback,
+        **kwargs
+    )
+
+    return result
+
+
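
The vac_stream loop above assumes only that its callback exposes on_llm_new_token and on_llm_end, the same protocol the NoOpCallback stub in vac() satisfies. A minimal caller sketch under that assumption (PrintCallback and the VAC name are hypothetical, not part of the package):

    # Hypothetical streaming consumer for the callback protocol shown above.
    class PrintCallback:
        def on_llm_new_token(self, token):
            print(token, end="", flush=True)  # emit each token as it arrives

        def on_llm_end(self, response):
            print()  # terminate the line once the stream finishes

    result = vac_stream("Render my report", "my_vac", callback=PrintCallback())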
sunholo/templates/agent/app.py ADDED
@@ -0,0 +1,16 @@
+import os
+
+from sunholo.agents import VACRoutes, create_app
+
+from vac_service import vac_stream, vac
+
+app = create_app(__name__)
+
+# Register the Q&A routes with the specific interpreter functions
+# creates /vac/<vector_name> and /vac/streaming/<vector_name>
+VACRoutes(app, vac_stream, vac)
+
+if __name__ == "__main__":
+    import os
+    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080)), debug=True)
+
sunholo/templates/agent/my_log.py ADDED
@@ -0,0 +1,3 @@
+from sunholo.custom_logging import setup_logging
+
+log = setup_logging("sunholo")
sunholo/templates/agent/tools/__init__.py ADDED (empty file)
sunholo/templates/agent/tools/your_agent.py ADDED
@@ -0,0 +1,78 @@
+from sunholo.genai import GenAIFunctionProcessor
+from sunholo.utils import ConfigManager
+
+from my_log import log
+
+
+class QuartoProcessor(GenAIFunctionProcessor):
+    def construct_tools(self) -> dict:
+        tools = self.config.vacConfig("tools")
+        quarto_config = tools.get("quarto")
+
+        def decide_to_go_on(go_on: bool):
+            """
+            Examine the chat history. If the user's question has been answered, then go_on=False.
+            If the chat history indicates the answer is still being looked for, then go_on=True.
+            If there is no chat history, then go_on=True.
+            If there is an error that can't be corrected or solved by you, then go_on=False.
+            If there is an error but you think you can solve it by correcting your function arguments (such as an incorrect source), then go_on=True.
+            If you want to ask the user a question or for some more feedback, then go_on=False.
+
+            Args:
+                go_on: boolean Whether to continue searching or fetching from the AlloyDB database
+
+            Returns:
+                boolean: True to carry on, False to stop
+            """
+            return go_on
+
+        def quarto_render() -> dict:
+            """
+            ...
+
+            Args:
+
+
+            Returns:
+
+            """
+            pass
+
+        return {
+            "quarto_render": quarto_render,
+            "decide_to_go_on": decide_to_go_on
+        }
+
+def quarto_content(question: str, chat_history=[]) -> str:
+    prompt_config = ConfigManager("quarto")
+    alloydb_template = prompt_config.promptConfig("quarto_template")
+
+    conversation_text = ""
+    for human, ai in chat_history:
+        conversation_text += f"Human: {human}\nAI: {ai}\n"
+
+    return alloydb_template.format(the_question=question, chat_history=conversation_text[-10000:])
+
+
+def get_quarto(config: ConfigManager, processor: QuartoProcessor):
+
+    tools = config.vacConfig('tools')
+
+    if tools and tools.get('quarto'):
+        model_name = None
+        if config.vacConfig('llm') != "vertex":
+            model_name = 'gemini-1.5-flash'
+        alloydb_model = processor.get_model(
+            system_instruction=(
+                "You are a helpful Quarto agent that helps users create and render Quarto documents. "
+                "When you think the answer has been given to the satisfaction of the user, or you think no answer is possible, or you need user confirmation or input, you MUST use the decide_to_go_on(go_on=False) function. "
+                "When you want to ask the user a question, set go_on=False in the function call."
+            ),
+            model_name=model_name
+        )
+
+        if alloydb_model:
+            return alloydb_model
+
+    log.error("Error initializing quarto model")
+    return None
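
Note that decide_to_go_on is deliberately a no-op that just echoes its argument: the useful signal is the function-call record the model emits, which the agent loop in agent_service.py reads back via check_function_result("decide_to_go_on", False). A hedged illustration of the contract:

    # The model, not application code, chooses the argument; the loop inspects the call record.
    decide_to_go_on(go_on=False)  # recorded call tells the agent loop to stop iterating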
sunholo/templates/agent/vac_service.py ADDED
@@ -0,0 +1,73 @@
+from my_log import log
+from sunholo.utils import ConfigManager
+
+# VAC specific imports
+
+#TODO: Developer to update to their own implementation
+from sunholo.vertex import init_vertex, get_vertex_memories
+from vertexai.preview.generative_models import GenerativeModel
+
+#TODO: change this to a streaming VAC function
+def vac_stream(question: str, vector_name, chat_history=[], callback=None, **kwargs):
+
+    rag_model = create_model(vector_name)
+
+    # streaming model calls
+    response = rag_model.generate_content(question, stream=True)
+    for chunk in response:
+        try:
+            callback.on_llm_new_token(token=chunk.text)
+        except ValueError as err:
+            callback.on_llm_new_token(token=str(err))
+
+    callback.on_llm_end(response=response)
+    log.info(f"rag_model.response: {response}")
+
+    metadata = {
+        "chat_history": chat_history
+    }
+
+    return {"answer": response.text, "metadata": metadata}
+
+
+
+#TODO: change this to a batch VAC function
+def vac(question: str, vector_name: str, chat_history=[], **kwargs):
+    # Create a callback that does nothing for streaming if you don't want intermediate outputs
+    class NoOpCallback:
+        def on_llm_new_token(self, token):
+            pass
+        def on_llm_end(self, response):
+            pass
+
+    # Use the NoOpCallback for non-streaming behavior
+    callback = NoOpCallback()
+
+    # Pass all arguments to vac_stream and use the final return
+    result = vac_stream(
+        question=question,
+        vector_name=vector_name,
+        chat_history=chat_history,
+        callback=callback,
+        **kwargs
+    )
+
+    return result
+
+
+# TODO: common model setup to both batching and streaming
+def create_model(vac):
+    config = ConfigManager(vac)
+
+    init_vertex()
+    corpus_tools = get_vertex_memories(config)
+
+    model = config.vacConfig("model")
+
+    # Create a Gemini model instance
+    # https://ai.google.dev/api/python/google/generativeai/GenerativeModel#streaming
+    rag_model = GenerativeModel(
+        model_name=model or "gemini-1.5-flash", tools=[corpus_tools]
+    )
+
+    return rag_model
sunholo/templates/project/__init__.py ADDED (empty file)
sunholo/templates/project/app.py ADDED
@@ -0,0 +1,17 @@
+import os
+
+from sunholo.agents import VACRoutes, create_app
+
+from vac_service import vac_stream
+
+app = create_app(__name__)
+
+# Register the Q&A routes with the specific interpreter functions
+# creates endpoints /vac/streaming/<vector_name> and /vac/<vector_name> etc.
+VACRoutes(app, vac_stream)
+
+# start via `python app.py`
+if __name__ == "__main__":
+    import os
+    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080)), debug=True)
+
sunholo/templates/project/my_log.py ADDED
@@ -0,0 +1,3 @@
+from sunholo.custom_logging import setup_logging
+
+log = setup_logging("sunholo")
sunholo/templates/project/vac_service.py ADDED
@@ -0,0 +1,71 @@
+from my_log import log
+from sunholo.utils import ConfigManager
+
+# VAC specific imports
+
+#TODO: Developer to update to their own implementation
+from sunholo.genai import init_genai, genai_safety
+import google.generativeai as genai
+
+#TODO: change this to a streaming VAC function for your use case
+def vac_stream(question: str, vector_name: str, chat_history=[], callback=None, **kwargs):
+
+    model = create_model(vector_name)
+
+    # create chat history for genai model
+    # https://ai.google.dev/api/generate-content
+    contents = []
+    for human, ai in chat_history:
+        if human:
+            contents.append({"role": "user", "parts": [{"text": human}]})
+
+        if ai:
+            contents.append({"role": "model", "parts": [{"text": ai}]})
+
+
+    # the user question at the end of contents list
+    contents.append({"role": "user", "parts": [{"text": question}]})
+
+    log.info(contents)
+    # streaming model calls
+    response = model.generate_content(contents, stream=True)
+    chunks = ""
+    for chunk in response:
+        if chunk and chunk.text:
+            try:
+                callback.on_llm_new_token(token=chunk.text)
+                chunks += chunk.text
+            except ValueError as err:
+                callback.on_llm_new_token(token=str(err))
+
+    # stream has finished, full response is also returned
+    callback.on_llm_end(response=response)
+    log.info(f"model.response: {response}")
+
+    metadata = {
+        "question": question,
+        "vector_name": vector_name,
+        "chat_history": chat_history
+    }
+
+    # to not return this dict at the end of the stream, pass stream_only: true in request
+    return {"answer": chunks, "metadata": metadata}
+
+
+# TODO: example model setup function
+def create_model(vac):
+    config = ConfigManager(vac)
+
+    init_genai()
+
+    # get a setting from the config vacConfig object (returns None if not found)
+    model = config.vacConfig("model")
+
+    # Create a gemini-flash model instance
+    # https://ai.google.dev/api/python/google/generativeai/GenerativeModel#streaming
+    genai_model = genai.GenerativeModel(
+        model_name=model or "gemini-1.5-flash",
+        safety_settings=genai_safety()
+    )
+
+    return genai_model
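
In both vac_service.py templates, chat_history is a list of (human, ai) message pairs where either side may be empty; the loop above skips empty sides via the if human: / if ai: checks. A hedged illustration of the expected shape (values invented):

    # Hypothetical history; only non-empty sides are appended to `contents`.
    chat_history = [
        ("Hello", "Hi! How can I help?"),
        ("Summarise my report", None),  # no model reply yet, so only the user turn is sent
    ]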
sunholo/templates/system_services/__init__.py ADDED (empty file)
sunholo/templates/system_services/app.py ADDED
@@ -0,0 +1,49 @@
+import os
+import traceback
+
+# app.py
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
+
+from my_log import log
+
+app = FastAPI()
+
+@app.get("/")
+def home():
+    """Simple endpoint to indicate that the app is running."""
+    return {"message": "Hello, service!"}
+
+@app.post("/system_service/{param}")
+async def system_service(param: str, request: Request):
+    """
+    Pubsub message parsed and sent to Langfuse ID server
+    """
+    data = await request.json()
+
+    try:
+        #TODO: add stuff here
+        meta = ""
+        return {"status": "success", "message": meta}
+    except Exception as err:
+        log.error(f'EVAL_ERROR: Error when sending {data} to /pubsub_to_langfuse: {str(err)} traceback: {traceback.format_exc()}')
+        return JSONResponse(status_code=200, content={"status": "error", "message": f'{str(err)} traceback: {traceback.format_exc()}'})
+
+@app.post("/test_endpoint")
+async def test_me(request: Request):
+    """
+    Endpoint to send trace_ids directly for evals then sent to Langfuse ID server
+    """
+    data = await request.json()
+
+    try:
+        #TODO: do something here
+        meta = ""
+        return {"status": "success", "message": meta}
+    except Exception as err:
+        log.error(f'EVAL_ERROR: Error when sending {data} to /direct_evals: {str(err)} traceback: {traceback.format_exc()}')
+        return JSONResponse(status_code=500, content={"status": "error", "message": f'{str(err)} traceback: {traceback.format_exc()}'})
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)), log_level="debug")
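
Assuming the service runs locally on its default port, the test endpoint can be exercised with a plain JSON POST; requests is already a package dependency. The trace_id key here is a hypothetical payload, not a documented schema:

    # Hypothetical smoke test against the /test_endpoint route defined above.
    import requests

    r = requests.post("http://localhost:8080/test_endpoint", json={"trace_id": "abc123"})
    print(r.json())  # {"status": "success", "message": ""} until the TODO is filled in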
sunholo/templates/system_services/my_log.py ADDED
@@ -0,0 +1,3 @@
+from sunholo.custom_logging import setup_logging
+
+log = setup_logging("system")
sunholo-0.118.0.dist-info/METADATA → sunholo-0.118.1.dist-info/METADATA CHANGED
@@ -1,12 +1,11 @@
 Metadata-Version: 2.2
 Name: sunholo
-Version: 0.118.0
+Version: 0.118.1
 Summary: Large Language Model DevOps - a package to help deploy LLMs to the Cloud.
-Home-page: https://github.com/sunholo-data/sunholo-py
-Download-URL: https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.118.0.tar.gz
-Author: Holosun ApS
-Author-email: multivac@sunholo.com
+Author-email: Holosun ApS <multivac@sunholo.com>
 License: Apache License, Version 2.0
+Project-URL: Homepage, https://github.com/sunholo-data/sunholo-py
+Project-URL: Download, https://github.com/sunholo-data/sunholo-py/archive/refs/tags/v0.118.0.tar.gz
 Keywords: llms,devops,google_cloud_platform
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
@@ -16,6 +15,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE.txt
 Requires-Dist: aiohttp
@@ -24,6 +24,9 @@ Requires-Dist: pydantic
 Requires-Dist: requests
 Requires-Dist: ruamel.yaml
 Requires-Dist: tenacity
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
 Provides-Extra: all
 Requires-Dist: aiohttp; extra == "all"
 Requires-Dist: anthropic[vertex]; extra == "all"
@@ -169,18 +172,6 @@ Requires-Dist: numpy; extra == "tts"
 Requires-Dist: sounddevice; extra == "tts"
 Provides-Extra: video
 Requires-Dist: opencv-python; extra == "video"
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: download-url
-Dynamic: home-page
-Dynamic: keywords
-Dynamic: license
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: summary
 
 [![PyPi Version](https://img.shields.io/pypi/v/sunholo.svg)](https://pypi.python.org/pypi/sunholo/)
 
sunholo-0.118.0.dist-info/RECORD → sunholo-0.118.1.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 sunholo/__init__.py,sha256=Ap2yX2ITBVt_vkloYipUM8OwW14g6aor2NX7LWp0-mI,1133
 sunholo/custom_logging.py,sha256=YfIN1oP3dOEkkYkyRBU8BGS3uJFGwUDsFCl8mIVbwvE,12225
-sunholo/types.py,sha256=xdNNb4bR7O4jY9NTNzeeiX5VYjVxCGzYWy8hUXemxr0,1764
+sunholo/langchain_types.py,sha256=uZ4zvgej_f7pLqjtu4YP7qMC_eZD5ym_5x4pyvA1Ih4,1834
 sunholo/agents/__init__.py,sha256=X2I3pPkGeKWjc3d0QgSpkTyqD8J8JtrEWqwrumf1MMc,391
 sunholo/agents/chat_history.py,sha256=Gph_CdlP2otYnNdR1q1Umyyyvcad2F6K3LxU5yBQ9l0,5387
 sunholo/agents/dispatch_to_qa.py,sha256=AwLS41oK6iS8xre-HuWjS4jj9dvU-evWI58EYfG65fg,8879
@@ -36,10 +36,10 @@ sunholo/chunker/doc_handling.py,sha256=t_lDazHfJbs4Q2Ruq2MvBBeJRfsjjQkzMxKuX8qQK
 sunholo/chunker/encode_metadata.py,sha256=hxxd9KU35Xi0Z_EL8kt_oD66pKfBLhEjBImC16ew-Eo,1919
 sunholo/chunker/images.py,sha256=id2PBu6XyGEOtgafq2v0c9_O6kxaC_pYFMnbsIitkSg,1868
 sunholo/chunker/loaders.py,sha256=5NXrMxV-WdbFpxeLhFzccw0_zhf1UQ7yKFFeaMkc9Bc,11105
-sunholo/chunker/message_data.py,sha256=iPrYUQRjjCGoVDvnqGEudkXi1PdbOwxey6v08_-GF2g,10826
-sunholo/chunker/pdfs.py,sha256=DFZdsvAQt8qMvg7d-3dj29ULgKJZYXy4qGdORLmE790,2467
+sunholo/chunker/message_data.py,sha256=bpb8QWQttqazm5lr7fTFJ5JDwf-P0SQ5cOIf6NikNyI,10836
+sunholo/chunker/pdfs.py,sha256=xwbuMJrbypcyPXfZ8tiUidWeMr80C2NhfTC1mwa8SHY,2477
 sunholo/chunker/process_chunker_data.py,sha256=uO-YOEHIjAOy0ZMJ0vea9OMNsQBISHfhbtgoyuHiP6s,3598
-sunholo/chunker/publish.py,sha256=Y-4w59iVGoXCKaun9jMWVR7Cqprb9mTZGiaVk5y_zrY,2933
+sunholo/chunker/publish.py,sha256=IDud-NhRcEZFv9GkyWJFRKwfptIU052kSPKEx8AYW68,2943
 sunholo/chunker/pubsub.py,sha256=48bhuAcszN7LGe3-ksPSLHHhq0uKxiXOrizck5qpcP0,1012
 sunholo/chunker/splitter.py,sha256=RfekLPkjhCcNd1PFXIj_FxusJMJ8_3cyWl7bsYvtQ0g,7068
 sunholo/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -125,6 +125,20 @@ sunholo/streaming/stream_lookup.py,sha256=hYg1DbdSE_QNJ8ZB-ynXJlWgvFjrGvwoUsGJu_
 sunholo/streaming/streaming.py,sha256=gSxLuwK-5-t5D1AjcHf838BY-L4jvdkdn_xePl-DK3o,16635
 sunholo/summarise/__init__.py,sha256=MZk3dblUMODcPb1crq4v-Z508NrFIpkSWNf9FIO8BcU,38
 sunholo/summarise/summarise.py,sha256=UnycBVLLEXK1HitCOG2zW3XIyxMrw47xoVf6e2OC9A0,4150
+sunholo/templates/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sunholo/templates/agent/agent_service.py,sha256=s2PZZNo287StnkiNIb7S05TWgx0oSJgcAtTWvKz1H2E,6184
+sunholo/templates/agent/app.py,sha256=HK9xXjAoHzuYJQo43ddl1NjRAc7GY4tq6VMxVVUaEyA,413
+sunholo/templates/agent/my_log.py,sha256=otLxNHIGsm4-fsoeagsykoqvBsqM3RlwCgEhsbdtfHA,81
+sunholo/templates/agent/vac_service.py,sha256=FFnZrg0ohG2J4mfwbSIh8v4pgTNNJhN3Ng0jQtrg51c,2109
+sunholo/templates/agent/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sunholo/templates/agent/tools/your_agent.py,sha256=vUeAnuIRK3d8Z4TcYQ80bBtPzDsXsUwl_hldt_lw4Uw,2920
+sunholo/templates/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sunholo/templates/project/app.py,sha256=skoIQRyTvWUT689RVCMYFDDuYSwkEw-6EsbGTIFLCA4,446
+sunholo/templates/project/my_log.py,sha256=otLxNHIGsm4-fsoeagsykoqvBsqM3RlwCgEhsbdtfHA,81
+sunholo/templates/project/vac_service.py,sha256=Xdfzmcidm4P-IlkjvkQZ47rC6ERRevSI1TchimjRDVg,2213
+sunholo/templates/system_services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sunholo/templates/system_services/app.py,sha256=sJvYP1FC8ufWffr1pqKekVWeZiLMKq5HBWaRKxq8f64,1621
+sunholo/templates/system_services/my_log.py,sha256=BkGSGkwyqIjU6W44BfsOhZbNs2XfzEvmKbfryfI3tBY,80
 sunholo/terraform/__init__.py,sha256=yixxEltc3n9UpZaVi05GlgS-YRq_DVGjUc37I9ajeP4,76
 sunholo/terraform/tfvars_editor.py,sha256=-TBBWbALYb5HLFYwD2s70Kp27ys6fzIyreBFOT5kqqY,13142
 sunholo/tools/__init__.py,sha256=5NuYpwwTX81qGUWvgwfItoSLXteNnp7KjgD7IPZUFjI,53
@@ -150,9 +164,9 @@ sunholo/vertex/init.py,sha256=1OQwcPBKZYBTDPdyU7IM4X4OmiXLdsNV30C-fee2scQ,2875
 sunholo/vertex/memory_tools.py,sha256=tBZxqVZ4InTmdBvLlOYwoSEWu4-kGquc-gxDwZCC4FA,7667
 sunholo/vertex/safety.py,sha256=S9PgQT1O_BQAkcqauWncRJaydiP8Q_Jzmu9gxYfy1VA,2482
 sunholo/vertex/type_dict_to_json.py,sha256=uTzL4o9tJRao4u-gJOFcACgWGkBOtqACmb6ihvCErL8,4694
-sunholo-0.118.0.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
-sunholo-0.118.0.dist-info/METADATA,sha256=b8HJvu2GA9AJqTfFcqKBRCw8ArVt71zvQwDUAKuKK3A,9752
-sunholo-0.118.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-sunholo-0.118.0.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
-sunholo-0.118.0.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
-sunholo-0.118.0.dist-info/RECORD,,
+sunholo-0.118.1.dist-info/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
+sunholo-0.118.1.dist-info/METADATA,sha256=_1l01je_MwmiGCriNA97uLBd1TJB04gcfXOUljF3T2Y,9641
+sunholo-0.118.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+sunholo-0.118.1.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
+sunholo-0.118.1.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
+sunholo-0.118.1.dist-info/RECORD,,