ws-bom-robot-app 0.0.60__py3-none-any.whl → 0.0.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. ws_bom_robot_app/config.py +2 -3
  2. ws_bom_robot_app/cron_manager.py +2 -2
  3. ws_bom_robot_app/llm/agent_description.py +123 -123
  4. ws_bom_robot_app/llm/agent_handler.py +177 -177
  5. ws_bom_robot_app/llm/agent_lcel.py +45 -46
  6. ws_bom_robot_app/llm/api.py +12 -0
  7. ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  8. ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -74
  9. ws_bom_robot_app/llm/main.py +134 -134
  10. ws_bom_robot_app/llm/models/api.py +6 -0
  11. ws_bom_robot_app/llm/models/feedback.py +30 -30
  12. ws_bom_robot_app/llm/nebuly_handler.py +182 -173
  13. ws_bom_robot_app/llm/settings.py +4 -4
  14. ws_bom_robot_app/llm/tools/models/main.py +4 -0
  15. ws_bom_robot_app/llm/tools/tool_builder.py +65 -23
  16. ws_bom_robot_app/llm/tools/tool_manager.py +312 -228
  17. ws_bom_robot_app/llm/tools/utils.py +41 -41
  18. ws_bom_robot_app/llm/utils/agent.py +34 -34
  19. ws_bom_robot_app/llm/utils/cms.py +77 -0
  20. ws_bom_robot_app/llm/utils/download.py +79 -79
  21. ws_bom_robot_app/llm/utils/print.py +29 -29
  22. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  23. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  24. ws_bom_robot_app/task_manager.py +3 -1
  25. ws_bom_robot_app/util.py +59 -20
  26. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/METADATA +17 -17
  27. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/RECORD +29 -28
  28. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/WHEEL +0 -0
  29. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/llm/utils/agent.py
@@ -1,34 +1,34 @@
- import os
- from langchain_core.embeddings import Embeddings
- from ws_bom_robot_app.llm.models.api import LlmRules
- from ws_bom_robot_app.llm.utils.print import HiddenPrints
- from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
- import warnings
-
- async def get_rules(embeddings: Embeddings, rules: LlmRules, input: str | list) -> str:
-     with warnings.catch_warnings():
-         warnings.simplefilter("ignore", category=Warning)
-         # check if the input is multimodal and convert it to text
-         if isinstance(input, list):
-             input = " ".join(obj.get("text", "") for obj in input)
-         # check if the input is empty or the rules are not provided
-         if any([input=="",rules is None,rules and rules.vector_db == "",rules and not os.path.exists(rules.vector_db)]):
-             return ""
-         # get the rules from the vector db and return prompt with rules
-         rules_prompt = ""
-         rules_doc = await VectorDbManager.get_strategy(rules.vector_type).invoke(
-             embeddings,
-             rules.vector_db,
-             input,
-             search_type="similarity_score_threshold",
-             search_kwargs={
-                 "score_threshold": rules.threshold,
-                 "k": 500,
-                 "fetch_k": 500,
-             },
-             source = None) #type: ignore
-         if len(rules_doc) > 0:
-             rules_prompt = "\nFollow this rules: \n RULES: \n"
-             for rule_doc in rules_doc:
-                 rules_prompt += "- " + rule_doc.page_content + "\n"
-         return rules_prompt
+ import os
+ from langchain_core.embeddings import Embeddings
+ from ws_bom_robot_app.llm.models.api import LlmRules
+ from ws_bom_robot_app.llm.utils.print import HiddenPrints
+ from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
+ import warnings
+
+ async def get_rules(embeddings: Embeddings, rules: LlmRules, query: str | list) -> str:
+     with warnings.catch_warnings():
+         warnings.simplefilter("ignore", category=Warning)
+         # check if the input is multimodal and convert it to text
+         if isinstance(query, list):
+             query = " ".join(obj.get("text", "") for obj in query)
+         # check if the input is empty or the rules are not provided
+         if any([query=="",rules is None,rules and rules.vector_db == "",rules and not os.path.exists(rules.vector_db)]):
+             return ""
+         # get the rules from the vector db and return prompt with rules
+         rules_prompt = ""
+         rules_doc = await VectorDbManager.get_strategy(rules.vector_type).invoke(
+             embeddings,
+             rules.vector_db,
+             query,
+             search_type="similarity_score_threshold",
+             search_kwargs={
+                 "score_threshold": rules.threshold,
+                 "k": 500,
+                 "fetch_k": 500,
+             },
+             source = None) #type: ignore
+         if len(rules_doc) > 0:
+             rules_prompt = "\nFollow this rules: \n RULES: \n"
+             for rule_doc in rules_doc:
+                 rules_prompt += "- " + rule_doc.page_content + "\n"
+         return rules_prompt
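The only functional change in this hunk is the rename of the `input` parameter to `query`, which stops shadowing Python's `input` builtin but breaks any caller passing it by keyword. A minimal caller sketch, assuming `LlmRules` is a pydantic model accepting the `vector_db`, `vector_type`, and `threshold` fields that `get_rules` reads (field values and the embeddings backend below are illustrative, not part of the package):

```python
from langchain_openai import OpenAIEmbeddings  # any LangChain Embeddings implementation works
from ws_bom_robot_app.llm.models.api import LlmRules
from ws_bom_robot_app.llm.utils.agent import get_rules

async def rules_prompt_for(message: str) -> str:
    embeddings = OpenAIEmbeddings()
    # hypothetical values; vector_db must point to an existing local store
    rules = LlmRules(vector_db="./db/rules", vector_type="faiss", threshold=0.7)
    # 0.0.60 accepted get_rules(..., input=message); 0.0.62 requires query=
    return await get_rules(embeddings, rules, query=message)
```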
ws_bom_robot_app/llm/utils/cms.py (new file)
@@ -0,0 +1,77 @@
+ import logging, aiohttp
+ from typing import List, Optional
+
+ from pydantic import AliasChoices, BaseModel, ConfigDict, Field
+ from ws_bom_robot_app.llm.models.api import LlmAppTool
+ from ws_bom_robot_app.util import cache_with_ttl
+
+ class CmsAppCredential(BaseModel):
+     app_key: str = Field(..., description="The app key for the credential", validation_alias=AliasChoices("appKey","app_key"))
+     api_key: str = Field(..., description="The api key for the credential", validation_alias=AliasChoices("apiKey","api_key"))
+     model_config = ConfigDict(extra='ignore')
+ class CmsApp(BaseModel):
+     id: str = Field(..., description="Unique identifier for the app")
+     name: str = Field(..., description="Name of the app")
+     credentials: CmsAppCredential = None
+     app_tools: Optional[List[LlmAppTool]] = Field([], validation_alias=AliasChoices("appTools","app_tools"))
+     model_config = ConfigDict(extra='ignore')
+
+ @cache_with_ttl(600) # Cache for 10 minutes
+ async def get_apps() -> list[CmsApp]:
+     import json, os
+     from ws_bom_robot_app.config import config
+     class DictObject(object):
+         def __init__(self, dict_):
+             self.__dict__.update(dict_)
+         def __repr__(self):
+             return json.dumps(self.__dict__)
+         @classmethod
+         def from_dict(cls, d):
+             return json.loads(json.dumps(d), object_hook=DictObject)
+     def __attr(obj, *attrs, default=None):
+         for attr in attrs:
+             obj = getattr(obj, attr, default)
+             if obj is None:
+                 break
+         return obj
+     host = config.robot_cms_host
+     if host:
+         url = f"{host}/api/llmApp?depth=1&pagination=false"
+         auth = config.robot_cms_auth
+         headers = {"Authorization": auth} if auth else {}
+         async with aiohttp.ClientSession() as session:
+             async with session.get(url, headers=headers) as response:
+                 if response.status == 200:
+                     _apps=[]
+                     cms_apps = await response.json()
+                     for cms_app in cms_apps:
+                         if __attr(cms_app,"isActive",default=True) == True:
+                             _cms_app_dict = DictObject.from_dict(cms_app)
+                             _app: CmsApp = CmsApp(
+                                 id=_cms_app_dict.id,
+                                 name=_cms_app_dict.name,
+                                 credentials=CmsAppCredential(app_key=_cms_app_dict.settings.credentials.appKey,api_key=_cms_app_dict.settings.credentials.apiKey),
+                                 app_tools=[LlmAppTool(**tool) for tool in cms_app.get('settings').get('appTools',[])]
+                             )
+                             if _app.app_tools:
+                                 for tool in _app.app_tools:
+                                     _knowledgeBase = tool.knowledgeBase
+                                     tool.vector_db = _knowledgeBase.get('vectorDbFile').get('filename') if _knowledgeBase.get('vectorDbFile') else None
+                                     tool.vector_type = _knowledgeBase.get('vectorDbType') if _knowledgeBase.get('vectorDbType') else 'faiss'
+                                     del tool.knowledgeBase
+                             _apps.append(_app)
+                     return _apps
+                 else:
+                     logging.error(f"Error fetching cms apps: {response.status}")
+     else:
+         logging.error("robot_cms_host environment variable is not set.")
+     return []
+
+ async def get_app_by_id(app_id: str) -> CmsApp | None:
+     apps = await get_apps()
+     app = next((a for a in apps if a.id == app_id), None)
+     if app:
+         return app
+     else:
+         logging.error(f"App with id {app_id} not found.")
+         return None
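`cms.py` is new in 0.0.62: it fetches app definitions from the CMS at `/api/llmApp`, caches the result for 600 seconds via `@cache_with_ttl`, skips apps whose `isActive` flag is false, and flattens each tool's `knowledgeBase` into `vector_db`/`vector_type` fields. A hedged usage sketch (the app id is a placeholder; `get_apps` returns `[]` and logs an error when `robot_cms_host` is unset):

```python
import asyncio
from ws_bom_robot_app.llm.utils.cms import get_app_by_id, get_apps

async def main():
    apps = await get_apps()  # cached for 10 minutes
    print([a.name for a in apps])
    app = await get_app_by_id("app-id")  # placeholder id; returns CmsApp | None
    if app:
        for tool in app.app_tools or []:
            print(tool.vector_db, tool.vector_type)  # remapped from knowledgeBase

asyncio.run(main())
```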
ws_bom_robot_app/llm/utils/download.py
@@ -1,79 +1,79 @@
- from typing import List,Optional
- import os, logging, aiohttp, asyncio
- from tqdm.asyncio import tqdm
-
- async def download_files(urls: List[str], destination_folder: str, authorization: str = None):
-     tasks = [download_file(file, os.path.join(destination_folder, os.path.basename(file)), authorization=authorization) for file in urls]
-     results = await asyncio.gather(*tasks, return_exceptions=False)
-     for i, result in enumerate(results):
-         if not result:
-             raise Exception(f"Download failed for file: {urls[i]}")
-
- async def download_file(url: str, destination: str, chunk_size: int = 8192, authorization: str = None) -> Optional[str]:
-     """
-     Downloads a file from a given URL to a destination path asynchronously.
-
-     Args:
-         url: The URL of the file to download
-         destination: The local path where the file should be saved
-         chunk_size: Size of chunks to download (default: 8192 bytes)
-
-     Returns:
-         str: Path to the downloaded file if successful, None otherwise
-
-     Raises:
-         Various exceptions are caught and logged
-     """
-     try:
-         # Ensure the destination directory exists
-         os.makedirs(os.path.dirname(os.path.abspath(destination)), exist_ok=True)
-
-         async with aiohttp.ClientSession() as session:
-             if authorization:
-                 headers = {'Authorization': authorization}
-                 session.headers.update(headers)
-             async with session.get(url) as response:
-                 # Check if the request was successful
-                 if response.status != 200:
-                     logging.error(f"Failed to download file. Status code: {response.status}")
-                     return None
-
-                 # Get the total file size if available
-                 total_size = int(response.headers.get('content-length', 0))
-                 # Open the destination file and write chunks
-                 with open(destination, 'wb') as f:
-                     with tqdm(
-                         total=total_size,
-                         desc="Downloading",
-                         unit='B',
-                         unit_scale=True,
-                         unit_divisor=1024
-                     ) as pbar:
-                         async for chunk in response.content.iter_chunked(chunk_size):
-                             if chunk:
-                                 f.write(chunk)
-                                 pbar.update(len(chunk))
-
-         logging.info(f"File downloaded successfully to {destination}")
-         return destination
-
-     except aiohttp.ClientError as e:
-         logging.error(f"Network error occurred: {str(e)}")
-         return None
-     except asyncio.TimeoutError:
-         logging.error("Download timed out")
-         return None
-     except IOError as e:
-         logging.error(f"IO error occurred: {str(e)}")
-         return None
-     except Exception as e:
-         logging.error(f"Unexpected error occurred: {str(e)}")
-         return None
-     finally:
-         # If download failed and file was partially created, clean it up
-         if os.path.exists(destination) and os.path.getsize(destination) == 0:
-             try:
-                 os.remove(destination)
-                 logging.info(f"Cleaned up incomplete download: {destination}")
-             except OSError:
-                 pass
+ from typing import List,Optional
+ import os, logging, aiohttp, asyncio
+ from tqdm.asyncio import tqdm
+
+ async def download_files(urls: List[str], destination_folder: str, authorization: str = None):
+     tasks = [download_file(file, os.path.join(destination_folder, os.path.basename(file)), authorization=authorization) for file in urls]
+     results = await asyncio.gather(*tasks, return_exceptions=False)
+     for i, result in enumerate(results):
+         if not result:
+             raise Exception(f"Download failed for file: {urls[i]}")
+
+ async def download_file(url: str, destination: str, chunk_size: int = 8192, authorization: str = None) -> Optional[str]:
+     """
+     Downloads a file from a given URL to a destination path asynchronously.
+
+     Args:
+         url: The URL of the file to download
+         destination: The local path where the file should be saved
+         chunk_size: Size of chunks to download (default: 8192 bytes)
+
+     Returns:
+         str: Path to the downloaded file if successful, None otherwise
+
+     Raises:
+         Various exceptions are caught and logged
+     """
+     try:
+         # Ensure the destination directory exists
+         os.makedirs(os.path.dirname(os.path.abspath(destination)), exist_ok=True)
+
+         async with aiohttp.ClientSession() as session:
+             if authorization:
+                 headers = {'Authorization': authorization}
+                 session.headers.update(headers)
+             async with session.get(url) as response:
+                 # Check if the request was successful
+                 if response.status != 200:
+                     logging.error(f"Failed to download file. Status code: {response.status}")
+                     return None
+
+                 # Get the total file size if available
+                 total_size = int(response.headers.get('content-length', 0))
+                 # Open the destination file and write chunks
+                 with open(destination, 'wb') as f:
+                     with tqdm(
+                         total=total_size,
+                         desc="Downloading",
+                         unit='B',
+                         unit_scale=True,
+                         unit_divisor=1024
+                     ) as pbar:
+                         async for chunk in response.content.iter_chunked(chunk_size):
+                             if chunk:
+                                 f.write(chunk)
+                                 pbar.update(len(chunk))
+
+         logging.info(f"File downloaded successfully to {destination}")
+         return destination
+
+     except aiohttp.ClientError as e:
+         logging.error(f"Network error occurred: {str(e)}")
+         return None
+     except asyncio.TimeoutError:
+         logging.error("Download timed out")
+         return None
+     except IOError as e:
+         logging.error(f"IO error occurred: {str(e)}")
+         return None
+     except Exception as e:
+         logging.error(f"Unexpected error occurred: {str(e)}")
+         return None
+     finally:
+         # If download failed and file was partially created, clean it up
+         if os.path.exists(destination) and os.path.getsize(destination) == 0:
+             try:
+                 os.remove(destination)
+                 logging.info(f"Cleaned up incomplete download: {destination}")
+             except OSError:
+                 pass
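This hunk removes and re-adds all 79 lines unchanged, so it is apparently a whitespace or line-ending rewrite. The module's contract is unchanged: `download_file` logs and swallows its own errors (returning `None`), while `download_files` fails fast by raising on the first falsy result. A usage sketch with placeholder values:

```python
import asyncio
from ws_bom_robot_app.llm.utils.download import download_files

async def main():
    urls = ["https://example.com/kb/a.pdf"]  # placeholder URL
    try:
        # raises Exception("Download failed for file: ...") if any download returned None
        await download_files(urls, "./downloads", authorization="Bearer <token>")
    except Exception as exc:
        print(f"at least one download failed: {exc}")

asyncio.run(main())
```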
ws_bom_robot_app/llm/utils/print.py
@@ -1,29 +1,29 @@
- import os, sys, json
-
- class HiddenPrints:
-     def __enter__(self):
-         self._original_stdout = sys.stdout
-         self._original_stderr = sys.stderr
-
-         sys.stdout = open(os.devnull, 'w')
-         sys.stderr = open(os.devnull, 'w')
-
-     def __exit__(self, exc_type, exc_val, exc_tb):
-         sys.stdout.close()
-         sys.stderr.close()
-         sys.stdout = self._original_stdout
-         sys.stderr = self._original_stderr
-
- def print_json(data) -> str:
-     return print_single_json(data) + ","
-
- def print_single_json(data) -> str:
-     return json.dumps(data, sort_keys=True)
-
- def print_string(data: str) -> str:
-     if data != "":
-         return print_json(data)
-
- def print_single_string(data: str) -> str:
-     if data != "":
-         return print_single_json(data)
+ import os, sys, json
+
+ class HiddenPrints:
+     def __enter__(self):
+         self._original_stdout = sys.stdout
+         self._original_stderr = sys.stderr
+
+         sys.stdout = open(os.devnull, 'w')
+         sys.stderr = open(os.devnull, 'w')
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         sys.stdout.close()
+         sys.stderr.close()
+         sys.stdout = self._original_stdout
+         sys.stderr = self._original_stderr
+
+ def print_json(data) -> str:
+     return print_single_json(data) + ","
+
+ def print_single_json(data) -> str:
+     return json.dumps(data, sort_keys=True)
+
+ def print_string(data: str) -> str:
+     if data != "":
+         return print_json(data)
+
+ def print_single_string(data: str) -> str:
+     if data != "":
+         return print_single_json(data)
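Another unchanged-content hunk. Worth noting when reading the module: `HiddenPrints` swaps `sys.stdout`/`sys.stderr` for `os.devnull` handles for the duration of a `with` block, and `print_string`/`print_single_string` implicitly return `None` for empty input. A minimal sketch:

```python
from ws_bom_robot_app.llm.utils.print import HiddenPrints, print_string

with HiddenPrints():  # stdout/stderr redirected to os.devnull inside the block
    print("suppressed")
chunk = print_string("done")  # '"done",' — trailing comma for stream concatenation
print(chunk)
```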
ws_bom_robot_app/llm/vector_store/generator.py
@@ -1,137 +1,137 @@
- import os, gc, shutil, logging, traceback
- import asyncio, aiofiles, aiofiles.os
- from fastapi import HTTPException
- from fastapi.responses import StreamingResponse
- from langchain_core.documents import Document
- from ws_bom_robot_app.llm.vector_store.loader.base import Loader
- from ws_bom_robot_app.llm.models.api import RulesRequest, KbRequest, VectorDbResponse
- from ws_bom_robot_app.llm.vector_store.integration.manager import IntegrationManager
- from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
- from ws_bom_robot_app.config import config
- from ws_bom_robot_app.llm.models.kb import load_endpoints
- from ws_bom_robot_app.llm.utils.download import download_files
-
- async def _cleanup_directory(directory_path: str):
-     if os.path.exists(directory_path):
-         await asyncio.to_thread(shutil.rmtree, directory_path)
-
- #@timer
- async def rules(rq: RulesRequest) -> VectorDbResponse:
-     _config = rq.config()
-     db_name = rq.out_name()
-     store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
-     try:
-         await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(),[Document(page_content=rule, metadata={"source": "rules"}) for rule in rq.rules], store_path) #type: ignore
-         db_file_path = shutil.make_archive(os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name), "zip", store_path)
-         return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
-     except Exception as e:
-         try:
-             await _cleanup_directory(store_path)
-         finally:
-             return VectorDbResponse(success = False, error = str(e))
-     finally:
-         gc.collect()
-
- #@atimer
- async def kb(rq: KbRequest) -> VectorDbResponse:
-     os.environ['MPLCONFIGDIR'] = './tmp/.matplotlib'
-     _config = rq.config()
-     db_name = rq.out_name()
-     src_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_src)
-     working_path = os.path.join(src_path, db_name)
-
-     if all([not rq.files,not rq.endpoints,not rq.integrations]):
-         return VectorDbResponse(success = False, error = "No files, endpoints or integrations provided")
-     else:
-         await aiofiles.os.makedirs(src_path, exist_ok=True)
-         await aiofiles.os.makedirs(working_path, exist_ok=True)
-
-     documents: list[Document] = []
-     # Download/copy all files
-     if rq.files:
-         try:
-             loaders = Loader(working_path)
-             filter_file_extensions = loaders.managed_file_extensions()
-             files_to_download = [file for file in rq.files if not os.path.exists(os.path.join(src_path, os.path.basename(file)))]
-             if files_to_download:
-                 await download_files(
-                     [f"{_config.robot_cms_host}/{_config.robot_cms_kb_folder}/{os.path.basename(file)}" for file in files_to_download if any([file.endswith(ext) for ext in filter_file_extensions])],
-                     src_path, authorization=_config.robot_cms_auth)
-             # copy files to working tmp folder
-             for file in rq.files:
-                 async with aiofiles.open(os.path.join(src_path, os.path.basename(file)), 'rb') as src_file:
-                     async with aiofiles.open(os.path.join(working_path, os.path.basename(file)), 'wb') as dest_file:
-                         await dest_file.write(await src_file.read())
-             #load files
-             try:
-                 documents.extend(await loaders.load())
-             except Exception as e:
-                 tb = traceback.format_exc()
-                 _error = f"File loader failure: {e} | {tb}"
-                 logging.warning(_error)
-                 return VectorDbResponse(success = False, error = _error)
-         except Exception as e:
-             await _cleanup_directory(working_path)
-             return VectorDbResponse(success = False, error = f"Failed to download file {e}")
-
-     if rq.endpoints:
-         try:
-             documents.extend(await load_endpoints(rq.endpoints, working_path))
-         except Exception as e:
-             await _cleanup_directory(working_path)
-             tb = traceback.format_exc()
-             _error = f"Endpoint failure: {e} | {tb}"
-             logging.warning(_error)
-             return VectorDbResponse(success = False, error = _error)
-
-     if rq.integrations:
-         tasks = []
-         for integration in rq.integrations:
-             tasks.append(
-                 IntegrationManager
-                 .get_strategy(integration.type.lower(), working_path, integration.__pydantic_extra__) #type: ignore
-                 .load()
-             )
-         try:
-             integration_documents = await asyncio.gather(*tasks)
-             for docs in integration_documents:
-                 documents.extend(docs)
-         except Exception as e:
-             await _cleanup_directory(working_path)
-             tb = traceback.format_exc()
-             _error = f"Integration failure: {e} | {tb}"
-             logging.warning(_error)
-             return VectorDbResponse(success=False, error=_error)
-
-     #cleanup
-     await _cleanup_directory(working_path)
-
-     if documents and len(documents) > 0:
-         try:
-             store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
-             db_file_path = await aiofiles.os.wrap(shutil.make_archive)(
-                 os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name),
-                 "zip",
-                 await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(), documents, store_path, return_folder_path=True)
-             )
-             return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
-         except Exception as e:
-             await _cleanup_directory(store_path)
-             return VectorDbResponse(success = False, error = str(e))
-         finally:
-             del documents
-             gc.collect()
-     else:
-         _error = "No documents found in the knowledgebase folder"
-         logging.warning(_error)
-         return VectorDbResponse(success = False, error = _error)
-
- async def kb_stream_file(filename: str):
-     file_path = os.path.join(config.robot_data_folder, config.robot_data_db_folder, config.robot_data_db_folder_out, filename)
-     if not os.path.isfile(file_path):
-         raise HTTPException(status_code=404, detail="File not found")
-     def iter_file():
-         with open(file_path, mode="rb") as file:
-             while chunk := file.read(1024*8):
-                 yield chunk
-     return StreamingResponse(iter_file(), media_type="application/octet-stream", headers={"Content-Disposition": f"attachment; filename={filename}"})
+ import os, gc, shutil, logging, traceback
+ import asyncio, aiofiles, aiofiles.os
+ from fastapi import HTTPException
+ from fastapi.responses import StreamingResponse
+ from langchain_core.documents import Document
+ from ws_bom_robot_app.llm.vector_store.loader.base import Loader
+ from ws_bom_robot_app.llm.models.api import RulesRequest, KbRequest, VectorDbResponse
+ from ws_bom_robot_app.llm.vector_store.integration.manager import IntegrationManager
+ from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
+ from ws_bom_robot_app.config import config
+ from ws_bom_robot_app.llm.models.kb import load_endpoints
+ from ws_bom_robot_app.llm.utils.download import download_files
+
+ async def _cleanup_directory(directory_path: str):
+     if os.path.exists(directory_path):
+         await asyncio.to_thread(shutil.rmtree, directory_path)
+
+ #@timer
+ async def rules(rq: RulesRequest) -> VectorDbResponse:
+     _config = rq.config()
+     db_name = rq.out_name()
+     store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
+     try:
+         await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(),[Document(page_content=rule, metadata={"source": "rules"}) for rule in rq.rules], store_path) #type: ignore
+         db_file_path = shutil.make_archive(os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name), "zip", store_path)
+         return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
+     except Exception as e:
+         try:
+             await _cleanup_directory(store_path)
+         finally:
+             return VectorDbResponse(success = False, error = str(e))
+     finally:
+         gc.collect()
+
+ #@atimer
+ async def kb(rq: KbRequest) -> VectorDbResponse:
+     os.environ['MPLCONFIGDIR'] = './tmp/.matplotlib'
+     _config = rq.config()
+     db_name = rq.out_name()
+     src_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_src)
+     working_path = os.path.join(src_path, db_name)
+
+     if all([not rq.files,not rq.endpoints,not rq.integrations]):
+         return VectorDbResponse(success = False, error = "No files, endpoints or integrations provided")
+     else:
+         await aiofiles.os.makedirs(src_path, exist_ok=True)
+         await aiofiles.os.makedirs(working_path, exist_ok=True)
+
+     documents: list[Document] = []
+     # Download/copy all files
+     if rq.files:
+         try:
+             loaders = Loader(working_path)
+             filter_file_extensions = loaders.managed_file_extensions()
+             files_to_download = [file for file in rq.files if not os.path.exists(os.path.join(src_path, os.path.basename(file)))]
+             if files_to_download:
+                 await download_files(
+                     [f"{_config.robot_cms_host}/{_config.robot_cms_kb_folder}/{os.path.basename(file)}" for file in files_to_download if any([file.endswith(ext) for ext in filter_file_extensions])],
+                     src_path, authorization=_config.robot_cms_auth)
+             # copy files to working tmp folder
+             for file in rq.files:
+                 async with aiofiles.open(os.path.join(src_path, os.path.basename(file)), 'rb') as src_file:
+                     async with aiofiles.open(os.path.join(working_path, os.path.basename(file)), 'wb') as dest_file:
+                         await dest_file.write(await src_file.read())
+             #load files
+             try:
+                 documents.extend(await loaders.load())
+             except Exception as e:
+                 tb = traceback.format_exc()
+                 _error = f"File loader failure: {e} | {tb}"
+                 logging.warning(_error)
+                 return VectorDbResponse(success = False, error = _error)
+         except Exception as e:
+             await _cleanup_directory(working_path)
+             return VectorDbResponse(success = False, error = f"Failed to download file {e}")
+
+     if rq.endpoints:
+         try:
+             documents.extend(await load_endpoints(rq.endpoints, working_path))
+         except Exception as e:
+             await _cleanup_directory(working_path)
+             tb = traceback.format_exc()
+             _error = f"Endpoint failure: {e} | {tb}"
+             logging.warning(_error)
+             return VectorDbResponse(success = False, error = _error)
+
+     if rq.integrations:
+         tasks = []
+         for integration in rq.integrations:
+             tasks.append(
+                 IntegrationManager
+                 .get_strategy(integration.type.lower(), working_path, integration.__pydantic_extra__) #type: ignore
+                 .load()
+             )
+         try:
+             integration_documents = await asyncio.gather(*tasks)
+             for docs in integration_documents:
+                 documents.extend(docs)
+         except Exception as e:
+             await _cleanup_directory(working_path)
+             tb = traceback.format_exc()
+             _error = f"Integration failure: {e} | {tb}"
+             logging.warning(_error)
+             return VectorDbResponse(success=False, error=_error)
+
+     #cleanup
+     await _cleanup_directory(working_path)
+
+     if documents and len(documents) > 0:
+         try:
+             store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
+             db_file_path = await aiofiles.os.wrap(shutil.make_archive)(
+                 os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name),
+                 "zip",
+                 await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(), documents, store_path, return_folder_path=True)
+             )
+             return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
+         except Exception as e:
+             await _cleanup_directory(store_path)
+             return VectorDbResponse(success = False, error = str(e))
+         finally:
+             del documents
+             gc.collect()
+     else:
+         _error = "No documents found in the knowledgebase folder"
+         logging.warning(_error)
+         return VectorDbResponse(success = False, error = _error)
+
+ async def kb_stream_file(filename: str):
+     file_path = os.path.join(config.robot_data_folder, config.robot_data_db_folder, config.robot_data_db_folder_out, filename)
+     if not os.path.isfile(file_path):
+         raise HTTPException(status_code=404, detail="File not found")
+     def iter_file():
+         with open(file_path, mode="rb") as file:
+             while chunk := file.read(1024*8):
+                 yield chunk
+     return StreamingResponse(iter_file(), media_type="application/octet-stream", headers={"Content-Disposition": f"attachment; filename={filename}"})
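This hunk, too, removes and re-adds identical content. Since `kb_stream_file` already builds a FastAPI `StreamingResponse` and raises `HTTPException(404)` itself, exposing it is a one-liner. A hedged sketch; the route path and app wiring below are illustrative, not the package's own `api.py`:

```python
from fastapi import FastAPI
from ws_bom_robot_app.llm.vector_store.generator import kb_stream_file

app = FastAPI()

@app.get("/llm/kb/{filename}")
async def stream_kb(filename: str):
    # 404s if the zipped store is not in the configured out folder
    return await kb_stream_file(filename)
```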