ws-bom-robot-app 0.0.80__py3-none-any.whl → 0.0.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
@@ -1,137 +1,137 @@
  import os, gc, shutil, logging, traceback
  import asyncio, aiofiles, aiofiles.os
  from fastapi import HTTPException
  from fastapi.responses import StreamingResponse
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from ws_bom_robot_app.llm.models.api import RulesRequest, KbRequest, VectorDbResponse
  from ws_bom_robot_app.llm.vector_store.integration.manager import IntegrationManager
  from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
  from ws_bom_robot_app.config import config
  from ws_bom_robot_app.llm.models.kb import load_endpoints
  from ws_bom_robot_app.llm.utils.download import download_files

  async def _cleanup_directory(directory_path: str):
    if os.path.exists(directory_path):
      await asyncio.to_thread(shutil.rmtree, directory_path)

  #@timer
  async def rules(rq: RulesRequest) -> VectorDbResponse:
    _config = rq.config()
    db_name = rq.out_name()
    store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
    try:
      await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(), [Document(page_content=rule, metadata={"source": "rules"}) for rule in rq.rules], store_path) #type: ignore
      db_file_path = shutil.make_archive(os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name), "zip", store_path)
      return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
    except Exception as e:
      try:
        await _cleanup_directory(store_path)
      finally:
        return VectorDbResponse(success = False, error = str(e))
    finally:
      gc.collect()

  #@atimer
  async def kb(rq: KbRequest) -> VectorDbResponse:
    os.environ['MPLCONFIGDIR'] = './tmp/.matplotlib'
    _config = rq.config()
    db_name = rq.out_name()
    src_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_src)
    working_path = os.path.join(src_path, db_name)

    if all([not rq.files, not rq.endpoints, not rq.integrations]):
      return VectorDbResponse(success = False, error = "No files, endpoints or integrations provided")
    else:
      await aiofiles.os.makedirs(src_path, exist_ok=True)
      await aiofiles.os.makedirs(working_path, exist_ok=True)

    documents: list[Document] = []
    # Download/copy all files
    if rq.files:
      try:
        loaders = Loader(working_path)
        filter_file_extensions = loaders.managed_file_extensions()
        files_to_download = [file for file in rq.files if not os.path.exists(os.path.join(src_path, os.path.basename(file)))]
        if files_to_download:
          await download_files(
            [f"{_config.robot_cms_host}/{_config.robot_cms_kb_folder}/{os.path.basename(file)}" for file in files_to_download if any([file.endswith(ext) for ext in filter_file_extensions])],
            src_path, authorization=_config.robot_cms_auth)
        # copy files to working tmp folder
        for file in rq.files:
          async with aiofiles.open(os.path.join(src_path, os.path.basename(file)), 'rb') as src_file:
            async with aiofiles.open(os.path.join(working_path, os.path.basename(file)), 'wb') as dest_file:
              await dest_file.write(await src_file.read())
        # load files
        try:
          documents.extend(await loaders.load())
        except Exception as e:
          tb = traceback.format_exc()
          _error = f"File loader failure: {e} | {tb}"
          logging.warning(_error)
          return VectorDbResponse(success = False, error = _error)
      except Exception as e:
        await _cleanup_directory(working_path)
        return VectorDbResponse(success = False, error = f"Failed to download file {e}")

    if rq.endpoints:
      try:
        documents.extend(await load_endpoints(rq.endpoints, working_path))
      except Exception as e:
        await _cleanup_directory(working_path)
        tb = traceback.format_exc()
        _error = f"Endpoint failure: {e} | {tb}"
        logging.warning(_error)
        return VectorDbResponse(success = False, error = _error)

    if rq.integrations:
      tasks = []
      for integration in rq.integrations:
        tasks.append(
          IntegrationManager
          .get_strategy(integration.type.lower(), working_path, integration.__pydantic_extra__) #type: ignore
          .load()
        )
      try:
        integration_documents = await asyncio.gather(*tasks)
        for docs in integration_documents:
          documents.extend(docs)
      except Exception as e:
        await _cleanup_directory(working_path)
        tb = traceback.format_exc()
        _error = f"Integration failure: {e} | {tb}"
        logging.warning(_error)
        return VectorDbResponse(success=False, error=_error)

    # cleanup
    await _cleanup_directory(working_path)

    if documents and len(documents) > 0:
      try:
        store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
        db_file_path = await aiofiles.os.wrap(shutil.make_archive)(
          os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name),
          "zip",
          await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(), documents, store_path, return_folder_path=True)
        )
        return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
      except Exception as e:
        await _cleanup_directory(store_path)
        return VectorDbResponse(success = False, error = str(e))
      finally:
        del documents
        gc.collect()
    else:
      _error = "No documents found in the knowledgebase folder"
      logging.warning(_error)
      return VectorDbResponse(success = False, error = _error)

  async def kb_stream_file(filename: str):
    file_path = os.path.join(config.robot_data_folder, config.robot_data_db_folder, config.robot_data_db_folder_out, filename)
    if not os.path.isfile(file_path):
      raise HTTPException(status_code=404, detail="File not found")
    def iter_file():
      with open(file_path, mode="rb") as file:
        while chunk := file.read(1024*8):
          yield chunk
    return StreamingResponse(iter_file(), media_type="application/octet-stream", headers={"Content-Disposition": f"attachment; filename={filename}"})
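A note on the `aiofiles.os.wrap(shutil.make_archive)` call in `kb` above: it runs the blocking archive step in a worker thread so the event loop stays responsive. A minimal sketch of the same pattern using only the standard library (the function and path names here are illustrative, not part of the package):

  import asyncio, shutil

  async def archive_store(out_base: str, store_path: str) -> str:
      # shutil.make_archive blocks on disk I/O, so hand it to a worker thread;
      # returns the path of the created "<out_base>.zip" archive.
      return await asyncio.to_thread(shutil.make_archive, out_base, "zip", store_path)

  # usage (hypothetical paths):
  # zip_path = asyncio.run(archive_store("/data/db/out/mydb", "/data/db/store/mydb"))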
@@ -13,6 +13,7 @@ from ws_bom_robot_app.llm.vector_store.integration.sharepoint import Sharepoint
  from ws_bom_robot_app.llm.vector_store.integration.sitemap import Sitemap
  from ws_bom_robot_app.llm.vector_store.integration.slack import Slack
  from ws_bom_robot_app.llm.vector_store.integration.thron import Thron
+ from ws_bom_robot_app.llm.vector_store.integration.shopify import Shopify
  class IntegrationManager:
    _list: dict[str, Type[IntegrationStrategy]] = {
      "llmkbazure": Azure,
@@ -28,6 +29,7 @@ class IntegrationManager:
      "llmkbsitemap": Sitemap,
      "llmkbslack": Slack,
      "llmkbthron": Thron,
+     "llmkbshopify": Shopify,
    }
    @classmethod
    def get_strategy(cls, name: str, knowledgebase_path: str, data: dict[str, str]) -> IntegrationStrategy:
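The two hunks above register the new Shopify connector in the integration registry. A minimal sketch of how such a name-to-class lookup typically resolves (the `get_strategy` body below is an assumption based on its signature, not the package's actual code):

  from typing import Type

  class IntegrationStrategy:  # stand-in for the package's base class
      def __init__(self, knowledgebase_path: str, data: dict[str, str]):
          self.knowledgebase_path = knowledgebase_path
          self.data = data

  class Shopify(IntegrationStrategy):
      pass

  _list: dict[str, Type[IntegrationStrategy]] = {"llmkbshopify": Shopify}

  def get_strategy(name: str, knowledgebase_path: str, data: dict[str, str]) -> IntegrationStrategy:
      # resolve the lowercase key to its strategy class and instantiate it
      return _list[name](knowledgebase_path, data)

  strategy = get_strategy("llmkbshopify", "/tmp/kb", {"shopName": "demo"})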
@@ -0,0 +1,144 @@
+ import asyncio, logging, aiohttp
+ from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
+ from unstructured_ingest.v2.processes.connectors.fsspec.sftp import SftpConnectionConfig, SftpAccessConfig, SftpDownloaderConfig, SftpIndexerConfig
+ from langchain_core.documents import Document
+ from ws_bom_robot_app.llm.vector_store.loader.base import Loader
+ from typing import List, Union, Optional
+ from pydantic import BaseModel, Field, AliasChoices, field_validator
+ import json
+ import os
+
+ class ShopifyParams(BaseModel):
+   """
+   ShopifyParams is a model that defines the parameters required for Shopify integration.
+
+   Attributes:
+     shop_name (str): The shop name for Shopify.
+     access_token (str): The access token for Shopify.
+     graphql_query (Union[str, dict]): The GraphQL query string or dict for Shopify.
+   """
+   shop_name: str = Field(validation_alias=AliasChoices("shopName","shop_name"))
+   access_token: str = Field(validation_alias=AliasChoices("accessToken","access_token"))
+   graphql_query: Union[str, dict] = Field(validation_alias=AliasChoices("graphqlQuery","graphql_query"))
+
+   @field_validator('graphql_query')
+   @classmethod
+   def extract_query_string(cls, v):
+     """Extract the query string from dict format if needed"""
+     if isinstance(v, dict) and 'query' in v:
+       return v['query']
+     return v
+
+ class Shopify(IntegrationStrategy):
+   def __init__(self, knowledgebase_path: str, data: dict[str, Union[str,int,list]]):
+     super().__init__(knowledgebase_path, data)
+     self.__data = ShopifyParams.model_validate(self.data)
+
+   def working_subdirectory(self) -> str:
+     return 'shopify'
+
+   async def run(self) -> None:
+     _data = await self.__get_data()
+     json_file_path = os.path.join(self.working_directory, 'shopify_data.json')
+     with open(json_file_path, 'w', encoding='utf-8') as f:
+       json.dump(_data, f, ensure_ascii=False)
+
+   async def load(self) -> list[Document]:
+     await self.run()
+     await asyncio.sleep(1)
+     return await Loader(self.working_directory).load()
+
+   async def __get_data(self, page_size: int = 50) -> List[dict]:
+     # API URL
+     url = f"https://{self.__data.shop_name}.myshopify.com/admin/api/2024-07/graphql.json"
+
+     # Headers
+     headers = {
+       "X-Shopify-Access-Token": self.__data.access_token,
+       "Content-Type": "application/json"
+     }
+
+     all_products: List[dict] = []
+     has_next_page = True
+     cursor = None
+     retry_count = 0
+     max_retries = 5
+
+     while has_next_page:
+       # Query variables
+       variables = {
+         "first": page_size
+       }
+
+       if cursor:
+         variables["after"] = cursor
+
+       # Request payload
+       payload = {
+         "query": self.__data.graphql_query,
+         "variables": variables
+       }
+
+       try:
+         # Send the request
+         async with aiohttp.ClientSession() as session:
+           async with session.post(url, headers=headers, json=payload) as response:
+             # Check that the response is JSON
+             try:
+               data = await response.json()
+             except aiohttp.ContentTypeError:
+               text = await response.text()
+               logging.error(f"Non-JSON response received. Status code: {response.status}")
+               logging.error(f"Content: {text}")
+               raise Exception("Invalid response from API")
+
+             # Throttling handling
+             if "errors" in data:
+               error = data["errors"][0]
+               if error.get("extensions", {}).get("code") == "THROTTLED":
+                 retry_count += 1
+                 if retry_count > max_retries:
+                   raise Exception("Too many throttling attempts. Stopping execution.")
+
+                 # Wait a bit longer on each attempt
+                 wait_time = 2 ** retry_count # exponential backoff
+                 print(f"Rate limit reached. Waiting {wait_time} seconds... (Attempt {retry_count}/{max_retries})")
+                 await asyncio.sleep(wait_time)
+                 continue
+               else:
+                 raise Exception(f"GraphQL errors: {data['errors']}")
+
+             # Reset the retry counter after a successful request
+             retry_count = 0
+
+             # Extract the data
+             products_data = data["data"]["products"]
+             edges = products_data["edges"]
+             page_info = products_data["pageInfo"]
+
+             # Append the products to the list
+             for edge in edges:
+               all_products.append(edge["node"])
+
+             # Update the cursor and the pagination flag
+             has_next_page = page_info["hasNextPage"]
+             cursor = page_info["endCursor"]
+
+             print(f"Retrieved {len(edges)} products. Total: {len(all_products)}")
+
+             # Short pause to avoid saturating the API
+             await asyncio.sleep(0.1)
+
+       except aiohttp.ClientError as e:
+         logging.error(f"Connection error: {e}")
+         retry_count += 1
+         if retry_count <= max_retries:
+           wait_time = 2 ** retry_count
+           logging.warning(f"Retrying in {wait_time} seconds...")
+           await asyncio.sleep(wait_time)
+           continue
+         else:
+           raise Exception("Too many network errors. Stopping execution.")
+
+     logging.info(f"Data retrieval completed! Total products: {len(all_products)}")
+     return all_products
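The pagination loop above assumes the configured `graphql_query` accepts `first`/`after` variables and selects `edges`, `pageInfo.hasNextPage`, and `pageInfo.endCursor` under `products`. A query of roughly this shape would satisfy that contract (the field selection inside `node` is illustrative, and the token is a placeholder):

  GRAPHQL_QUERY = """
  query Products($first: Int!, $after: String) {
    products(first: $first, after: $after) {
      edges { node { id title handle description } }
      pageInfo { hasNextPage endCursor }
    }
  }
  """

  # Hypothetical integration payload; keys match the ShopifyParams aliases.
  shopify_data = {
      "shopName": "my-shop",          # -> https://my-shop.myshopify.com
      "accessToken": "shpat_xxx",     # placeholder admin API token
      "graphqlQuery": GRAPHQL_QUERY,  # or {"query": GRAPHQL_QUERY}
  }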
@@ -1,103 +1,103 @@
  import asyncio, logging, aiohttp
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
  from unstructured_ingest.v2.processes.connectors.fsspec.sftp import SftpConnectionConfig, SftpAccessConfig, SftpDownloaderConfig, SftpIndexerConfig
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from typing import List, Union, Optional
  from pydantic import BaseModel, Field, AliasChoices
  import json
  import os

  class ThronParams(BaseModel):
    """
    ThronParams is a model that defines the parameters required for Thron integration.

    Attributes:
      organization_name (str): The organization name for Thron.
      attribute_fields (Optional[List[str]]): Additional product attribute fields to fetch.
      client_id (str): The client ID for Thron.
      client_secret (str): The client secret for Thron.
    """
    organization_name: str = Field(validation_alias=AliasChoices("organizationName","organization_name"))
    attribute_fields: Optional[List[str]] = Field(default=None, validation_alias=AliasChoices("attributeFields","attribute_fields"))
    client_id: str = Field(validation_alias=AliasChoices("clientId","client_id"))
    client_secret: str = Field(validation_alias=AliasChoices("clientSecret","client_secret"))

  class Thron(IntegrationStrategy):
    def __init__(self, knowledgebase_path: str, data: dict[str, Union[str,int,list]]):
      super().__init__(knowledgebase_path, data)
      self.__data = ThronParams.model_validate(self.data)

    def working_subdirectory(self) -> str:
      return 'thron'

    async def run(self) -> None:
      _data = await self.__get_data()
      transformed_data = self.__transform_data(_data)
      json_file_path = os.path.join(self.working_directory, 'thron_data.json')
      with open(json_file_path, 'w', encoding='utf-8') as f:
        json.dump(transformed_data, f, indent=2, ensure_ascii=False)

    async def load(self) -> list[Document]:
      await self.run()
      await asyncio.sleep(1)
      return await Loader(self.working_directory).load()

    async def __get_auth_token(self) -> Optional[str]:
      try:
        async with aiohttp.ClientSession() as session:
          auth_data = {
            "grant_type": "client_credentials",
            "client_id": self.__data.client_id,
            "client_secret": self.__data.client_secret
          }
          headers = {
            "accept": "application/json",
            "Content-Type": "application/x-www-form-urlencoded"
          }
          async with session.post("https://websolute.thron.com/api/v1/authentication/oauth2/token", data=auth_data, headers=headers) as response:
            result = await response.json()
            return result.get("access_token", "")
      except Exception as e:
        logging.error(f"Error fetching Thron auth token: {e}")
        return None

    async def __get_data(self) -> list:
      try:
        token = await self.__get_auth_token()
        if not token:
          logging.error("Failed to obtain Thron authentication token.")
          return []
        attribute_fields = ",".join(self.__data.attribute_fields) if self.__data.attribute_fields else ""
        async with aiohttp.ClientSession() as session:
          headers = {
            "accept": "application/json",
            "Authorization": f"Bearer {token}"
          }
          async with session.get(f"https://{self.__data.organization_name}.thron.com/api/v1/product-data/products?attributeFields=product_id,{attribute_fields}", headers=headers) as response:
            result = await response.json()
            return result.get("items", [])
      except Exception as e:
        logging.error(f"Error fetching Thron product data: {e}")
        return []

    def __transform_data(self, data: list) -> list:
      _data = []
      for item in data:
        if item.get("hierarchyLevel") == "MASTER":
          # Iterate through variants to find the product_id
          for item_variant in data:
            if item_variant.get("hierarchyLevel") == "VARIANT":
              for attr in item.get("attributes", []):
                if attr.get("code") == "product_id" and attr.get("identifier") == item_variant.get("variation").get("master").split(":")[-1]:
                  # Initialize variants list if it doesn't exist
                  if "variants" not in item:
                    item["variants"] = []
                  item["variants"].append(item_variant)
                  _data.append(item)
                  break
        elif item.get("hierarchyLevel") == "SIMPLE":
          _data.append(item)
      return _data
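For reference, `__transform_data` links VARIANT items back onto their MASTER via the `product_id` attribute and passes SIMPLE items through unchanged. A tiny illustration of the input shape it expects (all values invented):

  items = [
      {"hierarchyLevel": "MASTER",
       "attributes": [{"code": "product_id", "identifier": "P1"}]},
      {"hierarchyLevel": "VARIANT",
       "variation": {"master": "prod:P1"}},  # the suffix after ":" must equal the MASTER's identifier
      {"hierarchyLevel": "SIMPLE"},
  ]
  # After transformation the MASTER carries item["variants"] == [<the VARIANT>],
  # and the SIMPLE item is appended as-is.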