ws-bom-robot-app 0.0.63__py3-none-any.whl → 0.0.103__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (51)
  1. ws_bom_robot_app/config.py +30 -8
  2. ws_bom_robot_app/cron_manager.py +13 -12
  3. ws_bom_robot_app/llm/agent_context.py +1 -1
  4. ws_bom_robot_app/llm/agent_handler.py +11 -12
  5. ws_bom_robot_app/llm/agent_lcel.py +80 -18
  6. ws_bom_robot_app/llm/api.py +69 -7
  7. ws_bom_robot_app/llm/evaluator.py +319 -0
  8. ws_bom_robot_app/llm/main.py +51 -28
  9. ws_bom_robot_app/llm/models/api.py +40 -6
  10. ws_bom_robot_app/llm/nebuly_handler.py +18 -15
  11. ws_bom_robot_app/llm/providers/llm_manager.py +233 -75
  12. ws_bom_robot_app/llm/tools/tool_builder.py +4 -1
  13. ws_bom_robot_app/llm/tools/tool_manager.py +48 -22
  14. ws_bom_robot_app/llm/utils/chunker.py +6 -1
  15. ws_bom_robot_app/llm/utils/cleanup.py +81 -0
  16. ws_bom_robot_app/llm/utils/cms.py +60 -14
  17. ws_bom_robot_app/llm/utils/download.py +112 -8
  18. ws_bom_robot_app/llm/vector_store/db/base.py +50 -0
  19. ws_bom_robot_app/llm/vector_store/db/chroma.py +28 -8
  20. ws_bom_robot_app/llm/vector_store/db/faiss.py +35 -8
  21. ws_bom_robot_app/llm/vector_store/db/qdrant.py +29 -14
  22. ws_bom_robot_app/llm/vector_store/integration/api.py +216 -0
  23. ws_bom_robot_app/llm/vector_store/integration/azure.py +1 -1
  24. ws_bom_robot_app/llm/vector_store/integration/base.py +58 -15
  25. ws_bom_robot_app/llm/vector_store/integration/confluence.py +33 -5
  26. ws_bom_robot_app/llm/vector_store/integration/dropbox.py +1 -1
  27. ws_bom_robot_app/llm/vector_store/integration/gcs.py +1 -1
  28. ws_bom_robot_app/llm/vector_store/integration/github.py +22 -22
  29. ws_bom_robot_app/llm/vector_store/integration/googledrive.py +46 -17
  30. ws_bom_robot_app/llm/vector_store/integration/jira.py +93 -60
  31. ws_bom_robot_app/llm/vector_store/integration/manager.py +6 -2
  32. ws_bom_robot_app/llm/vector_store/integration/s3.py +1 -1
  33. ws_bom_robot_app/llm/vector_store/integration/sftp.py +1 -1
  34. ws_bom_robot_app/llm/vector_store/integration/sharepoint.py +7 -14
  35. ws_bom_robot_app/llm/vector_store/integration/shopify.py +143 -0
  36. ws_bom_robot_app/llm/vector_store/integration/sitemap.py +6 -1
  37. ws_bom_robot_app/llm/vector_store/integration/slack.py +3 -2
  38. ws_bom_robot_app/llm/vector_store/integration/thron.py +236 -0
  39. ws_bom_robot_app/llm/vector_store/loader/base.py +52 -8
  40. ws_bom_robot_app/llm/vector_store/loader/docling.py +71 -33
  41. ws_bom_robot_app/main.py +148 -146
  42. ws_bom_robot_app/subprocess_runner.py +106 -0
  43. ws_bom_robot_app/task_manager.py +204 -53
  44. ws_bom_robot_app/util.py +6 -0
  45. {ws_bom_robot_app-0.0.63.dist-info → ws_bom_robot_app-0.0.103.dist-info}/METADATA +158 -75
  46. ws_bom_robot_app-0.0.103.dist-info/RECORD +76 -0
  47. ws_bom_robot_app/llm/settings.py +0 -4
  48. ws_bom_robot_app/llm/utils/kb.py +0 -34
  49. ws_bom_robot_app-0.0.63.dist-info/RECORD +0 -72
  50. {ws_bom_robot_app-0.0.63.dist-info → ws_bom_robot_app-0.0.103.dist-info}/WHEEL +0 -0
  51. {ws_bom_robot_app-0.0.63.dist-info → ws_bom_robot_app-0.0.103.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/llm/vector_store/integration/api.py (new file)
@@ -0,0 +1,216 @@
+ import asyncio, logging, aiohttp
+ from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy
+ from langchain_core.documents import Document
+ from ws_bom_robot_app.llm.vector_store.loader.base import Loader
+ from typing import List, Union, Optional, Dict, Any, Literal
+ from pydantic import BaseModel, Field, AliasChoices, field_validator
+ import json
+ import os
+
+
+ class AuthConfig(BaseModel):
+   """
+   Configuration for API authentication.
+
+   Attributes:
+     type: Type of authentication (bearer, basic, api_key, custom, none)
+     token: Bearer token or API key value
+     username: Username for basic auth
+     password: Password for basic auth
+     header_name: Custom header name for API key
+     prefix: Prefix for the auth value (e.g., 'Bearer', 'Token')
+   """
+   type: Literal["bearer", "basic", "api_key", "custom", "none"] = Field(default="none")
+   token: Optional[str] = Field(default=None)
+   username: Optional[str] = Field(default=None)
+   password: Optional[str] = Field(default=None)
+   header_name: Optional[str] = Field(default=None, validation_alias=AliasChoices("headerName", "header_name"))
+   prefix: Optional[str] = Field(default=None)
+
+
+ class ApiParams(BaseModel):
+   """
+   Generic API Integration Parameters.
+
+   Attributes:
+     url: The base URL of the API endpoint
+     method: HTTP method (GET, POST, PUT, DELETE, PATCH)
+     headers: Custom headers to include in the request
+     params: Query parameters for the request
+     body: Request body for POST/PUT/PATCH requests
+     auth: Authentication configuration
+     response_data_path: JSON path to extract data from response (e.g., 'data.items', 'results')
+     max_retries: Maximum number of retry attempts for failed requests
+     retry_delay: Base delay in seconds between retries (uses exponential backoff)
+     timeout: Request timeout in seconds
+   """
+   url: str = Field(validation_alias=AliasChoices("url", "endpoint"))
+   method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = Field(default="GET")
+   headers: Optional[Dict[str, str]] = Field(default_factory=dict)
+   params: Optional[Dict[str, Any]] = Field(default_factory=dict)
+   body: Optional[Union[Dict[str, Any], str]] = Field(default=None)
+   auth: Optional[AuthConfig] = Field(default_factory=lambda: AuthConfig())
+   response_data_path: Optional[str] = Field(default=None, validation_alias=AliasChoices("responseDataPath", "response_data_path"))
+   max_retries: int = Field(default=5, validation_alias=AliasChoices("maxRetries", "max_retries"))
+   retry_delay: float = Field(default=1.0, validation_alias=AliasChoices("retryDelay", "retry_delay"))
+   timeout: int = Field(default=30)
+
+   @field_validator('auth', mode='before')
+   @classmethod
+   def parse_auth(cls, v):
+     """Parse auth config from dict if needed"""
+     if isinstance(v, dict):
+       return AuthConfig(**v)
+     return v or AuthConfig()
+
+
+ class Api(IntegrationStrategy):
+   """
+   Generic API Integration that supports:
+   - Multiple HTTP methods (GET, POST, PUT, DELETE, PATCH)
+   - Various authentication types (Bearer, Basic, API Key, Custom)
+   - Custom headers and parameters
+   - Automatic retry with exponential backoff
+   - Flexible response data extraction
+   """
+
+   def __init__(self, knowledgebase_path: str, data: dict[str, Union[str, int, list]]):
+     super().__init__(knowledgebase_path, data)
+     self.__data = ApiParams.model_validate(self.data)
+
+   def working_subdirectory(self) -> str:
+     return 'api_integration'
+
+   async def run(self) -> None:
+     """Fetch data from the API and save to JSON file"""
+     _data = await self.__fetch_data()
+     json_file_path = os.path.join(self.working_directory, 'api_data.json')
+     with open(json_file_path, 'w', encoding='utf-8') as f:
+       json.dump(_data, f, ensure_ascii=False, indent=2)
+     logging.info(f"Saved {len(_data) if isinstance(_data, list) else 1} items to {json_file_path}")
+
+   async def load(self) -> list[Document]:
+     """Load data from API and convert to documents"""
+     await self.run()
+     await asyncio.sleep(1)
+     return await Loader(self.working_directory).load()
+
+   def __prepare_headers(self) -> Dict[str, str]:
+     """Prepare request headers with authentication"""
+     headers = self.__data.headers.copy() if self.__data.headers else {}
+
+     # Add Content-Type if not present
+     if 'Content-Type' not in headers and self.__data.method in ["POST", "PUT", "PATCH"]:
+       headers['Content-Type'] = 'application/json'
+
+     # Add authentication
+     auth = self.__data.auth
+     if auth.type == "bearer":
+       prefix = auth.prefix or "Bearer"
+       headers['Authorization'] = f"{prefix} {auth.token}"
+     elif auth.type == "basic":
+       import base64
+       credentials = f"{auth.username}:{auth.password}"
+       encoded = base64.b64encode(credentials.encode()).decode()
+       headers['Authorization'] = f"Basic {encoded}"
+     elif auth.type == "api_key" and auth.header_name:
+       prefix = f"{auth.prefix} " if auth.prefix else ""
+       headers[auth.header_name] = f"{prefix}{auth.token}"
+
+     return headers
+
+   def __get_nested_value(self, data: Any, path: Optional[str]) -> Any:
+     """Extract nested value from data using dot notation path"""
+     if not path:
+       return data
+
+     keys = path.split('.')
+     current = data
+     for key in keys:
+       if isinstance(current, dict):
+         current = current.get(key)
+       elif isinstance(current, list) and key.isdigit():
+         current = current[int(key)]
+       else:
+         return None
+
+       if current is None:
+         return None
+
+     return current
+
+   async def __make_request(
+     self,
+     url: str,
+     headers: Dict[str, str],
+     params: Optional[Dict[str, Any]] = None
+   ) -> Dict[str, Any]:
+     """Make HTTP request with retry logic"""
+     retry_count = 0
+
+     while retry_count <= self.__data.max_retries:
+       try:
+         timeout = aiohttp.ClientTimeout(total=self.__data.timeout)
+
+         async with aiohttp.ClientSession(timeout=timeout) as session:
+           request_kwargs = {
+             "headers": headers,
+             "params": params or self.__data.params
+           }
+
+           # Add body for POST/PUT/PATCH
+           if self.__data.method in ["POST", "PUT", "PATCH"] and self.__data.body:
+             if isinstance(self.__data.body, dict):
+               request_kwargs["json"] = self.__data.body
+             else:
+               request_kwargs["data"] = self.__data.body
+
+           async with session.request(
+             self.__data.method,
+             url,
+             **request_kwargs
+           ) as response:
+             # Check response status
+             if response.status == 429: # Rate limit
+               retry_count += 1
+               if retry_count > self.__data.max_retries:
+                 raise Exception("Rate limit exceeded. Maximum retries reached.")
+
+               wait_time = self.__data.retry_delay * (2 ** retry_count)
+               logging.warning(f"Rate limited. Waiting {wait_time}s (Attempt {retry_count}/{self.__data.max_retries})")
+               await asyncio.sleep(wait_time)
+               continue
+
+             response.raise_for_status()
+
+             # Parse response
+             try:
+               data = await response.json()
+               return data
+             except aiohttp.ContentTypeError:
+               text = await response.text()
+               logging.warning(f"Non-JSON response received: {text[:200]}")
+               return {"text": text}
+
+       except aiohttp.ClientError as e:
+         retry_count += 1
+         if retry_count > self.__data.max_retries:
+           raise Exception(f"Request failed after {self.__data.max_retries} retries: {e}")
+
+         wait_time = self.__data.retry_delay * (2 ** retry_count)
+         logging.warning(f"Request error: {e}. Retrying in {wait_time}s...")
+         await asyncio.sleep(wait_time)
+         continue
+
+     raise Exception("Maximum retries exceeded")
+
+   async def __fetch_data(self) -> Any:
+     """Fetch data from API"""
+     headers = self.__prepare_headers()
+     response = await self.__make_request(self.__data.url, headers)
+
+     # Extract data from response using path if specified
+     data = self.__get_nested_value(response, self.__data.response_data_path)
+     result = data if data is not None else response
+
+     return result
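
Note: ApiParams resolves both snake_case and camelCase keys through AliasChoices, so a CMS-style payload maps directly onto the model. A minimal usage sketch follows; the endpoint, token, and knowledge-base path are hypothetical, and it assumes the integration's working directory is writable:

import asyncio
from ws_bom_robot_app.llm.vector_store.integration.api import Api

# Hypothetical payload: camelCase keys resolve via the AliasChoices
# declared on ApiParams/AuthConfig (snake_case works equally well).
data = {
  "endpoint": "https://example.com/v1/articles",   # alias of "url"
  "method": "GET",
  "auth": {"type": "bearer", "token": "<token>"},  # -> Authorization: Bearer <token>
  "responseDataPath": "data.items",                # extracts response["data"]["items"]
  "maxRetries": 3,
  "retryDelay": 0.5,
  "timeout": 15,
}

async def main():
  # run() writes api_data.json into the working directory; load() then
  # converts it to langchain Documents via the shared Loader.
  docs = await Api("/tmp/kb", data).load()
  print(f"loaded {len(docs)} documents")

asyncio.run(main())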
ws_bom_robot_app/llm/vector_store/integration/azure.py
@@ -1,6 +1,6 @@
  import asyncio
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
- from unstructured_ingest.v2.processes.connectors.fsspec.azure import AzureConnectionConfig, AzureAccessConfig, AzureDownloaderConfig, AzureIndexerConfig
+ from unstructured_ingest.processes.connectors.fsspec.azure import AzureConnectionConfig, AzureAccessConfig, AzureDownloaderConfig, AzureIndexerConfig
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from typing import Union, Optional
ws_bom_robot_app/llm/vector_store/integration/base.py
@@ -1,10 +1,17 @@
- import os
+ import os, copy
+ from random import random
  from langchain_core.documents import Document
  from abc import ABC, abstractmethod
- from unstructured_ingest.v2.interfaces import ProcessorConfig
- from unstructured_ingest.v2.pipeline.pipeline import Pipeline, PartitionerConfig, FiltererConfig
+ from unstructured_ingest.interfaces import ProcessorConfig
+ from unstructured_ingest.pipeline.pipeline import (
+   Pipeline,
+   PartitionerConfig,
+   FiltererConfig
+ )
+ from unstructured_ingest.processes.connector_registry import source_registry
  from typing import Union
  from ws_bom_robot_app.llm.utils.secrets import Secrets
+ from ws_bom_robot_app.config import config

  class IntegrationStrategy(ABC):
    @classmethod
@@ -32,23 +39,59 @@ class IntegrationStrategy(ABC):
      pass

  class UnstructuredIngest():
+   _PIPELINE: Pipeline = None
    def __init__(self, working_directory: str):
      self.working_directory = working_directory
-   def pipeline(self,indexer,downloader,connection,extension: list[str] = None) -> Pipeline:
-     return Pipeline.from_configs(
-       context=ProcessorConfig(
+     self._runtime_options = config.runtime_options()
+   def pipeline(self,indexer_config,downloader_config,connection_config,extension: list[str] = None) -> Pipeline:
+     def _default_processor_config() -> ProcessorConfig:
+       return ProcessorConfig(
          reprocess=False,
          verbose=False,
          tqdm=False,
-         num_processes=2,
+         num_processes=config.robot_ingest_max_threads, #safe choice to 1, avoid potential process-related issues with Docker
+         disable_parallelism=False,
          preserve_downloads=True,
          download_only=True,
-         raise_on_error=False
-       ),
-       indexer_config=indexer,
-       downloader_config=downloader,
-       source_connection_config=connection,
-       partitioner_config=PartitionerConfig(),
-       filterer_config=FiltererConfig(file_glob=[f"**/*{ext}" for ext in extension] if extension else None)
-     )
+         raise_on_error=False,
+         iter_delete=True,
+         delete_cache=False #already managed by the generator task
+       )
+     def _init_pipeline() -> Pipeline:
+       return Pipeline.from_configs(
+         context=_default_processor_config(),
+         indexer_config=indexer_config,
+         downloader_config=downloader_config,
+         source_connection_config=connection_config,
+         partitioner_config=PartitionerConfig(),
+         filterer_config=FiltererConfig(file_glob=[f"**/*{ext}" for ext in extension] if extension else None)
+       )
+     def _instance_pipeline() -> Pipeline:
+       from unstructured_ingest.pipeline.steps.index import IndexStep
+       from unstructured_ingest.pipeline.steps.download import DownloadStep
+       from unstructured_ingest.pipeline.steps.filter import Filterer, FilterStep
+       _context = _default_processor_config()
+       source_entry = {
+         k: v
+         for k, v in source_registry.items()
+         if type(indexer_config) is v.indexer_config
+         and type(downloader_config) is v.downloader_config
+         and type(connection_config) is v.connection_config
+       }
+       source = list(source_entry.values())[0]
+       _pipeline = copy.deepcopy(UnstructuredIngest._PIPELINE)
+       _pipeline.context = _context
+       _pipeline.context.work_dir = f"{self.working_directory}_unstructured" # use sibling directory, cleaned up by the generator task
+       _pipeline.indexer_step = IndexStep(process=source.indexer(index_config=indexer_config, connection_config=connection_config), context=_context)
+       _pipeline.downloader_step = DownloadStep(process=source.downloader(download_config=downloader_config, connection_config=connection_config), context=_context)
+       _pipeline.filter_step = FilterStep(process=Filterer(config=FiltererConfig(file_glob=[f"**/*{ext}" for ext in extension] if extension else None)), context=_context) if extension else None
+       return _pipeline

+     if not UnstructuredIngest._PIPELINE:
+       import random
+       import time
+       time.sleep(random.uniform(0.2, 1))
+       if not UnstructuredIngest._PIPELINE:
+         UnstructuredIngest._PIPELINE = _init_pipeline()
+
+     return _instance_pipeline()
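
Note: the new _PIPELINE class attribute caches the first fully built Pipeline; subsequent calls deep-copy it and swap in per-source indexer/downloader/filter steps, skipping the cost of Pipeline.from_configs on every run. The randomized time.sleep before the second check only reduces the chance that two concurrent first calls both initialize; it is not a lock. For comparison, a lock-based sketch of the same double-checked initialization (illustrative, not the package's code):

import threading

class PipelineCache:
  """Illustrative double-checked lazy singleton guarded by a real lock."""
  _instance = None
  _lock = threading.Lock()

  @classmethod
  def get(cls, factory):
    # Fast path: no locking once initialized.
    if cls._instance is None:
      with cls._lock:
        # Re-check: another thread may have initialized while we waited.
        if cls._instance is None:
          cls._instance = factory()
    return cls._instance

Used as PipelineCache.get(_init_pipeline), this would replace the jittered sleep-and-recheck block.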
ws_bom_robot_app/llm/vector_store/integration/confluence.py
@@ -1,9 +1,10 @@
  import asyncio
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
- from unstructured_ingest.v2.processes.connectors.confluence import ConfluenceIndexerConfig, ConfluenceDownloaderConfig, ConfluenceConnectionConfig, ConfluenceAccessConfig
+ from unstructured_ingest.processes.connectors.confluence import ConfluenceIndexerConfig, ConfluenceIndexer, ConfluenceDownloaderConfig, ConfluenceConnectionConfig, ConfluenceAccessConfig
+ from unstructured_ingest.pipeline.pipeline import Pipeline
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
- from typing import Optional, Union
+ from typing import List, Optional, Union
  from pydantic import BaseModel, Field, AliasChoices

  class ConfluenceParams(BaseModel):
@@ -16,6 +17,7 @@ class ConfluenceParams(BaseModel):
      password: Confluence password or Cloud API token, if filled, set the access_token to None and vice versa.
      access_token (str): The personal access token for authenticating with Confluence, e.g., 'AT....'
      spaces (list[str]): A list of Confluence spaces to interact with, e.g., ['SPACE1', 'SPACE2'].
+     max_num_of_docs_from_each_space (int): The maximum number of documents to fetch from each space. Defaults to 500, with a maximum limit of 5000.
      extension (list[str], optional): A list of file extensions to filter by. Defaults to None, e.g., ['.pdf', '.docx'].
    """
    url: str
@@ -23,6 +25,7 @@ class ConfluenceParams(BaseModel):
    password: Optional[str] = None
    access_token: Optional[str] = Field(None, validation_alias=AliasChoices("accessToken","access_token"))
    spaces: list[str] = []
+   max_num_of_docs_from_each_space: int = Field(default=500, ge=1, le=5000,validation_alias=AliasChoices("maxNumOfDocsFromEachSpace","max_num_of_docs_from_each_space"))
    extension: list[str] = Field(default=None)
  class Confluence(IntegrationStrategy):
    def __init__(self, knowledgebase_path: str, data: dict[str, Union[str,int,list]]):
@@ -33,7 +36,8 @@ class Confluence(IntegrationStrategy):
      return 'confluence'
    def run(self) -> None:
      indexer_config = ConfluenceIndexerConfig(
-       spaces=self.__data.spaces
+       spaces=self.__data.spaces,
+       max_num_of_docs_from_each_space=self.__data.max_num_of_docs_from_each_space
      )
      downloader_config = ConfluenceDownloaderConfig(
        download_dir=self.working_directory
@@ -43,13 +47,37 @@ class Confluence(IntegrationStrategy):
        url=self.__data.url,
        username=self.__data.username
      )
-     self.__unstructured_ingest.pipeline(
+     pipeline: Pipeline = self.__unstructured_ingest.pipeline(
        indexer_config,
        downloader_config,
        connection_config,
-       extension=self.__data.extension).run()
+       extension=self.__data.extension
+     )
+     pipeline.indexer_step.process = CustomConfluenceIndexer(**vars(pipeline.indexer_step.process))
+     pipeline.run()
    async def load(self) -> list[Document]:
      await asyncio.to_thread(self.run)
      await asyncio.sleep(1)
      return await Loader(self.working_directory).load()

+ class CustomConfluenceIndexer(ConfluenceIndexer):
+   def __init__(self, **kwargs):
+     for key, value in kwargs.items():
+       try:
+         setattr(super(), key, value)
+       except AttributeError:
+         setattr(self, key, value)
+   def _get_docs_ids_within_one_space(self, space_key: str) -> List[dict]:
+     with self.connection_config.get_client() as client:
+       pages = client.get_all_pages_from_space(
+         space=space_key,
+         start=0,
+         limit=self.index_config.max_num_of_docs_from_each_space, #explicitly limit the number of pages fetched (omitted in unstructured-ingest)
+         expand=None,
+         content_type="page", # blogpost and comment types not currently supported
+         status=None,
+       )
+     limited_pages = pages[: self.index_config.max_num_of_docs_from_each_space]
+     doc_ids = [{"space_id": space_key, "doc_id": page["id"]} for page in limited_pages]
+     return doc_ids
+
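
Note: the new cap flows from ConfluenceParams into ConfluenceIndexerConfig, and CustomConfluenceIndexer passes it as the limit of get_all_pages_from_space. An illustrative payload (all values are placeholders):

from ws_bom_robot_app.llm.vector_store.integration.confluence import ConfluenceParams

params = ConfluenceParams.model_validate({
  "url": "https://example.atlassian.net/wiki",
  "username": "bot@example.com",
  "password": "<cloud-api-token>",       # or accessToken; set one, not both
  "spaces": ["DOCS", "ENG"],
  "maxNumOfDocsFromEachSpace": 1000,     # constrained to 1..5000, default 500
  "extension": [".pdf", ".docx"],
})
print(params.max_num_of_docs_from_each_space)  # 1000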
ws_bom_robot_app/llm/vector_store/integration/dropbox.py
@@ -1,6 +1,6 @@
  import asyncio
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
- from unstructured_ingest.v2.processes.connectors.fsspec.dropbox import DropboxConnectionConfig, DropboxAccessConfig, DropboxDownloaderConfig, DropboxIndexerConfig
+ from unstructured_ingest.processes.connectors.fsspec.dropbox import DropboxConnectionConfig, DropboxAccessConfig, DropboxDownloaderConfig, DropboxIndexerConfig
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from typing import Union
ws_bom_robot_app/llm/vector_store/integration/gcs.py
@@ -1,6 +1,6 @@
  import asyncio
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
- from unstructured_ingest.v2.processes.connectors.fsspec.gcs import GcsIndexerConfig, GcsConnectionConfig, GcsAccessConfig, GcsDownloaderConfig
+ from unstructured_ingest.processes.connectors.fsspec.gcs import GcsIndexerConfig, GcsConnectionConfig, GcsAccessConfig, GcsDownloaderConfig
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from typing import Union, Optional
ws_bom_robot_app/llm/vector_store/integration/github.py
@@ -1,10 +1,12 @@
  import asyncio
  from typing import Optional, Union
- from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy
- from unstructured_ingest.interfaces import ProcessorConfig, ReadConfig
- from unstructured_ingest.connector.git import GitAccessConfig
- from unstructured_ingest.connector.github import SimpleGitHubConfig
- from unstructured_ingest.runner import GithubRunner
+ from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
+ from unstructured_ingest.processes.connectors.github import (
+   GithubIndexerConfig,
+   GithubDownloaderConfig,
+   GithubConnectionConfig,
+   GithubAccessConfig
+ )
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from pydantic import BaseModel, Field, AliasChoices
@@ -27,28 +29,26 @@ class Github(IntegrationStrategy):
    def __init__(self, knowledgebase_path: str, data: dict[str, Union[str,int,list]]):
      super().__init__(knowledgebase_path, data)
      self.__data = GithubParams.model_validate(self.data)
+     self.__unstructured_ingest = UnstructuredIngest(self.working_directory)
    def working_subdirectory(self) -> str:
      return 'github'
    def run(self) -> None:
-     access_config = GitAccessConfig(
-       access_token=self.__data.access_token
-     )
-     file_ext = self.__data.file_ext or None
-     file_glob = [f"**/*{ext}" for ext in file_ext] if file_ext else None
-     config = SimpleGitHubConfig(
-       url = self.__data.repo,
-       access_config=access_config,
+     indexer_config = GithubIndexerConfig(
        branch=self.__data.branch,
-       file_glob=file_glob
+       recursive=True
+     )
+     downloader_config = GithubDownloaderConfig(
+       download_dir=self.working_directory
+     )
+     connection_config = GithubConnectionConfig(
+       access_config=GithubAccessConfig(access_token=self.__data.access_token),
+       url=self.__data.repo
      )
-     runner = GithubRunner(
-       connector_config=config,
-       processor_config=ProcessorConfig(reprocess=False,verbose=False,num_processes=2,raise_on_error=False),
-       read_config=ReadConfig(download_dir=self.working_directory,re_download=True,preserve_downloads=True,download_only=True),
-       partition_config=None,
-       retry_strategy_config=None
-     )
-     runner.run()
+     self.__unstructured_ingest.pipeline(
+       indexer_config,
+       downloader_config,
+       connection_config,
+       extension=self.__data.file_ext).run()
    async def load(self) -> list[Document]:
      await asyncio.to_thread(self.run)
      await asyncio.sleep(1)
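
Note: the GitHub integration now rides the shared UnstructuredIngest pipeline instead of the removed GithubRunner. An illustrative data payload, with field names inferred from the GithubParams usage above (all values are placeholders):

data = {
  "repo": "https://github.com/owner/repository",  # passed as GithubConnectionConfig url
  "branch": "main",
  "access_token": "<github-pat>",
  "file_ext": [".md", ".py"],                     # becomes the pipeline's **/*<ext> file_glob filter
}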
ws_bom_robot_app/llm/vector_store/integration/googledrive.py
@@ -1,10 +1,38 @@
  import asyncio
+ import json
+ from pathlib import Path
  from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy, UnstructuredIngest
- from unstructured_ingest.v2.processes.connectors.google_drive import GoogleDriveConnectionConfig, GoogleDriveDownloaderConfig, GoogleDriveIndexerConfig, GoogleDriveAccessConfig
+ from unstructured_ingest.processes.connectors.google_drive import GoogleDriveConnectionConfig, GoogleDriveDownloaderConfig, GoogleDriveIndexerConfig, GoogleDriveAccessConfig
+ from unstructured_ingest.data_types.file_data import FileData as OriginalFileData, BatchFileData as OriginalBatchFileData
  from langchain_core.documents import Document
  from ws_bom_robot_app.llm.vector_store.loader.base import Loader
  from typing import Union
  from pydantic import BaseModel, Field, AliasChoices
+
+ # UTF-8 safe FileData classes
+ class FileData(OriginalFileData):
+   @classmethod
+   def from_file(cls, path: str):
+     path = Path(path).resolve()
+     if not path.exists() or not path.is_file():
+       raise ValueError(f"file path not valid: {path}")
+     for encoding in ['utf-8', 'cp1252', 'iso-8859-1', 'latin-1']:
+       try:
+         with open(str(path), "r", encoding=encoding) as f:
+           return cls.model_validate(json.load(f))
+       except (UnicodeDecodeError, UnicodeError):
+         continue
+     raise ValueError(f"Could not decode file {path} with any supported encoding")
+
+   def to_file(self, path: str) -> None:
+     path = Path(path).resolve()
+     path.parent.mkdir(parents=True, exist_ok=True)
+     with open(str(path), "w", encoding="utf-8") as f:
+       json.dump(self.model_dump(), f, indent=2, ensure_ascii=False)
+
+ class BatchFileData(OriginalBatchFileData, FileData):
+   pass
+
  class GoogleDriveParams(BaseModel):
    """
    GoogleDriveParams is a model that holds parameters for Google Drive integration.
@@ -42,26 +70,27 @@ class GoogleDrive(IntegrationStrategy):
      super().__init__(knowledgebase_path, data)
      self.__data = GoogleDriveParams.model_validate(self.data)
      self.__unstructured_ingest = UnstructuredIngest(self.working_directory)
+     self._apply_encoding_fix()
+
+   def _apply_encoding_fix(self):
+     """Replace FileData classes with UTF-8 safe versions"""
+     import unstructured_ingest.data_types.file_data as fd
+     fd.FileData = FileData
+     fd.BatchFileData = BatchFileData
+     fd.file_data_from_file = lambda path: BatchFileData.from_file(path) if path else FileData.from_file(path)
+
    def working_subdirectory(self) -> str:
      return 'googledrive'
+
    def run(self) -> None:
-     indexer_config = GoogleDriveIndexerConfig(
-       extensions=self.__data.extensions,
-       recursive=self.__data.recursive
-     )
-     downloader_config = GoogleDriveDownloaderConfig(
-       download_dir=self.working_directory
-     )
-     connection_config = GoogleDriveConnectionConfig(
-       access_config=GoogleDriveAccessConfig(
-         service_account_key=self.__data.service_account_key
-       ),
-       drive_id=self.__data.drive_id
-     )
      self.__unstructured_ingest.pipeline(
-       indexer_config,
-       downloader_config,
-       connection_config).run()
+       GoogleDriveIndexerConfig(extensions=self.__data.extensions, recursive=self.__data.recursive),
+       GoogleDriveDownloaderConfig(download_dir=self.working_directory),
+       GoogleDriveConnectionConfig(
+         access_config=GoogleDriveAccessConfig(service_account_key=self.__data.service_account_key),
+         drive_id=self.__data.drive_id
+       )
+     ).run()
    async def load(self) -> list[Document]:
      await asyncio.to_thread(self.run)
      await asyncio.sleep(1)
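
Note: the encoding fix works by rebinding the module-level names in unstructured_ingest.data_types.file_data, so pipeline code that resolves FileData through that module picks up the fallback readers. The read side reduces to this pattern (standalone sketch; metadata.json is a hypothetical path):

import json

def read_json_with_fallback(path: str) -> dict:
  # Try UTF-8 first, then common Windows/Latin encodings,
  # mirroring the patched FileData.from_file above.
  for encoding in ("utf-8", "cp1252", "iso-8859-1", "latin-1"):
    try:
      with open(path, "r", encoding=encoding) as f:
        return json.load(f)
    except (UnicodeDecodeError, UnicodeError):
      continue
  raise ValueError(f"Could not decode {path} with any supported encoding")

print(read_json_with_fallback("metadata.json"))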