datasourcelib 0.1.2__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/PKG-INFO +1 -1
  2. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/setup.py +1 -1
  3. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/indexes/azure_search_index.py +102 -1
  4. datasourcelib-0.1.4/src/datasourcelib/strategies/__init__.py +14 -0
  5. datasourcelib-0.1.4/src/datasourcelib/strategies/daily_load.py +22 -0
  6. datasourcelib-0.1.4/src/datasourcelib/strategies/full_load.py +38 -0
  7. datasourcelib-0.1.4/src/datasourcelib/strategies/incremental_load.py +27 -0
  8. datasourcelib-0.1.4/src/datasourcelib/strategies/ondemand_load.py +19 -0
  9. datasourcelib-0.1.4/src/datasourcelib/strategies/timerange_load.py +24 -0
  10. datasourcelib-0.1.4/src/datasourcelib/utils/__init__.py +12 -0
  11. datasourcelib-0.1.4/src/datasourcelib/utils/byte_reader.py +256 -0
  12. datasourcelib-0.1.4/src/datasourcelib/utils/exceptions.py +9 -0
  13. datasourcelib-0.1.4/src/datasourcelib/utils/file_reader.py +217 -0
  14. datasourcelib-0.1.4/src/datasourcelib/utils/logger.py +12 -0
  15. datasourcelib-0.1.4/src/datasourcelib/utils/validators.py +7 -0
  16. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib.egg-info/PKG-INFO +1 -1
  17. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib.egg-info/SOURCES.txt +12 -2
  18. datasourcelib-0.1.2/src/datasourcelib/indexes/azure_search_index_only.py +0 -162
  19. datasourcelib-0.1.2/src/datasourcelib/indexes/azure_search_index_vector.py +0 -286
  20. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/LICENSE +0 -0
  21. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/README.md +0 -0
  22. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/pyproject.toml +0 -0
  23. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/setup.cfg +0 -0
  24. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/__init__.py +0 -0
  25. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/core/__init__.py +0 -0
  26. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/core/sync_base.py +0 -0
  27. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/core/sync_manager.py +0 -0
  28. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/core/sync_types.py +0 -0
  29. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/__init__.py +0 -0
  30. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/azure_devops_source.py +0 -0
  31. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/blob_source.py +0 -0
  32. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/datasource_base.py +0 -0
  33. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/datasource_types.py +0 -0
  34. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/sharepoint_source - Copy.py +0 -0
  35. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/sharepoint_source.py +0 -0
  36. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/datasources/sql_source.py +0 -0
  37. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib/indexes/__init__.py +0 -0
  38. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib.egg-info/dependency_links.txt +0 -0
  39. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib.egg-info/requires.txt +0 -0
  40. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/src/datasourcelib.egg-info/top_level.txt +0 -0
  41. {datasourcelib-0.1.2 → datasourcelib-0.1.4}/tests/test_sync_strategies.py +0 -0
--- datasourcelib-0.1.2/PKG-INFO
+++ datasourcelib-0.1.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datasourcelib
-Version: 0.1.2
+Version: 0.1.4
 Summary: Data source sync strategies for vector DBs
 Home-page: https://github.com/jaiprakash0217/datasourcelib
 Author: Jai Prakash
--- datasourcelib-0.1.2/setup.py
+++ datasourcelib-0.1.4/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="datasourcelib",
-    version="0.1.2",
+    version="0.1.4",
     packages=find_packages(where="src"),
     package_dir={"": "src"},
     install_requires=[
--- datasourcelib-0.1.2/src/datasourcelib/indexes/azure_search_index.py
+++ datasourcelib-0.1.4/src/datasourcelib/indexes/azure_search_index.py
@@ -110,7 +110,7 @@ class AzureSearchIndexer:
             logger.exception(f"Failed to get embeddings for text: {text[:100]}...")
             raise
 
-    def _build_vector_search_config(self):
+    def _build_vector_search_config_old(self):
         AzureKeyCredential, SearchClient, SearchIndexClient, AzureOpenAI, SearchIndex, SearchField, SearchFieldDataType, SimpleField, SearchableField, VectorSearch, VectorSearchProfile, HnswAlgorithmConfiguration, SemanticSearch, SemanticField, SemanticConfiguration, SemanticPrioritizedFields = self._ensure_sdk()
         vector_config = self.config.get("vector_config", {})
         dimensions = vector_config.get("dimensions", 1536)
@@ -121,6 +121,107 @@ class AzureSearchIndexer:
         )
 
         return vector_search, dimensions
+
+    def _build_vector_search_config(self):
+        AzureKeyCredential, SearchClient, SearchIndexClient, AzureOpenAI, SearchIndex, SearchField, SearchFieldDataType, SimpleField, SearchableField, VectorSearch, VectorSearchProfile, HnswAlgorithmConfiguration, SemanticSearch, SemanticField, SemanticConfiguration, SemanticPrioritizedFields = self._ensure_sdk()
+
+        vector_config = self.config.get("vector_config", {})
+        dimensions = vector_config.get("dimensions", 1536)
+        algorithm = vector_config.get("algorithm", "hnsw").lower()
+
+        # Build algorithm configuration (SDK model if available)
+        alg_cfg = HnswAlgorithmConfiguration(name="algorithms-config-1")
+
+        # Build vectorizer settings using Azure OpenAI config from vector_db_config
+        deployment = self.config.get("embedding_deployment")
+        endpoint = self.config.get("embedding_endpoint")
+        api_key = self.config.get("embedding_key")
+        # modelName required for API version 2025-09-01 — prefer explicit embedding_model, fall back to deployment
+        model_name = self.config.get("embedding_model") or deployment
+        content_field = self.config.get("content_field", "content")
+        vector_field = self.config.get("vector_field", "contentVector")
+
+        if not model_name:
+            raise RuntimeError("Vectorizer configuration requires 'embedding_model' or 'embedding_deployment' in vector_db_config")
+
+        # Define vectorizer with explicit name and required azureOpenAIParameters including modelName
+        vectorizer_name = "azure-openai-vectorizer"
+        vectorizer = {
+            "name": vectorizer_name,
+            "kind": "azureOpenAI",
+            "azureOpenAIParameters": {
+                "resourceUri": endpoint.rstrip('/') if endpoint else None,
+                # include both modelName (required) and deploymentId (if provided)
+                "modelName": model_name,
+                **({"deploymentId": deployment} if deployment else {}),
+                "apiKey": api_key
+            },
+            "options": {
+                "fieldMapping": [
+                    {
+                        "sourceContext": f"/document/{content_field}",
+                        "outputs": [
+                            {
+                                "targetContext": f"/document/{vector_field}",
+                                "targetDimensions": dimensions
+                            }
+                        ]
+                    }
+                ]
+            }
+        }
+
+        profile_name = "vector-profile-1"
+        try:
+            # Create profile with vectorizer reference (SDK may expect vectorizer_name or vectorizer depending on version)
+            try:
+                profile = VectorSearchProfile(
+                    name=profile_name,
+                    algorithm_configuration_name="algorithms-config-1",
+                    vectorizer_name=vectorizer_name
+                )
+            except TypeError:
+                # fallback if SDK constructor uses different parameter names
+                profile = VectorSearchProfile(name=profile_name, algorithm_configuration_name="algorithms-config-1")
+                try:
+                    setattr(profile, "vectorizer_name", vectorizer_name)
+                except Exception:
+                    pass
+
+            try:
+                # Construct full vector search config with both profile and vectorizer
+                vector_search = VectorSearch(
+                    profiles=[profile],
+                    algorithms=[alg_cfg],
+                    vectorizers=[vectorizer]
+                )
+            except Exception:
+                # Fallback to dict if SDK constructor differs
+                vector_search = {
+                    "profiles": [{
+                        "name": profile_name,
+                        "algorithmConfigurationName": "algorithms-config-1",
+                        "vectorizerName": vectorizer_name
+                    }],
+                    "algorithms": [{"name": "algorithms-config-1"}],
+                    "vectorizers": [vectorizer]
+                }
+        except Exception:
+            # Full dict fallback
+            vector_search = {
+                "profiles": [{
+                    "name": profile_name,
+                    "algorithmConfigurationName": "algorithms-config-1",
+                    "vectorizerName": vectorizer_name
+                }],
+                "algorithms": [{"name": "algorithms-config-1"}],
+                "vectorizers": [vectorizer]
+            }
+
+        logger.info("Built vector_search config (dimensions=%s, model=%s, vectorizer=%s)",
+                    dimensions, model_name, vectorizer_name)
+        return vector_search, dimensions
+
 
     def _build_semantic_settings(self):
         """
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/__init__.py
@@ -0,0 +1,14 @@
+from .daily_load import DailyLoadStrategy
+from .full_load import FullLoadStrategy
+from .incremental_load import IncrementalLoadStrategy
+from .ondemand_load import OnDemandLoadStrategy
+from .timerange_load import TimeRangeLoadStrategy
+
+
+__all__ = [
+    "DailyLoadStrategy",
+    "FullLoadStrategy",
+    "IncrementalLoadStrategy",
+    "OnDemandLoadStrategy",
+    "TimeRangeLoadStrategy"
+]
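With this strategies/__init__.py in place, the five strategy classes can be imported from the subpackage directly, for example:

    from datasourcelib.strategies import FullLoadStrategy, IncrementalLoadStrategy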
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/daily_load.py
@@ -0,0 +1,22 @@
+from datasourcelib.core.sync_base import SyncBase
+from datasourcelib.utils.logger import get_logger
+from datetime import datetime, timedelta
+
+logger = get_logger(__name__)
+
+class DailyLoadStrategy(SyncBase):
+    """Daily scheduled load (wraps incremental)."""
+
+    def validate(self) -> bool:
+        return True
+
+    def sync(self, run_date: str = None, **kwargs) -> bool:
+        try:
+            run_date = run_date or datetime.utcnow().date().isoformat()
+            logger.info("Starting daily load for %s", run_date)
+            # Typically call incremental with last_sync = previous day midnight
+            # TODO implement scheduling integration externally; the strategy here is idempotent
+            return True
+        except Exception:
+            logger.exception("DailyLoadStrategy.sync failed")
+            return False
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/full_load.py
@@ -0,0 +1,38 @@
+from datasourcelib.core.sync_base import SyncBase
+from datasourcelib.utils.logger import get_logger
+from datasourcelib.indexes.azure_search_index import AzureSearchIndexer
+logger = get_logger(__name__)
+
+class FullLoadStrategy(SyncBase):
+    """Full load: replace or reload entire source into vector DB."""
+
+    def validate(self) -> bool:
+        # Minimal validation: required keys exist
+        dsok = self.data_source.validate_config()
+        return dsok
+
+    def sync(self, **kwargs) -> bool:
+        try:
+            logger.info("Running full data load")
+            data = self.data_source.fetch_data(**kwargs)
+            for key, value in kwargs.items():
+                print(f"{key} = {value}")
+            # Implement real extract -> transform -> load to vector DB
+            # Example pseudocode:
+            # vector_client.upsert_batch(self.vector_db_config, rows)
+            # New: use AzureSearchIndexer to create index and upload documents if requested
+            if isinstance(data, list) and data:
+                indexer = AzureSearchIndexer(self.vector_db_config or {})
+                if not indexer.validate_config():
+                    logger.error("Vector DB config invalid for Azure Search indexer")
+                    return False
+                ok = indexer.index(data)
+                if not ok:
+                    logger.error("Indexing data to Azure Search failed")
+                    return False
+
+            logger.info("Full data load finished successfully")
+            return True
+        except Exception:
+            logger.exception("FullLoadStrategy.sync failed")
+            return False
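A rough usage sketch of the full-load path above. The SyncBase constructor is unchanged in this release and not shown here, so the way data_source and vector_db_config get attached to the strategy is an assumption; the datasource class name and its config keys are likewise illustrative:

    from datasourcelib.strategies import FullLoadStrategy
    from datasourcelib.datasources.blob_source import BlobSource  # class name assumed from the module path

    source = BlobSource({"connection_string": "<conn-str>", "container": "docs"})  # hypothetical keys
    strategy = FullLoadStrategy(
        data_source=source,                  # must expose validate_config() and fetch_data(**kwargs)
        vector_db_config=vector_db_config,   # dict consumed by AzureSearchIndexer (see sketch above)
    )
    if strategy.validate():
        succeeded = strategy.sync()          # fetch_data() -> AzureSearchIndexer(...).index(data)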
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/incremental_load.py
@@ -0,0 +1,27 @@
+from datetime import datetime
+from datasourcelib.core.sync_base import SyncBase
+from datasourcelib.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+class IncrementalLoadStrategy(SyncBase):
+    """Incremental load using last_sync timestamp or cursor."""
+
+    def validate(self) -> bool:
+        # require source to support incremental field or cursor
+        if "cursor_field" not in self.source_config and "last_sync" not in self.source_config:
+            logger.error("IncrementalLoadStrategy missing cursor_field or last_sync in source_config")
+            return False
+        return True
+
+    def sync(self, last_sync: str = None, **kwargs) -> bool:
+        try:
+            last = last_sync or self.source_config.get("last_sync")
+            logger.info("Running incremental load since %s", last)
+            # TODO: fetch delta rows since 'last' and upsert to vector DB
+            # After successful run store new last_sync timestamp
+            logger.info("Incremental load completed")
+            return True
+        except Exception:
+            logger.exception("IncrementalLoadStrategy.sync failed")
+            return False
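validate() above only checks that the source config carries either a cursor_field or a last_sync value, so a minimal passing configuration looks like this (the field name and timestamp format are illustrative):

    source_config = {
        "cursor_field": "updated_at",         # field used to detect changed rows
        "last_sync": "2024-01-01T00:00:00Z",  # optional watermark; sync(last_sync=...) overrides it
    }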
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/ondemand_load.py
@@ -0,0 +1,19 @@
+from datasourcelib.core.sync_base import SyncBase
+from datasourcelib.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+class OnDemandLoadStrategy(SyncBase):
+    """On demand load triggered by user request (arbitrary params)."""
+
+    def validate(self) -> bool:
+        return True
+
+    def sync(self, **kwargs) -> bool:
+        try:
+            logger.info("On-demand sync invoked with params: %s", kwargs)
+            # Use kwargs to drive partial loads, filters, ids etc.
+            return True
+        except Exception:
+            logger.exception("OnDemandLoadStrategy.sync failed")
+            return False
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/strategies/timerange_load.py
@@ -0,0 +1,24 @@
+from datetime import datetime
+from datasourcelib.core.sync_base import SyncBase
+from datasourcelib.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+class TimeRangeLoadStrategy(SyncBase):
+    """Load records between a start and end timestamp."""
+
+    def validate(self) -> bool:
+        # rely on params at runtime; minimal validation OK
+        return True
+
+    def sync(self, start: str = None, end: str = None, **kwargs) -> bool:
+        try:
+            if not start or not end:
+                logger.error("TimeRangeLoadStrategy requires 'start' and 'end'")
+                return False
+            logger.info("Time range load between %s and %s", start, end)
+            # TODO: query source for timeframe and upsert
+            return True
+        except Exception:
+            logger.exception("TimeRangeLoadStrategy.sync failed")
+            return False
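TimeRangeLoadStrategy.sync fails fast unless both bounds are supplied; the timestamp format is not constrained by the code shown, so the values below are illustrative:

    # given a constructed TimeRangeLoadStrategy instance named strategy
    strategy.sync()                                       # returns False and logs the missing-bounds error
    strategy.sync(start="2024-01-01", end="2024-02-01")   # proceeds with the time-range load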
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/utils/__init__.py
@@ -0,0 +1,12 @@
+from .byte_reader import ByteReader
+from .exceptions import DatasourceLibError, SyncStrategyNotFound, DataSourceNotFound
+from .file_reader import FileReader
+
+
+__all__ = [
+    "ByteReader",
+    "FileReader",
+    "DatasourceLibError",
+    "SyncStrategyNotFound",
+    "DataSourceNotFound"
+]
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/utils/byte_reader.py
@@ -0,0 +1,256 @@
+from pathlib import Path
+from typing import Optional, Union, List
+import io
+import pandas as pd
+
+# --- Optional helpers ---
+from charset_normalizer import from_bytes as cn_from_bytes
+
+# DOCX
+from docx import Document as DocxDocument
+
+# PDF
+import fitz  # pymupdf
+import pdfplumber
+
+# PPTX
+from pptx import Presentation
+
+# YAML / XML
+import yaml
+from lxml import etree
+import json
+
+
+class ByteReader:
+    """
+    Unified reader for common file types.
+    - read_text(path): file path -> text
+    - read_table(path): file path -> DataFrame
+    - read_text_from_bytes(data, ext): bytes -> text
+    - read_table_from_bytes(data, ext): bytes -> DataFrame
+    """
+
+    TEXT_EXTS = {".txt", ".log", ".md"}
+    TABLE_EXTS = {".csv", ".tsv", ".xlsx", ".xls"}
+    DOCX_EXTS = {".docx"}
+    PDF_EXTS = {".pdf"}
+    PPTX_EXTS = {".pptx"}
+    JSON_EXTS = {".json"}
+    YAML_EXTS = {".yaml", ".yml"}
+    INI_EXTS = {".ini", ".cfg"}
+    XML_EXTS = {".xml"}
+
+    def __init__(self, default_encoding: str = "utf-8", errors: str = "replace"):
+        self.default_encoding = default_encoding
+        self.errors = errors
+
+    # -----------------------
+    # Public API (paths)
+    # -----------------------
+    def read_text(self, path: Union[str, Path]) -> str:
+        path = Path(path)
+        ext = path.suffix.lower()
+
+        if ext in self.TEXT_EXTS:
+            return path.read_text(encoding=self.default_encoding, errors=self.errors)
+
+        if ext in self.PDF_EXTS:
+            return self._read_pdf_text_path(path)
+
+        if ext in self.DOCX_EXTS:
+            return self._read_docx_text_fp(open(path, "rb"))
+
+        if ext in self.PPTX_EXTS:
+            return self._read_pptx_text_fp(open(path, "rb"))
+
+        if ext in self.JSON_EXTS:
+            with path.open("r", encoding=self.default_encoding, errors=self.errors) as f:
+                obj = json.load(f)
+            return json.dumps(obj, indent=2, ensure_ascii=False)
+
+        if ext in self.YAML_EXTS:
+            with path.open("r", encoding=self.default_encoding, errors=self.errors) as f:
+                obj = yaml.safe_load(f)
+            return yaml.safe_dump(obj, sort_keys=False, allow_unicode=True)
+
+        if ext in self.INI_EXTS:
+            import configparser
+            parser = configparser.ConfigParser()
+            with path.open("r", encoding=self.default_encoding, errors=self.errors) as f:
+                parser.read_file(f)
+            output = io.StringIO()
+            parser.write(output)
+            return output.getvalue()
+
+        if ext in self.XML_EXTS:
+            tree = etree.parse(str(path))
+            return etree.tostring(tree, pretty_print=True, encoding="unicode")
+
+        if ext in self.TABLE_EXTS:
+            df = self.read_table(path)
+            return df.to_csv(index=False)
+
+        raise ValueError(f"Unsupported file extension for text extraction: {ext}")
+
+    def read_table(self, path: Union[str, Path]) -> pd.DataFrame:
+        path = Path(path)
+        ext = path.suffix.lower()
+
+        if ext == ".csv":
+            return pd.read_csv(path)
+        if ext == ".tsv":
+            return pd.read_csv(path, sep="\t")
+        if ext == ".xlsx":
+            return pd.read_excel(path, engine="openpyxl")
+        if ext == ".xls":
+            return pd.read_excel(path, engine="xlrd")
+
+        # Fallback: attempt CSV read if unknown
+        try:
+            return pd.read_csv(path)
+        except Exception as e:
+            raise ValueError(f"Unsupported file extension for tables: {ext}") from e
+
+    # -----------------------
+    # Public API (bytes)
+    # -----------------------
+    def read_text_from_bytes(self, data: bytes, ext: str) -> str:
+        """
+        Extract text from in-memory bytes.
+        ext: file extension (e.g., '.pdf', '.docx', '.txt', '.pptx', '.json', '.yaml', '.xml', '.csv', '.xlsx')
+        """
+        ext = self._normalize_ext(ext)
+
+        if ext in self.TEXT_EXTS:
+            # Robust encoding detection
+            res = cn_from_bytes(data).best()
+            return str(res) if res else data.decode(self.default_encoding, errors=self.errors)
+
+        if ext in self.PDF_EXTS:
+            return self._read_pdf_text_bytes(data)
+
+        if ext in self.DOCX_EXTS:
+            return self._read_docx_text_fp(io.BytesIO(data))
+
+        if ext in self.PPTX_EXTS:
+            return self._read_pptx_text_fp(io.BytesIO(data))
+
+        if ext in self.JSON_EXTS:
+            obj = json.loads(data.decode(self.default_encoding, errors=self.errors))
+            return json.dumps(obj, indent=2, ensure_ascii=False)
+
+        if ext in self.YAML_EXTS:
+            obj = yaml.safe_load(data.decode(self.default_encoding, errors=self.errors))
+            return yaml.safe_dump(obj, sort_keys=False, allow_unicode=True)
+
+        if ext in self.INI_EXTS:
+            import configparser
+            parser = configparser.ConfigParser()
+            parser.read_string(data.decode(self.default_encoding, errors=self.errors))
+            output = io.StringIO()
+            parser.write(output)
+            return output.getvalue()
+
+        if ext in self.XML_EXTS:
+            tree = etree.parse(io.BytesIO(data))
+            return etree.tostring(tree, pretty_print=True, encoding="unicode")
+
+        if ext in self.TABLE_EXTS:
+            df = self.read_table_from_bytes(data, ext)
+            return df.to_csv(index=False)
+
+        raise ValueError(f"Unsupported extension for text extraction from bytes: {ext}")
+
+    def read_table_from_bytes(self, data: bytes, ext: str) -> pd.DataFrame:
+        """
+        Load tabular data from in-memory bytes into a DataFrame.
+        """
+        ext = self._normalize_ext(ext)
+
+        if ext == ".csv":
+            return pd.read_csv(io.BytesIO(data))
+        if ext == ".tsv":
+            return pd.read_csv(io.BytesIO(data), sep="\t")
+        if ext == ".xlsx":
+            return pd.read_excel(io.BytesIO(data), engine="openpyxl")
+        if ext == ".xls":
+            return pd.read_excel(io.BytesIO(data), engine="xlrd")
+
+        # Opportunistic fallback: try CSV
+        try:
+            return pd.read_csv(io.BytesIO(data))
+        except Exception as e:
+            raise ValueError(f"Unsupported extension for table reading from bytes: {ext}") from e
+
+    # -----------------------
+    # Internal helpers
+    # -----------------------
+    def _normalize_ext(self, ext: str) -> str:
+        ext = (ext or "").strip().lower()
+        if not ext.startswith("."):
+            ext = "." + ext
+        return ext
+
+    def _read_pdf_text_path(self, path: Path) -> str:
+        # Prefer PyMuPDF
+        try:
+            parts: List[str] = []
+            with fitz.open(str(path)) as doc:
+                if doc.is_encrypted and not doc.authenticate(""):
+                    raise RuntimeError("Encrypted PDF requires a password.")
+                for page in doc:
+                    parts.append(page.get_text("text"))
+            text = "\n\n".join(parts).strip()
+            if text:
+                return text
+        except Exception:
+            pass
+
+        # Fallback: pdfplumber
+        with pdfplumber.open(str(path)) as pdf:
+            return "\n\n".join([(p.extract_text() or "") for p in pdf.pages]).strip()
+
+    def _read_pdf_text_bytes(self, data: bytes) -> str:
+        # PyMuPDF can open from bytes
+        try:
+            doc = fitz.open(stream=data, filetype="pdf")
+            parts: List[str] = []
+            if doc.is_encrypted and not doc.authenticate(""):
+                raise RuntimeError("Encrypted PDF requires a password.")
+            for page in doc:
+                parts.append(page.get_text("text"))
+            doc.close()
+            text = "\n\n".join(parts).strip()
+            if text:
+                return text
+        except Exception:
+            pass
+
+        # Fallback to pdfplumber from BytesIO
+        with pdfplumber.open(io.BytesIO(data)) as pdf:
+            return "\n\n".join([(p.extract_text() or "") for p in pdf.pages]).strip()
+
+    def _read_docx_text_fp(self, fp) -> str:
+        doc = DocxDocument(fp)
+        chunks = []
+        for p in doc.paragraphs:
+            if p.text:
+                chunks.append(p.text)
+        for table in doc.tables:
+            for row in table.rows:
+                cells = [cell.text.strip() for cell in row.cells]
+                if any(cells):
+                    chunks.append("\t".join(cells))
+        return "\n".join(chunks).strip()
+
+    def _read_pptx_text_fp(self, fp) -> str:
+        prs = Presentation(fp)
+        chunks = []
+        for slide in prs.slides:
+            for shape in slide.shapes:
+                if hasattr(shape, "has_text_frame") and shape.has_text_frame:
+                    text = shape.text or ""
+                    if text:
+                        chunks.append(text)
+        return "\n".join(chunks).strip()
--- /dev/null
+++ datasourcelib-0.1.4/src/datasourcelib/utils/exceptions.py
@@ -0,0 +1,9 @@
+class DatasourceLibError(Exception):
+    """Base exception for datasourcelib."""
+
+class SyncStrategyNotFound(DatasourceLibError):
+    """Raised when a strategy is not found."""
+
+# Added: DataSourceNotFound to represent missing/unknown data sources
+class DataSourceNotFound(DatasourceLibError):
+    """Raised when a data source is not found or not registered."""