sie-langchain 0.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,256 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[codz]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+ #poetry.toml
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
+ #pdm.lock
116
+ #pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # pixi
121
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
+ #pixi.lock
123
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
+ # in the .venv directory. It is recommended not to include this directory in version control.
125
+ .pixi
126
+
127
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
+ __pypackages__/
129
+
130
+ # Celery stuff
131
+ celerybeat-schedule
132
+ celerybeat.pid
133
+
134
+ # SageMath parsed files
135
+ *.sage.py
136
+
137
+ # Environments
138
+ .env
139
+ .venv
140
+ env/
141
+ venv/
142
+ ENV/
143
+ env.bak/
144
+ venv.bak/
145
+
146
+ # Spyder project settings
147
+ .spyderproject
148
+ .spyproject
149
+
150
+ # Rope project settings
151
+ .ropeproject
152
+
153
+ # mkdocs documentation
154
+ /site
155
+
156
+ # Pyre type checker
157
+ .pyre/
158
+
159
+ # pytype static type analyzer
160
+ .pytype/
161
+
162
+ # Cython debug symbols
163
+ cython_debug/
164
+
165
+ # PyCharm
166
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
167
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
168
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
169
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
170
+ #.idea/
171
+
172
+ # Abstra
173
+ # Abstra is an AI-powered process automation framework.
174
+ # Ignore directories containing user credentials, local state, and settings.
175
+ # Learn more at https://abstra.io/docs
176
+ .abstra/
177
+
178
+ # Visual Studio Code
179
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
180
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
181
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
182
+ # you could uncomment the following to ignore the entire vscode folder
183
+ # .vscode/
184
+
185
+ # Ruff stuff:
186
+ .ruff_cache/
187
+
188
+ # PyPI configuration file
189
+ .pypirc
190
+
191
+ # Cursor
192
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
193
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
194
+ # refer to https://docs.cursor.com/context/ignore-files
195
+ .cursorignore
196
+ .cursorindexingignore
197
+
198
+ # vscode
199
+ .vscode
200
+
201
+ # Marimo
202
+ marimo/_static/
203
+ marimo/_lsp/
204
+ __marimo__/
205
+
206
+ # SIE specific
207
+ # Model weights cache
208
+ .cache/
209
+ *.safetensors
210
+ *.bin
211
+
212
+ # Secrets (never commit)
213
+ *.pem
214
+ *.key
215
+ credentials.json
216
+ *-key.json
217
+
218
+ # Terraform
219
+ # Local .terraform directories (cached providers/modules)
220
+ **/.terraform/*
221
+ .terraform.lock*
222
+ # State files (contain sensitive data)
223
+ *.tfstate
224
+ *.tfstate.*
225
+ # Crash log files
226
+ crash.log
227
+ crash.*.log
228
+ # Override files (local developer overrides)
229
+ override.tf
230
+ override.tf.json
231
+ *_override.tf
232
+ *_override.tf.json
233
+ # tfvars files may contain secrets
234
+ *.tfvars
235
+ *.tfvars.json
236
+ # NOTE: the `.terraform.lock*` pattern above currently ignores the lock file;
+ # remove that pattern if you want to keep .terraform.lock.hcl in version
+ # control for reproducibility (pinned provider versions).
237
+
238
+ # Node.js
239
+ node_modules/
240
+
241
+ # OS
242
+ .DS_Store
243
+ Thumbs.db
244
+
245
+ # VIM
246
+ *.swp
247
+
248
+ # Worktree metadata
249
+ .base-branch
250
+
251
+ # Temporary files
252
+ tmp/
253
+ .tmp/
254
+ .local/
255
+
256
+ .requirements-modal.txt
@@ -0,0 +1,12 @@
1
+ Metadata-Version: 2.4
2
+ Name: sie-langchain
3
+ Version: 0.1.7
4
+ Summary: SIE integration for LangChain
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: langchain-core>=0.2.0
7
+ Requires-Dist: sie-sdk>=0.1.0
8
+ Provides-Extra: dev
9
+ Requires-Dist: chromadb>=0.4; extra == 'dev'
10
+ Requires-Dist: langchain-chroma>=0.1; extra == 'dev'
11
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
12
+ Requires-Dist: pytest>=8.0; extra == 'dev'
@@ -0,0 +1,25 @@
1
+ [project]
2
+ name = "sie-langchain"
3
+ version = "0.1.7"
4
+ description = "SIE integration for LangChain"
5
+ requires-python = ">=3.10"
6
+ dependencies = [
7
+ "sie-sdk>=0.1.0",
8
+ "langchain-core>=0.2.0",
9
+ ]
10
+
11
+ [project.optional-dependencies]
12
+ dev = [
13
+ "pytest>=8.0",
14
+ "pytest-asyncio>=0.23",
15
+ # For integration tests
16
+ "chromadb>=0.4",
17
+ "langchain-chroma>=0.1",
18
+ ]
19
+
20
+ [build-system]
21
+ requires = ["hatchling"]
22
+ build-backend = "hatchling.build"
23
+
24
+ [tool.hatch.build.targets.wheel]
25
+ packages = ["src/sie_langchain"]
@@ -0,0 +1,36 @@
1
+ """SIE integration for LangChain.
2
+
3
+ Provides LangChain-compatible wrappers for SIE's encoding, reranking,
4
+ and entity extraction capabilities.
5
+
6
+ Example:
7
+ >>> from sie_langchain import SIEEmbeddings, SIEReranker, SIEExtractor
8
+ >>>
9
+ >>> # Create embeddings
10
+ >>> embeddings = SIEEmbeddings(base_url="http://localhost:8080", model="BAAI/bge-m3")
11
+ >>> vectors = embeddings.embed_documents(["Hello world"])
12
+ >>>
13
+ >>> # Create reranker
14
+ >>> reranker = SIEReranker(base_url="http://localhost:8080")
15
+ >>> reranked = reranker.compress_documents(documents, query)
16
+ >>>
17
+ >>> # Create extractor tool
18
+ >>> extractor = SIEExtractor(base_url="http://localhost:8080")
19
+ >>> entities = extractor.invoke("John Smith works at Acme Corp")
20
+
21
+ Hybrid search example:
22
+ >>> from langchain_pinecone import PineconeHybridSearchRetriever
23
+ >>> from sie_langchain import SIEEmbeddings, SIESparseEncoder
24
+ >>>
25
+ >>> retriever = PineconeHybridSearchRetriever(
26
+ ... embeddings=SIEEmbeddings(model="BAAI/bge-m3"),
27
+ ... sparse_encoder=SIESparseEncoder(model="BAAI/bge-m3"),
28
+ ... index=pinecone_index,
29
+ ... )
30
+ """
31
+
32
+ from sie_langchain.embeddings import SIEEmbeddings, SIESparseEncoder
33
+ from sie_langchain.extractors import SIEExtractor
34
+ from sie_langchain.rerankers import SIEReranker
35
+
36
+ __all__ = ["SIEEmbeddings", "SIEExtractor", "SIEReranker", "SIESparseEncoder"]
@@ -0,0 +1,328 @@
1
+ """SIE embeddings integration for LangChain.
2
+
3
+ Provides drop-in replacement for OpenAI embeddings using SIE's inference server:
4
+ - SIEEmbeddings: Dense embeddings for vector stores
5
+ - SIESparseEncoder: Sparse encoder for hybrid search with PineconeHybridSearchRetriever
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from typing import TYPE_CHECKING
11
+
12
+ from langchain_core.embeddings import Embeddings
13
+
14
+ if TYPE_CHECKING:
15
+ from sie_sdk import SIEAsyncClient, SIEClient
16
+
17
+
18
class SIEEmbeddings(Embeddings):
    """LangChain ``Embeddings`` implementation backed by an SIE server.

    Wraps ``SIEClient.encode()`` (and its async counterpart) so SIE models
    can be used wherever LangChain expects an embeddings object.

    Example:
        >>> # Basic usage
        >>> embeddings = SIEEmbeddings(base_url="http://localhost:8080", model="BAAI/bge-m3")
        >>> vectors = embeddings.embed_documents(["Hello world"])

        >>> # With GPU routing for multi-GPU clusters
        >>> embeddings = SIEEmbeddings(base_url="https://cluster.example.com", model="BAAI/bge-m3", gpu="a100-80gb")

    Args:
        base_url: URL of the SIE server.
        model: Model name/ID to use for encoding.
        client: Optional pre-configured SIEClient instance.
        async_client: Optional pre-configured SIEAsyncClient instance.
        instruction: Optional instruction prefix for embedding (model-dependent).
        output_dtype: Output dtype: "float32" (default), "float16", "int8", "binary".
        options: Runtime options dict passed to the model adapter. Available
            options depend on the model - see model documentation for details.
        gpu: Target GPU type for routing (e.g., "l4", "a100-80gb").
        timeout_s: Request timeout in seconds.
    """

    def __init__(
        self,
        *,
        base_url: str = "http://localhost:8080",
        model: str = "BAAI/bge-m3",
        client: SIEClient | None = None,
        async_client: SIEAsyncClient | None = None,
        instruction: str | None = None,
        output_dtype: str | None = None,
        options: dict[str, object] | None = None,
        gpu: str | None = None,
        timeout_s: float = 180.0,
    ) -> None:
        """Initialize SIE embeddings."""
        self._base_url = base_url
        self._model = model
        self._instruction = instruction
        self._output_dtype = output_dtype
        self._options = options
        self._gpu = gpu
        self._timeout_s = timeout_s
        # Injected clients take precedence; otherwise they are built lazily
        # on first property access.
        self._client = client
        self._async_client = async_client

    @property
    def client(self) -> SIEClient:
        """Sync ``SIEClient``, constructed on first access when not injected."""
        if self._client is not None:
            return self._client
        from sie_sdk import SIEClient

        self._client = SIEClient(
            self._base_url,
            timeout_s=self._timeout_s,
            gpu=self._gpu,
            options=self._options,
        )
        return self._client

    @property
    def async_client(self) -> SIEAsyncClient:
        """Async ``SIEAsyncClient``, constructed on first access when not injected."""
        if self._async_client is not None:
            return self._async_client
        from sie_sdk import SIEAsyncClient

        self._async_client = SIEAsyncClient(
            self._base_url,
            timeout_s=self._timeout_s,
            gpu=self._gpu,
            options=self._options,
        )
        return self._async_client

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed a batch of document texts.

        Args:
            texts: Document texts to embed.

        Returns:
            One dense vector (as a list of floats) per input text.
        """
        if not texts:
            return []

        from sie_sdk.types import Item

        batch = [Item(text=t) for t in texts]
        encoded = self.client.encode(
            self._model,
            batch,
            output_types=["dense"],
            instruction=self._instruction,
            output_dtype=self._output_dtype,
        )
        return [self._extract_dense(entry) for entry in encoded]

    def embed_query(self, text: str) -> list[float]:
        """Embed a single query string.

        Args:
            text: Query text to embed.

        Returns:
            The dense embedding as a list of floats.
        """
        from sie_sdk.types import Item

        # is_query lets asymmetric models apply their query-side encoding.
        encoded = self.client.encode(
            self._model,
            Item(text=text),
            output_types=["dense"],
            instruction=self._instruction,
            output_dtype=self._output_dtype,
            options={"is_query": True},
        )
        return self._extract_dense(encoded)

    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
        """Asynchronously embed a batch of document texts.

        Args:
            texts: Document texts to embed.

        Returns:
            One dense vector (as a list of floats) per input text.
        """
        if not texts:
            return []

        from sie_sdk.types import Item

        batch = [Item(text=t) for t in texts]
        encoded = await self.async_client.encode(
            self._model,
            batch,
            output_types=["dense"],
            instruction=self._instruction,
            output_dtype=self._output_dtype,
        )
        return [self._extract_dense(entry) for entry in encoded]

    async def aembed_query(self, text: str) -> list[float]:
        """Asynchronously embed a single query string.

        Args:
            text: Query text to embed.

        Returns:
            The dense embedding as a list of floats.
        """
        from sie_sdk.types import Item

        # is_query lets asymmetric models apply their query-side encoding.
        encoded = await self.async_client.encode(
            self._model,
            Item(text=text),
            output_types=["dense"],
            instruction=self._instruction,
            output_dtype=self._output_dtype,
            options={"is_query": True},
        )
        return self._extract_dense(encoded)

    def _extract_dense(self, result: object) -> list[float]:
        """Pull the dense vector out of a single encode result.

        Args:
            result: EncodeResult from the SIE client; a dict (or attribute-style
                object) carrying a "dense" entry.

        Returns:
            The dense embedding as a plain list of floats.

        Raises:
            ValueError: If the result carries no dense embedding.
        """
        # SDK returns {"dense": np.ndarray, ...}; tolerate attribute-style too.
        if isinstance(result, dict):
            dense = result.get("dense")
        else:
            dense = getattr(result, "dense", None)
        if dense is None:
            raise ValueError("Encode result missing dense embedding")
        # numpy arrays expose tolist(); anything else is coerced via list().
        if hasattr(dense, "tolist"):
            return dense.tolist()
        return list(dense)
208
+
209
+
210
class SIESparseEncoder:
    """Sparse encoder for LangChain hybrid search.

    Compatible with PineconeHybridSearchRetriever's sparse_encoder interface.
    Provides encode_queries() and encode_documents() methods.

    Example:
        >>> from langchain_pinecone import PineconeHybridSearchRetriever
        >>> from sie_langchain import SIEEmbeddings, SIESparseEncoder
        >>>
        >>> retriever = PineconeHybridSearchRetriever(
        ...     embeddings=SIEEmbeddings(model="BAAI/bge-m3"),
        ...     sparse_encoder=SIESparseEncoder(model="BAAI/bge-m3"),
        ...     index=pinecone_index,
        ... )

    Args:
        base_url: URL of the SIE server.
        model: Model name/ID to use for encoding. Must support sparse output.
        gpu: Target GPU type for routing (e.g., "l4", "a100-80gb").
        timeout_s: Request timeout in seconds.
        options: Optional runtime options dict forwarded to the underlying
            SIEClient (mirrors SIEEmbeddings). Defaults to None.
    """

    def __init__(
        self,
        *,
        base_url: str = "http://localhost:8080",
        model: str = "BAAI/bge-m3",
        gpu: str | None = None,
        timeout_s: float = 180.0,
        options: dict[str, object] | None = None,
    ) -> None:
        """Initialize SIE sparse encoder."""
        self._base_url = base_url
        self._model = model
        self._gpu = gpu
        self._timeout_s = timeout_s
        # Keyword-only with a None default: backward-compatible addition for
        # consistency with SIEEmbeddings, which already forwards options.
        self._options = options
        self._client: SIEClient | None = None

    @property
    def client(self) -> SIEClient:
        """Get or create the sync SIEClient."""
        if self._client is None:
            from sie_sdk import SIEClient

            self._client = SIEClient(
                self._base_url,
                timeout_s=self._timeout_s,
                gpu=self._gpu,
                options=self._options,
            )
        return self._client

    def encode_queries(self, texts: list[str]) -> list[dict[str, list]]:
        """Encode query texts to sparse vectors.

        Args:
            texts: List of query texts to encode.

        Returns:
            List of dicts with "indices" and "values" keys.
        """
        return self._encode(texts, is_query=True)

    def encode_documents(self, texts: list[str]) -> list[dict[str, list]]:
        """Encode document texts to sparse vectors.

        Args:
            texts: List of document texts to encode.

        Returns:
            List of dicts with "indices" and "values" keys.
        """
        return self._encode(texts, is_query=False)

    def _encode(self, texts: list[str], *, is_query: bool) -> list[dict[str, list]]:
        """Shared sparse-encoding path for queries and documents.

        Args:
            texts: Texts to encode.
            is_query: When True, pass is_query so asymmetric models apply
                their query-side encoding (matches SIEEmbeddings.embed_query).

        Returns:
            List of dicts with "indices" and "values" keys.
        """
        if not texts:
            return []

        from sie_sdk.types import Item

        items = [Item(text=text) for text in texts]
        kwargs: dict[str, object] = {"output_types": ["sparse"]}
        if is_query:
            kwargs["options"] = {"is_query": True}
        results = self.client.encode(self._model, items, **kwargs)
        return [self._extract_sparse(result) for result in results]

    def _extract_sparse(self, result: object) -> dict[str, list]:
        """Extract sparse embedding from encode result.

        Args:
            result: EncodeResult from SIE client with "sparse" key.

        Returns:
            Dict with "indices" and "values" lists; both empty when the
            result carries no sparse output.
        """
        # SDK returns {"sparse": {"indices": np.ndarray, "values": np.ndarray}, ...};
        # tolerate attribute-style results too.
        sparse = result.get("sparse") if isinstance(result, dict) else getattr(result, "sparse", None)
        if sparse is None:
            return {"indices": [], "values": []}

        if isinstance(sparse, dict):
            indices = sparse.get("indices")
            values = sparse.get("values")
        else:
            indices = getattr(sparse, "indices", None)
            values = getattr(sparse, "values", None)

        return {"indices": self._to_list(indices), "values": self._to_list(values)}

    @staticmethod
    def _to_list(arr: object) -> list:
        """Convert a numpy array, any sequence, or None into a plain list."""
        if arr is None:
            return []
        # numpy arrays expose tolist(); other sequences are copied via list().
        if hasattr(arr, "tolist"):
            return arr.tolist()
        return list(arr)