llama-index-vector-stores-opensearch 0.1.10.tar.gz → 0.1.11.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-index-vector-stores-opensearch might be problematic. See the release's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-index-vector-stores-opensearch
3
- Version: 0.1.10
3
+ Version: 0.1.11
4
4
  Summary: llama-index vector_stores opensearch integration
5
5
  License: MIT
6
6
  Author: Your Name
@@ -11,6 +11,7 @@ Classifier: Programming Language :: Python :: 3
11
11
  Classifier: Programming Language :: Python :: 3.9
12
12
  Classifier: Programming Language :: Python :: 3.10
13
13
  Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
14
15
  Requires-Dist: llama-index-core (>=0.10.1,<0.11.0)
15
16
  Requires-Dist: opensearch-py[async] (>=2.4.2,<3.0.0)
16
17
  Description-Content-Type: text/markdown
@@ -265,17 +265,34 @@ class OpensearchVectorClient:
265
265
  k: int,
266
266
  filters: Optional[MetadataFilters] = None,
267
267
  ) -> Dict:
268
- knn_query = self._knn_search_query(
269
- embedding_field, query_embedding, k, filters
270
- )["query"]
271
- lexical_query = {"must": {"match": {text_field: {"query": query_str}}}}
268
+ knn_query = self._knn_search_query(embedding_field, query_embedding, k, filters)
269
+ lexical_query = self._lexical_search_query(text_field, query_str, k, filters)
270
+
271
+ return {
272
+ "size": k,
273
+ "query": {
274
+ "hybrid": {"queries": [lexical_query["query"], knn_query["query"]]}
275
+ },
276
+ }
277
+
278
+ def _lexical_search_query(
279
+ self,
280
+ text_field: str,
281
+ query_str: str,
282
+ k: int,
283
+ filters: Optional[MetadataFilters] = None,
284
+ ) -> Dict:
285
+ lexical_query = {
286
+ "bool": {"must": {"match": {text_field: {"query": query_str}}}}
287
+ }
272
288
 
273
289
  parsed_filters = self._parse_filters(filters)
274
290
  if len(parsed_filters) > 0:
275
- lexical_query["filter"] = parsed_filters
291
+ lexical_query["bool"]["filter"] = parsed_filters
292
+
276
293
  return {
277
294
  "size": k,
278
- "query": {"hybrid": {"queries": [{"bool": lexical_query}, knn_query]}},
295
+ "query": lexical_query,
279
296
  }
280
297
 
281
298
  def __get_painless_scripting_source(
@@ -389,6 +406,11 @@ class OpensearchVectorClient:
389
406
  params = {
390
407
  "search_pipeline": self._search_pipeline,
391
408
  }
409
+ elif query_mode == VectorStoreQueryMode.TEXT_SEARCH:
410
+ search_query = self._lexical_search_query(
411
+ self._text_field, query_str, k, filters=filters
412
+ )
413
+ params = None
392
414
  else:
393
415
  search_query = self._knn_search_query(
394
416
  self._embedding_field, query_embedding, k, filters=filters
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
27
27
  license = "MIT"
28
28
  name = "llama-index-vector-stores-opensearch"
29
29
  readme = "README.md"
30
- version = "0.1.10"
30
+ version = "0.1.11"
31
31
 
32
32
  [tool.poetry.dependencies]
33
33
  python = ">=3.8.1,<4.0"