llama-index-vector-stores-opensearch 0.5.0__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-index-vector-stores-opensearch
-Version: 0.5.0
+Version: 0.5.1
 Summary: llama-index vector_stores opensearch integration
 License: MIT
 Author: Your Name
@@ -4,8 +4,8 @@ import uuid
 from datetime import datetime
 from typing import Any, Dict, Iterable, List, Optional, Union, cast
 
+from llama_index.core.async_utils import asyncio_run
 from llama_index.core.bridge.pydantic import PrivateAttr
-
 from llama_index.core.schema import BaseNode, MetadataMode, TextNode
 from llama_index.core.vector_stores.types import (
     FilterCondition,
@@ -123,14 +123,22 @@ class OpensearchVectorClient:
         self._os_async_client = os_async_client or self._get_async_opensearch_client(
             self._endpoint, **kwargs
         )
-        self._os_version = self._get_opensearch_version()
-        self._efficient_filtering_enabled = self._is_efficient_filtering_enabled(
-            self._os_version
-        )
+        self._efficient_filtering_enabled = self._is_efficient_filtering_enabled()
         not_found_error = self._import_not_found_error()
 
         try:
             self._os_client.indices.get(index=self._index)
+        except TypeError:
+            # Probably using async so switch to async client
+            try:
+                asyncio_run(self._os_async_client.indices.get(index=self._index))
+            except not_found_error:
+                asyncio_run(
+                    self._os_async_client.indices.create(
+                        index=self._index, body=idx_conf
+                    )
+                )
+                asyncio_run(self._os_async_client.indices.refresh(index=self._index))
         except not_found_error:
             self._os_client.indices.create(index=self._index, body=idx_conf)
             self._os_client.indices.refresh(index=self._index)
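
For context, the constructor change above first probes the index with the synchronous client and, if that raises TypeError (an async-only setup), repeats the get/create/refresh calls through the async client, each driven to completion with asyncio_run. Below is a minimal sketch of that fallback pattern, using hypothetical SyncStub/AsyncStub clients and the standard-library asyncio.run in place of llama_index.core.async_utils.asyncio_run:

    # Sketch of the sync-first / async-fallback flow; SyncStub and AsyncStub
    # are hypothetical stand-ins for the OpenSearch sync and async clients,
    # not the real opensearch-py classes.
    import asyncio

    class SyncStub:
        def get(self, index: str) -> dict:
            # An async-only configuration can surface as a TypeError here.
            raise TypeError("sync client not usable in this setup")

    class AsyncStub:
        async def get(self, index: str) -> dict:
            return {"index": index}

    def ensure_index(sync_client: SyncStub, async_client: AsyncStub, index: str) -> dict:
        try:
            return sync_client.get(index=index)
        except TypeError:
            # Switch to the async client and run it synchronously,
            # mirroring the asyncio_run(...) calls in the constructor.
            return asyncio.run(async_client.get(index=index))

    print(ensure_index(SyncStub(), AsyncStub(), "vectors"))  # {'index': 'vectors'}
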
@@ -617,10 +625,18 @@ class OpensearchVectorClient:
            return True
        return False
 
-    def _is_efficient_filtering_enabled(self, os_version: str) -> bool:
+    def _is_efficient_filtering_enabled(self) -> bool:
         """Check if kNN with efficient filtering is enabled."""
-        major, minor, patch = os_version.split(".")
-        return int(major) >= 2 and int(minor) >= 9
+        # Technically, AOSS supports efficient filtering,
+        # but we can't check the version number using .info(); AOSS doesn't support 'GET /'
+        # so we must skip and disable by default.
+        if self.is_aoss:
+            ef_enabled = False
+        else:
+            self._os_version = self._get_opensearch_version()
+            major, minor, patch = self.os_version.split(".")
+            ef_enabled = int(major) >= 2 and int(minor) >= 9
+        return ef_enabled
 
     def index_results(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
         """Store results in the index."""
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-opensearch"
 readme = "README.md"
-version = "0.5.0"
+version = "0.5.1"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"