haiku.rag 0.7.2__py3-none-any.whl → 0.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of haiku.rag might be problematic; see the package registry's advisory page for more details.

@@ -9,7 +9,7 @@ class EmbedderBase:
9
9
  self._model = model
10
10
  self._vector_dim = vector_dim
11
11
 
12
- async def embed(self, text: str) -> list[float]:
12
+ async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
13
13
  raise NotImplementedError(
14
14
  "Embedder is an abstract class. Please implement the embed method in a subclass."
15
15
  )
@@ -1,11 +1,17 @@
1
- from ollama import AsyncClient
1
+ from openai import AsyncOpenAI
2
2
 
3
3
  from haiku.rag.config import Config
4
4
  from haiku.rag.embeddings.base import EmbedderBase
5
5
 
6
6
 
7
7
  class Embedder(EmbedderBase):
8
- async def embed(self, text: str) -> list[float]:
9
- client = AsyncClient(host=Config.OLLAMA_BASE_URL)
10
- res = await client.embeddings(model=self._model, prompt=text)
11
- return list(res["embedding"])
8
+ async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
9
+ client = AsyncOpenAI(base_url=f"{Config.OLLAMA_BASE_URL}/v1", api_key="dummy")
10
+ response = await client.embeddings.create(
11
+ model=self._model,
12
+ input=text,
13
+ )
14
+ if isinstance(text, str):
15
+ return response.data[0].embedding
16
+ else:
17
+ return [item.embedding for item in response.data]
@@ -4,10 +4,13 @@ from haiku.rag.embeddings.base import EmbedderBase
4
4
 
5
5
 
6
6
  class Embedder(EmbedderBase):
7
- async def embed(self, text: str) -> list[float]:
7
+ async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
8
8
  client = AsyncOpenAI()
9
9
  response = await client.embeddings.create(
10
10
  model=self._model,
11
11
  input=text,
12
12
  )
13
- return response.data[0].embedding
13
+ if isinstance(text, str):
14
+ return response.data[0].embedding
15
+ else:
16
+ return [item.embedding for item in response.data]
@@ -5,7 +5,7 @@ from haiku.rag.embeddings.base import EmbedderBase
5
5
 
6
6
 
7
7
  class Embedder(EmbedderBase):
8
- async def embed(self, text: str) -> list[float]:
8
+ async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
9
9
  client = AsyncOpenAI(
10
10
  base_url=f"{Config.VLLM_EMBEDDINGS_BASE_URL}/v1", api_key="dummy"
11
11
  )
@@ -13,4 +13,7 @@ class Embedder(EmbedderBase):
13
13
  model=self._model,
14
14
  input=text,
15
15
  )
16
- return response.data[0].embedding
16
+ if isinstance(text, str):
17
+ return response.data[0].embedding
18
+ else:
19
+ return [item.embedding for item in response.data]
@@ -4,10 +4,14 @@ try:
4
4
  from haiku.rag.embeddings.base import EmbedderBase
5
5
 
6
6
  class Embedder(EmbedderBase):
7
- async def embed(self, text: str) -> list[float]:
7
+ async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
8
8
  client = Client()
9
- res = client.embed([text], model=self._model, output_dtype="float")
10
- return res.embeddings[0] # type: ignore[return-value]
9
+ if isinstance(text, str):
10
+ res = client.embed([text], model=self._model, output_dtype="float")
11
+ return res.embeddings[0] # type: ignore[return-value]
12
+ else:
13
+ res = client.embed(text, model=self._model, output_dtype="float")
14
+ return res.embeddings # type: ignore[return-value]
11
15
 
12
16
  except ImportError:
13
17
  pass
@@ -154,13 +154,7 @@ class ChunkRepository:
154
154
  """Create chunks and embeddings for a document from DoclingDocument."""
155
155
  chunk_texts = await chunker.chunk(document)
156
156
 
157
- # Generate embeddings in parallel for all chunks
158
- embeddings_tasks = []
159
- for chunk_text in chunk_texts:
160
- embeddings_tasks.append(self.embedder.embed(chunk_text))
161
-
162
- # Wait for all embeddings to complete
163
- embeddings = await asyncio.gather(*embeddings_tasks)
157
+ embeddings = await self.embedder.embed(chunk_texts)
164
158
 
165
159
  # Prepare all chunk records for batch insertion
166
160
  chunk_records = []
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: haiku.rag
3
- Version: 0.7.2
3
+ Version: 0.7.3
4
4
  Summary: Retrieval Augmented Generation (RAG) with LanceDB
5
5
  Author-email: Yiorgis Gozadinos <ggozadinos@gmail.com>
6
6
  License: MIT
@@ -11,11 +11,11 @@ haiku/rag/monitor.py,sha256=r386nkhdlsU8UECwIuVwnrSlgMk3vNIuUZGNIzkZuec,2770
11
11
  haiku/rag/reader.py,sha256=qkPTMJuQ_o4sK-8zpDl9WFYe_MJ7aL_gUw6rczIpW-g,3274
12
12
  haiku/rag/utils.py,sha256=c8F0ECsFSqvQxzxINAOAnvShoOnJPLsOaNE3JEY2JSc,3230
13
13
  haiku/rag/embeddings/__init__.py,sha256=n7aHW3BxHlpGxU4ze4YYDOsljzFpEep8dwVE2n45JoE,1218
14
- haiku/rag/embeddings/base.py,sha256=NTQvuzbZPu0LBo5wAu3qGyJ4xXUaRAt1fjBO0ygWn_Y,465
15
- haiku/rag/embeddings/ollama.py,sha256=y6-lp0XpbnyIjoOEdtSzMdEVkU5glOwnWQ1FkpUZnpI,370
16
- haiku/rag/embeddings/openai.py,sha256=iA-DewCOSip8PLU_RhEJHFHBle4DtmCCIGNfGs58Wvk,357
17
- haiku/rag/embeddings/vllm.py,sha256=ymNDmpIvDWmkmce5j-TRc_QnJR4qwZCpnYA0tP3ab5o,480
18
- haiku/rag/embeddings/voyageai.py,sha256=0hiRTIqu-bpl-4OaCtMHvWfPdgbrzhnfZJowSV8pLRA,415
14
+ haiku/rag/embeddings/base.py,sha256=BnSviKrlzjv3L0sZJs_T-pxfawd-bcTak-rsX-D2f3A,497
15
+ haiku/rag/embeddings/ollama.py,sha256=LuLlHH6RGoO9_gFCIlbmesuXOj017gTw6z-p8Ez0CfE,595
16
+ haiku/rag/embeddings/openai.py,sha256=fIFCk-jpUtaW0xsnrQnJ824O0UCjaGG2sgvBzREhilc,503
17
+ haiku/rag/embeddings/vllm.py,sha256=vhaUnCn6VMkfSluLhWKtSV-sekFaPsp4pKo2N7-SBCY,626
18
+ haiku/rag/embeddings/voyageai.py,sha256=UW-MW4tJKnPB6Fs2P7A3yt-ZeRm46H9npckchSriPX8,661
19
19
  haiku/rag/qa/__init__.py,sha256=Sl7Kzrg9CuBOcMF01wc1NtQhUNWjJI0MhIHfCWrb8V4,434
20
20
  haiku/rag/qa/agent.py,sha256=15-jMuF08U0uxGdqgQysKMZLr8BUWssI76PtyQ2Ngd8,2912
21
21
  haiku/rag/qa/prompts.py,sha256=xdT4cyrOrAK9UDgVqyev1wHF49jD57Bh40gx2sH4NPI,3341
@@ -30,12 +30,12 @@ haiku/rag/store/models/__init__.py,sha256=s0E72zneGlowvZrFWaNxHYjOAUjgWdLxzdYsnv
30
30
  haiku/rag/store/models/chunk.py,sha256=ZNyTfO6lh3rXWLVYO3TZcitbL4LSUGr42fR6jQQ5iQc,364
31
31
  haiku/rag/store/models/document.py,sha256=zSSpt6pyrMJAIXGQvIcqojcqUzwZnhp3WxVokaWxNRc,396
32
32
  haiku/rag/store/repositories/__init__.py,sha256=Olv5dLfBQINRV3HrsfUpjzkZ7Qm7goEYyMNykgo_DaY,291
33
- haiku/rag/store/repositories/chunk.py,sha256=5S77mGh6pWxPHjaXriJGmvbSOhoNM8tLwygE2GXPlbU,13586
33
+ haiku/rag/store/repositories/chunk.py,sha256=v4y4eh4yIf6zJaWfHxljvnmb12dmvwdinzmxQt8Lvhs,13343
34
34
  haiku/rag/store/repositories/document.py,sha256=lP8Lo82KTP-qwXFRpYZ46WjeAdAsHwZ5pJcrXdz4g0U,6988
35
35
  haiku/rag/store/repositories/settings.py,sha256=dqnAvm-98nQrWpLBbf9QghJw673QD80-iqQhRMP5t0c,5025
36
36
  haiku/rag/store/upgrades/__init__.py,sha256=wUiEoSiHTahvuagx93E4FB07v123AhdbOjwUkPusiIg,14
37
- haiku_rag-0.7.2.dist-info/METADATA,sha256=CLBIBBUYbvBbtynct-n_q7ZVO6cayLa7YUeFivrwEf4,4610
38
- haiku_rag-0.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
39
- haiku_rag-0.7.2.dist-info/entry_points.txt,sha256=G1U3nAkNd5YDYd4v0tuYFbriz0i-JheCsFuT9kIoGCI,48
40
- haiku_rag-0.7.2.dist-info/licenses/LICENSE,sha256=eXZrWjSk9PwYFNK9yUczl3oPl95Z4V9UXH7bPN46iPo,1065
41
- haiku_rag-0.7.2.dist-info/RECORD,,
37
+ haiku_rag-0.7.3.dist-info/METADATA,sha256=PAvA6VZuyZp9IekXhYCLWDxM1wMZMmujtntxZE2lBoE,4610
38
+ haiku_rag-0.7.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
39
+ haiku_rag-0.7.3.dist-info/entry_points.txt,sha256=G1U3nAkNd5YDYd4v0tuYFbriz0i-JheCsFuT9kIoGCI,48
40
+ haiku_rag-0.7.3.dist-info/licenses/LICENSE,sha256=eXZrWjSk9PwYFNK9yUczl3oPl95Z4V9UXH7bPN46iPo,1065
41
+ haiku_rag-0.7.3.dist-info/RECORD,,