vision-rag 0.1.0 (vision_rag-0.1.0.tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vision_rag-0.1.0/PKG-INFO +6 -0
- vision_rag-0.1.0/README.md +277 -0
- vision_rag-0.1.0/pyproject.toml +16 -0
- vision_rag-0.1.0/setup.cfg +4 -0
- vision_rag-0.1.0/vision-rag/__init__.py +38 -0
- vision_rag-0.1.0/vision-rag/embedding.py +328 -0
- vision_rag-0.1.0/vision-rag/generator.py +394 -0
- vision_rag-0.1.0/vision-rag/retriever.py +174 -0
- vision_rag-0.1.0/vision-rag/vectorstores.py +372 -0
- vision_rag-0.1.0/vision-rag/video_chunker.py +447 -0
- vision_rag-0.1.0/vision-rag/video_ingestion.py +103 -0
- vision_rag-0.1.0/vision_rag.egg-info/PKG-INFO +6 -0
- vision_rag-0.1.0/vision_rag.egg-info/SOURCES.txt +14 -0
- vision_rag-0.1.0/vision_rag.egg-info/dependency_links.txt +1 -0
- vision_rag-0.1.0/vision_rag.egg-info/requires.txt +1 -0
- vision_rag-0.1.0/vision_rag.egg-info/top_level.txt +1 -0

@@ -0,0 +1,277 @@ vision_rag-0.1.0/README.md

# vision-rag

**Video RAG** — A Python library for Retrieval-Augmented Generation over video.

Ask questions about any video and get answers using the transcript and visual frames.

---

## Install

```bash
pip install vision-rag
```

---

## How it works

```
Video → Chunks → Embeddings → Vector Store → Retrieval → Answer
```

1. **Ingest** — reads video metadata
2. **Chunk** — splits video into time-based overlapping chunks with frames and transcript
3. **Embed** — converts text and frames into vectors (your choice of model)
4. **Index** — stores vectors in FAISS or Chroma
5. **Retrieve** — searches both text and image indexes for a query
6. **Generate** — passes retrieved chunks to a VLM to generate the answer

---

## Quick Start

```python
from vision_rag.video_ingestion import VideoLoader
from vision_rag.video_chunker import Chunker, WhisperLocalASR
from vision_rag.embedding import EmbeddingBuilder, BaseTextEmbedder, BaseImageEmbedder
from vision_rag.vectorstores import FAISS
from vision_rag.retriever import Retriever
from vision_rag.generator import Generator, OllamaGenerator
import requests, base64

# --- your choice of embedder (example: Jina v4) ---
class JinaTextEmbedder(BaseTextEmbedder):
    def __init__(self, api_key):
        self.api_key = api_key

    def embed(self, text):
        r = requests.post("https://api.jina.ai/v1/embeddings",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={"model": "jina-embeddings-v4", "input": [{"text": text}], "task": "retrieval.passage"})
        return r.json()["data"][0]["embedding"]

class JinaImageEmbedder(BaseImageEmbedder):
    def __init__(self, api_key):
        self.api_key = api_key

    def embed(self, image_path):
        with open(image_path, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        r = requests.post("https://api.jina.ai/v1/embeddings",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={"model": "jina-embeddings-v4", "input": [{"image": b64}], "task": "retrieval.passage"})
        return r.json()["data"][0]["embedding"]

# Stage 1 — Ingest
video_doc = VideoLoader().load("video.mp4")

# Stage 2 — Chunk
chunks = Chunker(
    asr=WhisperLocalASR(model_size="base"),
    use_asr=True,
    use_frames=True,
    chunk_size=5.0,
    chunk_overlap=1.0,
).chunk("video.mp4")

# Stage 3 — Embed
text_embedder = JinaTextEmbedder(api_key="your_jina_key")
image_embedder = JinaImageEmbedder(api_key="your_jina_key")
embedded_chunks = EmbeddingBuilder(
    text_embedding=text_embedder,
    image_embedding=image_embedder,
).embed(chunks)

# Stage 4 — Index
store = FAISS()
store.index(embedded_chunks)

# Stage 5 + 6 — Retrieve and Generate
query = input("Ask a question: ")
results = Retriever(store=store, text_embedder=text_embedder).retrieve(query)
answer = Generator(llm=OllamaGenerator(model="llava:7b")).generate(query=query, results=results)

print(answer.text)
```

---

## Chunker

```python
from vision_rag.video_chunker import Chunker, WhisperLocalASR

chunker = Chunker(
    asr=WhisperLocalASR(model_size="medium"),  # or OpenAIASR, DeepgramASR, or your own
    use_asr=True,
    use_frames=True,
    chunk_size=5.0,      # seconds per chunk
    chunk_overlap=1.0,   # overlap between chunks
)
chunks = chunker.chunk("video.mp4")
```

Each chunk contains:

| Field | Description |
|---|---|
| `chunk.chunk_id` | chunk index |
| `chunk.start` | start time in seconds |
| `chunk.end` | end time in seconds |
| `chunk.duration` | duration in seconds |
| `chunk.text` | transcript for this chunk |
| `chunk.frame_path` | path to keyframe image |
| `chunk.metadata` | source info, asr provider, etc. |
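
A quick way to sanity-check chunk boundaries and transcripts is to loop over these fields (illustrative snippet, not part of the library):

```python
for chunk in chunks:
    print(f"[{chunk.start:.1f}s → {chunk.end:.1f}s] frame={chunk.frame_path}")
    print(f"  {chunk.text}")
```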

---

## ASR — Bring Your Own

vision-rag ships with built-in ASR providers, but you can plug in anything:

```python
from vision_rag.video_chunker import BaseASR

# built-in
from vision_rag.video_chunker import WhisperLocalASR, OpenAIASR, DeepgramASR

# your own — any model, any API
class MyASR(BaseASR):
    def transcribe(self, audio_path: str) -> list[dict]:
        return [{"start": 0.0, "end": 5.0, "text": "..."}]

chunker = Chunker(asr=MyASR(), use_asr=True)
```
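
Any transcription backend that yields timed segments can be adapted to the `BaseASR` contract. A minimal sketch wrapping `faster-whisper` (purely illustrative; the built-in `WhisperLocalASR` already covers local Whisper):

```python
from faster_whisper import WhisperModel
from vision_rag.video_chunker import BaseASR, Chunker

class FasterWhisperASR(BaseASR):
    def __init__(self, model_size: str = "base"):
        self.model = WhisperModel(model_size)

    def transcribe(self, audio_path: str) -> list[dict]:
        segments, _info = self.model.transcribe(audio_path)
        # map backend segments onto the {start, end, text} dicts BaseASR expects
        return [{"start": s.start, "end": s.end, "text": s.text} for s in segments]

chunker = Chunker(asr=FasterWhisperASR("small"), use_asr=True)
```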

---

## Embedding — Bring Your Own

```python
from vision_rag.embedding import EmbeddingBuilder, BaseTextEmbedder, BaseImageEmbedder

# your own text embedder
class MyTextEmbedder(BaseTextEmbedder):
    def embed(self, text: str) -> list[float]:
        return [...]  # your model or API

# your own image embedder
class MyImageEmbedder(BaseImageEmbedder):
    def embed(self, image_path: str) -> list[float]:
        return [...]  # your model or API

embedder = EmbeddingBuilder(
    text_embedding=MyTextEmbedder(),
    image_embedding=MyImageEmbedder(),
)
```

Built-in providers: `OpenAITextEmbedder`, `SentenceTransformerTextEmbedder`, `CLIPImageEmbedder`, `OpenAIImageEmbedder`
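
For a fully local setup, the built-ins can be wired straight into `EmbeddingBuilder` (mirroring the usage shown in `embedding.py`):

```python
from vision_rag.embedding import EmbeddingBuilder, SentenceTransformerTextEmbedder, CLIPImageEmbedder

embedder = EmbeddingBuilder(
    text_embedding=SentenceTransformerTextEmbedder(),  # local, no API key
    image_embedding=CLIPImageEmbedder(),               # local CLIP, ViT-B/32 by default
)
embedded_chunks = embedder.embed(chunks)
```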

---

## Vector Stores

```python
from vision_rag.vectorstores import FAISS, Chroma

# FAISS — fast local search
store = FAISS()
store.index(embedded_chunks)
store.save("my_index")
store.load("my_index")

# Chroma — persistent local DB
store = Chroma(path="my_chroma_db")
store.index(embedded_chunks)
```

Plug in your own:

```python
from vision_rag.vectorstores import BaseVectorStore

class MyVectorStore(BaseVectorStore):
    def index(self, embedded_chunks): ...
    def search_text(self, vector, top_k): ...
    def search_image(self, vector, top_k): ...
```
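
As an illustration of that interface, here is a brute-force in-memory store. The return type the `Retriever` actually expects is not spelled out in this README (the library defines a `SearchResult` type), so this sketch simply returns `(chunk, score)` pairs:

```python
import numpy as np
from vision_rag.vectorstores import BaseVectorStore

class InMemoryStore(BaseVectorStore):
    """Toy store: cosine similarity over every indexed chunk, no persistence."""

    def __init__(self):
        self.chunks = []

    def index(self, embedded_chunks):
        self.chunks = list(embedded_chunks)

    def _search(self, vector, top_k, attr):
        query = np.asarray(vector, dtype=float)
        scored = []
        for chunk in self.chunks:
            vec = getattr(chunk, attr)
            if vec is None:
                continue
            v = np.asarray(vec, dtype=float)
            score = float(query @ v / (np.linalg.norm(query) * np.linalg.norm(v) + 1e-9))
            scored.append((chunk, score))
        scored.sort(key=lambda pair: pair[1], reverse=True)
        return scored[:top_k]

    def search_text(self, vector, top_k):
        return self._search(vector, top_k, "text_vector")

    def search_image(self, vector, top_k):
        return self._search(vector, top_k, "image_vector")
```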

---

## Retrieval

```python
from vision_rag.retriever import Retriever

retriever = Retriever(
    store=store,
    text_embedder=text_embedder,
    top_k_text=5,
    top_k_image=5,
)

# semantic search
results = retriever.retrieve("What did they say about frozen yogurt?")
results.text_results   # top text matches
results.image_results  # top image matches
results.all            # combined, ranked by score

# time-based search
chunks = retriever.retrieve_by_time(start=10.0, end=20.0)
```

---

## Generation — Bring Your Own VLM

```python
from vision_rag.generator import Generator, OpenAIGenerator, AnthropicGenerator, GeminiGenerator, OllamaGenerator

# GPT-4o
generator = Generator(llm=OpenAIGenerator(api_key="sk-..."))

# Claude
generator = Generator(llm=AnthropicGenerator(api_key="sk-ant-..."))

# Gemini
generator = Generator(llm=GeminiGenerator(api_key="..."))

# Ollama (local)
generator = Generator(llm=OllamaGenerator(model="llava:7b"))

# your own
from vision_rag.generator import BaseGenerator

class MyGenerator(BaseGenerator):
    def generate(self, query: str, chunks) -> str:
        return "answer..."

generator = Generator(llm=MyGenerator())
```
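
A custom generator can be as small as formatting the retrieved context. A toy sketch (it assumes the `chunks` passed to `generate()` are chunk-like objects with a `.text` attribute, which this README does not spell out):

```python
class TranscriptEchoGenerator(BaseGenerator):
    """No VLM call: echoes the query together with the retrieved transcript text."""

    def generate(self, query: str, chunks) -> str:
        context = "\n".join(c.text for c in chunks if getattr(c, "text", None))
        return f"Q: {query}\n\nRetrieved transcript:\n{context}"

generator = Generator(llm=TranscriptEchoGenerator())
```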

---

## Dependencies

vision-rag ships with only one hard dependency — `pymediainfo`. Everything else is installed only for the features you use:

| Feature | Install |
|---|---|
| ASR (local Whisper) | `pip install faster-whisper` |
| ASR (OpenAI) | `pip install openai` |
| ASR (Deepgram) | `pip install deepgram-sdk` |
| Frames + audio extraction | `ffmpeg` (e.g. `brew install ffmpeg` on macOS) |
| FAISS vector store | `pip install faiss-cpu` |
| Chroma vector store | `pip install chromadb` |
| OpenAI embedding | `pip install openai` |
| Sentence Transformers | `pip install sentence-transformers` |
| CLIP image embedding | `pip install git+https://github.com/openai/CLIP.git torch Pillow` |
| Ollama generation | `pip install ollama` |
| Anthropic generation | `pip install anthropic` |
| Gemini generation | `pip install google-genai` |
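
For example, a fully local stack (Whisper ASR, CLIP frames, FAISS index, Ollama generation) could be installed like this (assumes macOS/Homebrew for ffmpeg; use your platform's package manager otherwise):

```bash
pip install vision-rag faster-whisper faiss-cpu sentence-transformers ollama
pip install git+https://github.com/openai/CLIP.git torch Pillow
brew install ffmpeg
```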

---

## License

MIT

@@ -0,0 +1,16 @@ vision_rag-0.1.0/pyproject.toml

[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "vision-rag"
version = "0.1.0"
description = "Video RAG — a Python library for retrieval-augmented generation over video"
requires-python = ">=3.9"
dependencies = [
    "pymediainfo",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["vision-rag*"]

@@ -0,0 +1,38 @@ vision_rag-0.1.0/vision-rag/__init__.py

from vision_rag.video_ingestion import VideoLoader, VideoDocument
from vision_rag.video_chunker import Chunker, Chunk, BaseASR, WhisperLocalASR, OpenAIASR, DeepgramASR
from vision_rag.embedding import (
    EmbeddingBuilder, EmbeddedChunk,
    BaseTextEmbedder, BaseImageEmbedder,
    OpenAITextEmbedder, SentenceTransformerTextEmbedder,
    CLIPImageEmbedder, OpenAIImageEmbedder,
)
from vision_rag.vectorstores import BaseVectorStore, SearchResult, FAISS, Chroma
from vision_rag.retriever import Retriever, RetrievalResult
from vision_rag.generator import (
    Generator, GeneratorAnswer,
    BaseGenerator,
    OpenAIGenerator, AnthropicGenerator, GeminiGenerator, OllamaGenerator,
)

__version__ = "0.1.0"
__all__ = [
    # Stage 1
    "VideoLoader", "VideoDocument",
    # Stage 2
    "Chunker", "Chunk",
    "BaseASR", "WhisperLocalASR", "OpenAIASR", "DeepgramASR",
    # Stage 3
    "EmbeddingBuilder", "EmbeddedChunk",
    "BaseTextEmbedder", "BaseImageEmbedder",
    "OpenAITextEmbedder", "SentenceTransformerTextEmbedder",
    "CLIPImageEmbedder", "OpenAIImageEmbedder",
    # Stage 4
    "BaseVectorStore", "SearchResult",
    "FAISS", "Chroma",
    # Stage 5
    "Retriever", "RetrievalResult",
    # Stage 6
    "Generator", "GeneratorAnswer",
    "BaseGenerator",
    "OpenAIGenerator", "AnthropicGenerator", "GeminiGenerator", "OllamaGenerator",
]

@@ -0,0 +1,328 @@ vision_rag-0.1.0/vision-rag/embedding.py

"""
vision-rag/embedding.py

Stage 3 of the vision-rag pipeline — Embedding.
Converts chunk.text and chunk.frame_path into vectors.

Usage:
    from vision_rag.embedding import EmbeddingBuilder, CLIPImageEmbedder, OpenAITextEmbedder

    embedder = EmbeddingBuilder(
        text_embedding=OpenAITextEmbedder(),
        image_embedding=CLIPImageEmbedder()
    )
    embedded_chunks = embedder.embed(chunks)
"""

from __future__ import annotations

import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

from vision_rag.video_chunker import Chunk


# ──────────────────────────────────────────────────────────────
# EmbeddedChunk — Chunk + vectors
# ──────────────────────────────────────────────────────────────

@dataclass
class EmbeddedChunk:
    """
    A Chunk with text and image vectors attached.
    This is what flows into the indexing stage.
    """
    # -- original chunk fields (unchanged) --
    chunk_id: int
    video_path: str
    start: float
    end: float
    duration: float
    frame_path: Optional[str]
    text: Optional[str]
    metadata: dict = field(default_factory=dict)

    # -- new: vectors --
    text_vector: Optional[list[float]] = None   # None if text=None or text_embedding not set
    image_vector: Optional[list[float]] = None  # None if frame_path=None or image_embedding not set

    @classmethod
    def from_chunk(cls, chunk: Chunk) -> "EmbeddedChunk":
        """Create an EmbeddedChunk from a Chunk (vectors are None until embed() is called)."""
        return cls(
            chunk_id=chunk.chunk_id,
            video_path=chunk.video_path,
            start=chunk.start,
            end=chunk.end,
            duration=chunk.duration,
            frame_path=chunk.frame_path,
            text=chunk.text,
            metadata=chunk.metadata.copy(),
        )

    def __repr__(self) -> str:
        tv = f"dim={len(self.text_vector)}" if self.text_vector else "None"
        iv = f"dim={len(self.image_vector)}" if self.image_vector else "None"
        return (
            f"EmbeddedChunk("
            f"id={self.chunk_id}, "
            f"start={self.start:.2f}s, "
            f"end={self.end:.2f}s, "
            f"text_vector={tv}, "
            f"image_vector={iv}"
            f")"
        )


# ──────────────────────────────────────────────────────────────
# Base classes — plug in anything
# ──────────────────────────────────────────────────────────────

class BaseTextEmbedder:
    """
    Base class for all text embedding providers.
    Subclass and implement embed() to use any model or API.

    Example:
        class MyTextEmbedder(BaseTextEmbedder):
            def embed(self, text: str) -> list[float]:
                # your model or API here
                return [...]
    """

    def embed(self, text: str) -> list[float]:
        raise NotImplementedError("Implement embed() in your TextEmbedder subclass.")

    @property
    def provider_name(self) -> str:
        return self.__class__.__name__


class BaseImageEmbedder:
    """
    Base class for all image embedding providers.
    Subclass and implement embed() to use any model or API.

    Example:
        class MyImageEmbedder(BaseImageEmbedder):
            def embed(self, image_path: str) -> list[float]:
                # your model or API here
                return [...]
    """

    def embed(self, image_path: str) -> list[float]:
        raise NotImplementedError("Implement embed() in your ImageEmbedder subclass.")

    @property
    def provider_name(self) -> str:
        return self.__class__.__name__


# ──────────────────────────────────────────────────────────────
# Built-in Text Embedders
# ──────────────────────────────────────────────────────────────

class OpenAITextEmbedder(BaseTextEmbedder):
    """
    OpenAI text embeddings (text-embedding-3-small by default).

    Usage:
        embedder = OpenAITextEmbedder(api_key="sk-...")
        # or set OPENAI_API_KEY env var and just do OpenAITextEmbedder()
    """

    def __init__(self, api_key: Optional[str] = None, model: str = "text-embedding-3-small"):
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError(
                "OpenAI API key required. Pass api_key= or set OPENAI_API_KEY env var."
            )
        self.model = model
        self._client = None

    def embed(self, text: str) -> list[float]:
        try:
            from openai import OpenAI
        except ImportError:
            raise RuntimeError("pip install openai")

        if self._client is None:
            self._client = OpenAI(api_key=self.api_key)

        response = self._client.embeddings.create(input=text, model=self.model)
        return response.data[0].embedding


class SentenceTransformerTextEmbedder(BaseTextEmbedder):
    """
    Local text embeddings using SentenceTransformers. No API key needed.

    Usage:
        embedder = SentenceTransformerTextEmbedder()
        embedder = SentenceTransformerTextEmbedder(model="all-mpnet-base-v2")

    pip install sentence-transformers
    """

    def __init__(self, model: str = "all-MiniLM-L6-v2"):
        self.model_name = model
        self._model = None

    def embed(self, text: str) -> list[float]:
        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise RuntimeError("pip install sentence-transformers")

        if self._model is None:
            self._model = SentenceTransformer(self.model_name)

        return self._model.encode(text).tolist()


# ──────────────────────────────────────────────────────────────
# Built-in Image Embedders
# ──────────────────────────────────────────────────────────────

class CLIPImageEmbedder(BaseImageEmbedder):
    """
    Local image embeddings using OpenAI CLIP. No API key needed.
    Industry standard for image embeddings.

    Usage:
        embedder = CLIPImageEmbedder()
        embedder = CLIPImageEmbedder(model="ViT-B/32")

    pip install git+https://github.com/openai/CLIP.git Pillow torch
    """

    def __init__(self, model: str = "ViT-B/32", device: str = "cpu"):
        self.model_name = model
        self.device = device
        self._model = None
        self._preprocess = None

    def embed(self, image_path: str) -> list[float]:
        try:
            import clip
            import torch
            from PIL import Image
        except ImportError:
            raise RuntimeError(
                "pip install git+https://github.com/openai/CLIP.git Pillow torch"
            )

        if self._model is None:
            self._model, self._preprocess = clip.load(self.model_name, device=self.device)

        image = self._preprocess(Image.open(image_path)).unsqueeze(0).to(self.device)
        with torch.no_grad():
            vector = self._model.encode_image(image)
        return vector.squeeze().tolist()


class OpenAIImageEmbedder(BaseImageEmbedder):
    """
    Image embeddings via OpenAI vision model.
    Encodes the image as base64 and gets an embedding via the API.

    Usage:
        embedder = OpenAIImageEmbedder(api_key="sk-...")
        # or set OPENAI_API_KEY env var

    pip install openai
    """

    def __init__(self, api_key: Optional[str] = None, model: str = "text-embedding-3-small"):
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError(
                "OpenAI API key required. Pass api_key= or set OPENAI_API_KEY env var."
            )
        self.model = model
        self._client = None

    def embed(self, image_path: str) -> list[float]:
        import base64
        try:
            from openai import OpenAI
        except ImportError:
            raise RuntimeError("pip install openai")

        if self._client is None:
            self._client = OpenAI(api_key=self.api_key)

        with open(image_path, "rb") as f:
            b64 = base64.b64encode(f.read()).decode("utf-8")

        response = self._client.embeddings.create(
            input=f"data:image/jpeg;base64,{b64}",
            model=self.model,
        )
        return response.data[0].embedding


# ──────────────────────────────────────────────────────────────
# EmbeddingBuilder — main entry point
# ──────────────────────────────────────────────────────────────

class EmbeddingBuilder:
    """
    Embeds a list of Chunk objects into EmbeddedChunk objects.

    Parameters
    ----------
    text_embedding : BaseTextEmbedder | None
        Any text embedding provider. None = skip text embedding.
    image_embedding : BaseImageEmbedder | None
        Any image embedding provider. None = skip image embedding.

    Usage:
        embedder = EmbeddingBuilder(
            text_embedding=OpenAITextEmbedder(),
            image_embedding=CLIPImageEmbedder()
        )
        embedded_chunks = embedder.embed(chunks)
    """

    def __init__(
        self,
        text_embedding: Optional[BaseTextEmbedder] = None,
        image_embedding: Optional[BaseImageEmbedder] = None,
    ):
        if text_embedding is None and image_embedding is None:
            raise ValueError(
                "At least one of text_embedding or image_embedding must be provided."
            )
        self.text_embedding = text_embedding
        self.image_embedding = image_embedding

    def embed(self, chunks: list[Chunk]) -> list[EmbeddedChunk]:
        """
        Embed a list of chunks. Returns a list of EmbeddedChunk objects.

        Parameters
        ----------
        chunks : list[Chunk]
            Output from Chunker.chunk()
        """
        embedded = []
        for chunk in chunks:
            ec = EmbeddedChunk.from_chunk(chunk)

            # -- text vector --
            if self.text_embedding and chunk.text:
                ec.text_vector = self.text_embedding.embed(chunk.text)
                ec.metadata["text_embedder"] = self.text_embedding.provider_name

            # -- image vector --
            if self.image_embedding and chunk.frame_path:
                if Path(chunk.frame_path).exists():
                    ec.image_vector = self.image_embedding.embed(chunk.frame_path)
                    ec.metadata["image_embedder"] = self.image_embedding.provider_name

            embedded.append(ec)

        return embedded