hie-rag 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hie_rag/ai_client.py +26 -0
- hie_rag/hie_rag.py +6 -6
- hie_rag/process.py +4 -4
- hie_rag/split.py +4 -4
- hie_rag/split_and_process.py +4 -4
- hie_rag/tree_index.py +4 -4
- hie_rag/utils.py +15 -15
- hie_rag/vectordb.py +2 -2
- {hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/METADATA +2 -2
- hie_rag-0.2.0.dist-info/RECORD +14 -0
- {hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/WHEEL +1 -1
- hie_rag/app.py +0 -77
- hie_rag-0.1.3.dist-info/RECORD +0 -14
- {hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/top_level.txt +0 -0
hie_rag/ai_client.py
ADDED
@@ -0,0 +1,26 @@
+import requests
+
+
+class AiClient:
+    def __init__(self, base_url="http://localhost:11434"):
+        self.base_url = base_url
+        self.headers = {"Content-Type": "application/json"}
+
+    def get_embedding(self, text: str, model="nomic-embed-text") -> list:
+        url = f"{self.base_url}/api/embeddings"
+        payload = {
+            "model": model,
+            "prompt": text
+        }
+        response = requests.post(url, json=payload, headers=self.headers, timeout=60)
+        response.raise_for_status()
+        data = response.json()
+
+        # Extract embedding, adapt if your API response structure differs
+        embedding = data.get("embedding") or (data.get("data") and data["data"][0].get("embedding"))
+        if embedding is None:
+            raise ValueError("Embedding not found in Ollama response")
+        return embedding
+
+    def list_embeddings(self, texts: list, model="nomic-embed-text") -> list:
+        return [self.get_embedding(text, model=model) for text in texts]
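The new AiClient talks to Ollama's /api/embeddings endpoint directly over requests, replacing the OpenAI SDK for embedding calls. A minimal usage sketch, assuming a local Ollama server with the nomic-embed-text model already pulled:

    from hie_rag.ai_client import AiClient

    # Assumes `ollama serve` is running locally and `ollama pull nomic-embed-text` was done
    client = AiClient(base_url="http://localhost:11434")

    vector = client.get_embedding("Hierarchical retrieval groups chunks under summaries.")
    print(len(vector))  # embedding dimensionality (typically 768 for nomic-embed-text)

    # list_embeddings simply maps get_embedding over the input list
    vectors = client.list_embeddings(["first chunk", "second chunk"])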
hie_rag/hie_rag.py
CHANGED
@@ -6,12 +6,12 @@ from hie_rag.vectordb import Vectordb
 
 
 class HieRag:
-    def __init__(self,
-        self.split = Split(
-        self.utils = Utils(
-        self.tree_index = TreeIndex(
-        self.process = Process(
-        self.vector_db = Vectordb(path=path,
+    def __init__(self, base_url, path="./db", collection_name="db_collection"):
+        self.split = Split(base_url=base_url)
+        self.utils = Utils(base_url=base_url)
+        self.tree_index = TreeIndex(base_url=base_url)
+        self.process = Process(base_url=base_url)
+        self.vector_db = Vectordb(path=path, base_url=base_url, collection_name=collection_name)
 
     def process_and_save_index_stream(self, file_name: str, uploaded_file: bytes, min_chunk_size, max_chunk_size):
         yield {"status": "🔍 Extracting text..."}
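The HieRag constructor now threads a single Ollama base_url through every component in place of the old OpenAI-keyed setup. A hedged migration sketch; the 0.1.3 call shape is inferred from the deleted app.py below, which passed api_key= everywhere:

    from hie_rag.hie_rag import HieRag

    # 0.1.3 (inferred): rag = HieRag(api_key="sk-...", ...)
    # 0.2.0:
    rag = HieRag(base_url="http://localhost:11434", path="./db", collection_name="db_collection")

    with open("doc.pdf", "rb") as f:
        for event in rag.process_and_save_index_stream(
            file_name="doc.pdf",
            uploaded_file=f.read(),
            min_chunk_size=300,
            max_chunk_size=500,
        ):
            print(event)  # e.g. {"status": "🔍 Extracting text..."}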
hie_rag/process.py
CHANGED
@@ -1,7 +1,7 @@
 from typing import Dict, List
 
 from langchain_core.prompts import PromptTemplate
-from
+from langchain_ollama import ChatOllama
 from pydantic import Field
 from typing_extensions import TypedDict
 
@@ -9,9 +9,9 @@ from .utils import Utils
 
 
 class Process:
-    def __init__(self,
-        self.client =
-        self.utils = Utils(
+    def __init__(self, base_url=None):
+        self.client = ChatOllama(model="llama3.2:latest")
+        self.utils = Utils(base_url=base_url)
 
     def _generate_metadata(self, chunk: str) -> Dict:
         """Generate metadata for a chunk using LangChain"""
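Process (and TreeIndex below) now builds its chat model from langchain-ollama instead of the OpenAI integration. Note the model name is hard-coded to llama3.2:latest and base_url is not forwarded to ChatOllama, so the chat model always targets the default local endpoint. A minimal sketch of the new client, assuming the model is pulled locally:

    from langchain_ollama import ChatOllama

    # Mirrors the hard-coded construction in Process.__init__ (0.2.0)
    llm = ChatOllama(model="llama3.2:latest")
    reply = llm.invoke("Summarize this chunk in one sentence: ...")
    print(reply.content)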
hie_rag/split.py
CHANGED
@@ -4,11 +4,11 @@ from .utils import Utils
 
 
 class Split:
-    def __init__(self,
+    def __init__(self, base_url: str = None):
         """
         Initializes the Split object with default or user-defined thresholds.
         """
-        self.utils = Utils(
+        self.utils = Utils(base_url=base_url)
 
     def _split_large_chunk(self, paragraphs: List[str], embeddings: List[List[float]]) -> (List[str], List[str]):
         """
@@ -34,8 +34,8 @@ class Split:
     def split(
         self,
         extracted_text: str,
-        min_chunk_size: int =
-        max_chunk_size: int =
+        min_chunk_size: int = 300,
+        max_chunk_size: int = 500
     ) -> List[str]:
         """
         Splits the input text into chunks of token-size between [min_chunk_size, max_chunk_size].
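The default chunk bounds are now pinned at 300–500 tokens. A usage sketch, assuming an Ollama server is reachable for the embedding calls Split makes through Utils:

    from hie_rag.split import Split

    splitter = Split(base_url="http://localhost:11434")
    extracted_text = "Long document text ..."  # e.g. the output of Utils.extract_text
    chunks = splitter.split(extracted_text)  # min_chunk_size=300, max_chunk_size=500 by default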
hie_rag/split_and_process.py
CHANGED
@@ -4,10 +4,10 @@ from hie_rag.utils import Utils
 
 
 class SplitAndProcess:
-    def __init__(self,
-        self.split = Split(
-        self.utils = Utils(
-        self.process = Process(
+    def __init__(self, base_url: str):
+        self.split = Split(base_url=base_url)
+        self.utils = Utils(base_url=base_url)
+        self.process = Process(base_url=base_url)
 
     def split_and_process(self, uploaded_file):
         extracted_text = self.utils.extract_text(uploaded_file)
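SplitAndProcess follows the same pattern: one base_url fans out to Split, Utils, and Process. A short pipeline sketch under the same local-Ollama assumption; per the utils.py diff below, extract_text accepts raw bytes or file-like objects:

    from hie_rag.split_and_process import SplitAndProcess

    pipeline = SplitAndProcess(base_url="http://localhost:11434")
    with open("report.pdf", "rb") as f:
        processed_chunks = pipeline.split_and_process(f.read())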
hie_rag/tree_index.py
CHANGED
@@ -2,7 +2,7 @@ import json
 from typing import List
 
 from langchain_core.prompts import PromptTemplate
-from
+from langchain_ollama import ChatOllama
 from pydantic import Field
 from typing_extensions import TypedDict
 
@@ -10,9 +10,9 @@ from .utils import Utils
 
 
 class TreeIndex:
-    def __init__(self,
-        self.client =
-        self.utils = Utils(
+    def __init__(self, base_url: str):
+        self.client = ChatOllama(model="llama3.2:latest")
+        self.utils = Utils(base_url=base_url)
 
     def _convert_to_string(self, chunk_metadata: dict) -> str:
         """
hie_rag/utils.py
CHANGED
@@ -6,17 +6,20 @@ import tempfile
 import numpy as np
 import tiktoken
 from markitdown import MarkItDown
-from openai import OpenAI
 from sklearn.metrics.pairwise import cosine_similarity
 
+from .ai_client import AiClient
+
 
 class Utils:
-    def __init__(self,
-        self.client = OpenAI(api_key=api_key)
+    def __init__(self, base_url=None):
+        # self.client = OpenAI(api_key=api_key)
+        self.client = AiClient(base_url=base_url)
 
     def extract_text(self, uploaded_file: bytes):
         """Extract text from an uploaded file using MarkItDown."""
-        md = MarkItDown(llm_client=self.client, llm_model="gpt-4o")
+        # md = MarkItDown(llm_client=self.client, llm_model="gpt-4o")
+        md = MarkItDown()
 
         # Accept both raw bytes and file-like objects with `.read()`
         if isinstance(uploaded_file, bytes):
@@ -46,18 +49,15 @@ class Utils:
         tokenizer = tiktoken.get_encoding(encoding)
         return len(tokenizer.encode(text))
 
-    def
-
-
-
-        response = self.client.embeddings.create(input=chunk, model=model)
-        embeddings.append(response.data[0].embedding)
-        return embeddings
+    def get_embedding(self, text: str, model="nomic-embed-text") -> list:
+        if not self.client:
+            raise RuntimeError("No embedding client configured")
+        return self.client.get_embedding(text, model=model)
 
-    def
-
-
-        return
+    def list_embeddings(self, chunks: list, model="nomic-embed-text") -> list:
+        if not self.client:
+            raise RuntimeError("No embedding client configured")
+        return self.client.list_embeddings(chunks, model=model)
 
     def get_consecutive_least_similar(self, embeddings: list) -> int:
         """Find the index where consecutive similarity is lowest"""
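Utils now routes every embedding call through AiClient, and MarkItDown runs without an LLM client, so gpt-4o image descriptions are no longer generated during extraction. A short sketch of the new surface, assuming a local Ollama endpoint:

    from hie_rag.utils import Utils

    utils = Utils(base_url="http://localhost:11434")
    with open("notes.docx", "rb") as f:
        text = utils.extract_text(f.read())
    vectors = utils.list_embeddings(["chunk one", "chunk two"])  # delegated to AiClient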
hie_rag/vectordb.py
CHANGED
@@ -7,9 +7,9 @@ from .utils import Utils
 
 
 class Vectordb():
-    def __init__(self, path,
+    def __init__(self, path, base_url, collection_name):
         self.client = chromadb.PersistentClient(path = path)
-        self.utils = Utils(
+        self.utils = Utils(base_url=base_url)
         self.collection = self.client.get_or_create_collection(collection_name)
 
     def _convert_numpy(self, obj):
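Vectordb keeps its persistent Chroma client but now takes the Ollama base_url so its internal Utils can embed queries. A construction sketch:

    from hie_rag.vectordb import Vectordb

    db = Vectordb(path="./db", base_url="http://localhost:11434", collection_name="db_collection")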
{hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hie_rag
-Version: 0.
+Version: 0.2.0
 Summary: A hierarchical RAG framework for chunks retrieval.
 License: Apache License
                        Version 2.0, January 2004
@@ -29,7 +29,7 @@ Requires-Dist: openai==1.66.3
 Requires-Dist: scikit-learn
 Requires-Dist: tiktoken==0.8.0
 Requires-Dist: langchain==0.3.13
-Requires-Dist: langchain-
+Requires-Dist: langchain-ollama==0.3.3
 Requires-Dist: chromadb==0.6.2
 Dynamic: license-file
 
hie_rag-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+hie_rag/__init__.py,sha256=p2glSTkCqGvMlcivcuKBStFh2C5adojaC9aGmF6nbhY,358
+hie_rag/ai_client.py,sha256=VbGQ0e3vZNn8W2YoR15Vvq2r-MUs-TBRNLGiImT4QxU,1000
+hie_rag/hie_rag.py,sha256=KB44QBz3tE0Eq_FJw9pvKynCfjyAuulaMFYKk6bzjug,2359
+hie_rag/process.py,sha256=D_vMnF84ingLb4_KoC77uLQXSa6FwEpR30RGukG2H9U,2414
+hie_rag/split.py,sha256=My7QQ_pPiJD0TvwRzm2MgonMMA79-r3Vifwp1xLWX4I,4905
+hie_rag/split_and_process.py,sha256=PkFlnOF7nW4Zs47JTsGF4AY9VDOXz1AtxG9Die8_mQk,572
+hie_rag/tree_index.py,sha256=TuRi9-M2aiD46ciS-iwIJYDc9nXq7i7mwxwVbMXk5Lo,2668
+hie_rag/utils.py,sha256=F5bqx147yT37z080MPWPrwzOa0tGEAWmvNFgjXpe4ZA,2729
+hie_rag/vectordb.py,sha256=iI73ujrONjDaHU66RNdHnD2PZWSppnjm0isIHPJEGAY,11068
+hie_rag-0.2.0.dist-info/licenses/LICENSE,sha256=IwAxruLb1UG8F0KZtfnV6MJq10FRAxWM-XOTWkWsJt4,632
+hie_rag-0.2.0.dist-info/METADATA,sha256=Oym7z46OyhT_Gp7unhX1rsYlFQi9UuOBU5VRsko1m_A,1698
+hie_rag-0.2.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+hie_rag-0.2.0.dist-info/top_level.txt,sha256=tN2S3VpMUl6oLWL9sN4xIh4o2na_zjnW8rHiwPFf0T8,8
+hie_rag-0.2.0.dist-info/RECORD,,
hie_rag/app.py
DELETED
@@ -1,77 +0,0 @@
-# import json
-# import os
-
-# from .generate import Generate
-# from .process import Process
-# from .split import Split
-# from .tree_index import TreeIndex
-# from .utils import Utils
-# from .vectordb import Vectordb
-
-
-# # Function to handle data
-# def handle_data(data):
-#     """
-#     Processes incoming data and returns a response.
-#     """
-#     try:
-#         # This is the logic that used to be in the /api/data route
-#         return {"received": data}
-#     except Exception as e:
-#         return {"error": str(e)}
-
-# # Function to handle file upload and processing
-# def handle_file_upload(uploaded_file, access_token):
-#     """
-#     Processes the uploaded file and extracts its text.
-#     """
-#     try:
-#         utils = Utils(api_key=access_token)
-#         process = Process(api_key=access_token)
-#         split = Split(api_key=access_token)
-#         tree_index = TreeIndex(api_key=access_token)
-
-#         if uploaded_file is None:
-#             return {"error": "No file selected for uploading"}
-
-#         filename = uploaded_file.filename
-#         extracted_text = utils.extract_text(uploaded_file)
-#         final_chunk_list = split.split(extracted_text)
-#         processed_chunks = process.process_chunks(final_chunk_list)
-#         data = tree_index.output_index(processed_chunks)
-
-#         return {"filename": filename, "data": data}
-#     except Exception as e:
-#         return {"error": str(e)}
-
-# # Function to handle generation logic
-# def handle_generation(file, access_token):
-#     """
-#     Handles the file for generation and returns generated data.
-#     """
-#     try:
-#         data = json.load(file)
-
-#         if "chunks" not in data:
-#             return {"error": "Missing 'chunks' in data"}
-
-#         path = os.getenv("INDEX_PATH")
-#         vectordb = Vectordb(path=path, api_key=access_token)
-#         generate = Generate(api_key=access_token)
-
-#         save_index_result = vectordb.save_index(data)
-#         generated_full_data = []
-
-#         for i in data["chunks"]:
-#             original_chunk = i["original_chunk"]
-#             query_result = vectordb.query_by_text(original_chunk, n_results=3)
-#             possible_reference = query_result["metadatas"][0][1]["summary"] + "\n" + query_result["metadatas"][0][2]["summary"]
-
-#             data_gen = generate.generate(original_chunk, possible_reference)
-#             generated_full_data.extend(data_gen["dataset"])
-
-#         return {"data": generated_full_data}
-#     except json.JSONDecodeError:
-#         return {"error": "Invalid JSON file format"}
-#     except Exception as e:
-#         return {"error": str(e)}
hie_rag-0.1.3.dist-info/RECORD
DELETED
@@ -1,14 +0,0 @@
-hie_rag/__init__.py,sha256=p2glSTkCqGvMlcivcuKBStFh2C5adojaC9aGmF6nbhY,358
-hie_rag/app.py,sha256=jZkGEIXhYL2mY3KhixXFqvkOn8r0Cdav3EZxlChvKDA,2636
-hie_rag/hie_rag.py,sha256=h5EcGcxbcGm6-jB3lr_EIuZ-0wEQFJTF1xffzQKDJUI,2353
-hie_rag/process.py,sha256=JaL8i1IZckeeaHsNSYiUIlYRsRRB73E9QqLCSh09JHA,2434
-hie_rag/split.py,sha256=st_bZ4UaKUOXbxUIDobfG1IsW5vC9rHeyo4LXprfKrk,4904
-hie_rag/split_and_process.py,sha256=eRMiBYBZWUo3ljFasZGAOSP_6_adiwBD094DZJfVQDk,565
-hie_rag/tree_index.py,sha256=5rCoCCO14KLFvRzeOGB08mAnd6d3p7dl4h4jGQqF13A,2688
-hie_rag/utils.py,sha256=cxYLNch5CVgnpuD3ScVoJMP8Kp0_Ni3grF5tV1_sCOM,2769
-hie_rag/vectordb.py,sha256=UVdAinxUDhDqwbFbeXaLVdzN6uC4nu5l7rWi600d8BU,11065
-hie_rag-0.1.3.dist-info/licenses/LICENSE,sha256=IwAxruLb1UG8F0KZtfnV6MJq10FRAxWM-XOTWkWsJt4,632
-hie_rag-0.1.3.dist-info/METADATA,sha256=U_s4BPalfUt8xQWqj1mHNJJC7IEPZuXSmeVaHOBNhn4,1699
-hie_rag-0.1.3.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
-hie_rag-0.1.3.dist-info/top_level.txt,sha256=tN2S3VpMUl6oLWL9sN4xIh4o2na_zjnW8rHiwPFf0T8,8
-hie_rag-0.1.3.dist-info/RECORD,,
{hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/licenses/LICENSE
File without changes
{hie_rag-0.1.3.dist-info → hie_rag-0.2.0.dist-info}/top_level.txt
File without changes