hie-rag 0.2.0__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hie_rag-0.2.0 → hie_rag-0.2.2}/PKG-INFO +1 -1
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/hie_rag.py +5 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/process.py +2 -2
- hie_rag-0.2.2/hie_rag/split.py +129 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/tree_index.py +4 -4
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/utils.py +51 -3
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag.egg-info/PKG-INFO +1 -1
- {hie_rag-0.2.0 → hie_rag-0.2.2}/pyproject.toml +1 -1
- hie_rag-0.2.2/test/test-utils.py +56 -0
- hie_rag-0.2.0/hie_rag/split.py +0 -120
- hie_rag-0.2.0/test/test-utils.py +0 -54
- {hie_rag-0.2.0 → hie_rag-0.2.2}/LICENSE +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/README.md +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/__init__.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/ai_client.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/split_and_process.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/vectordb.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag.egg-info/SOURCES.txt +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag.egg-info/dependency_links.txt +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag.egg-info/requires.txt +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag.egg-info/top_level.txt +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/setup.cfg +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/test/test-process.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/test/test-split.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/test/test-vectordb.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/test/test.py +0 -0
- {hie_rag-0.2.0 → hie_rag-0.2.2}/test/test_split_and_process.py +0 -0
{hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/hie_rag.py

@@ -15,18 +15,23 @@ class HieRag:

     def process_and_save_index_stream(self, file_name: str, uploaded_file: bytes, min_chunk_size, max_chunk_size):
         yield {"status": "🔍 Extracting text..."}
+        print(f"Extracting text from {file_name}")
         extracted_text = self.utils.extract_text(uploaded_file)

         yield {"status": "✂️ Splitting into chunks..."}
+        print(f"Splitting text into chunks with min size {min_chunk_size} and max size {max_chunk_size}")
         result_split = self.split.split(extracted_text, min_chunk_size=min_chunk_size, max_chunk_size=max_chunk_size)

         yield {"status": "🧠 Processing chunks..."}
+        print(f"Processing {len(result_split)} chunks")
         result_process = self.process.process_chunks(result_split)

         yield {"status": "🌲 Building tree index..."}
+        print(f"Building tree index with {len(result_process)} chunks")
         tree_index = self.tree_index.tree_index(file_name = file_name, chunk_metadata=result_process)

         yield {"status": "💾 Saving to vector DB..."}
+        print(f"Saving tree index with {len(tree_index.get('chunks', []))} chunks to vector DB")
         save_result = self.vector_db.save_index(tree_index)

         file_id = save_result.get("file_id", "unknown")
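The five new print calls add progress logging around each stage; the streaming contract itself is unchanged. A minimal consumer of this generator might look like the sketch below. The HieRag constructor signature and the shape of any final yielded payload are not shown in this diff, so both are assumptions here.

```python
import os

from hie_rag.hie_rag import HieRag

# Assumed constructor: the diff only shows that the internal helpers take a base_url.
rag = HieRag(base_url=os.getenv("BASE_URL"))

with open("test2.pdf", "rb") as f:
    data = f.read()

# Each yielded item is a dict; the hunk above shows progress entries like {"status": "..."}.
for update in rag.process_and_save_index_stream(
    file_name="test2.pdf",
    uploaded_file=data,
    min_chunk_size=300,
    max_chunk_size=500,
):
    print(update)
```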
{hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/process.py

@@ -9,8 +9,8 @@ from .utils import Utils


 class Process:
-    def __init__(self, base_url=None):
-        self.client = ChatOllama(model=
+    def __init__(self, base_url=None, model="llama3.2:latest"):
+        self.client = ChatOllama(model=model)
         self.utils = Utils(base_url=base_url)

     def _generate_metadata(self, chunk: str) -> Dict:
hie_rag-0.2.2/hie_rag/split.py (new file)

@@ -0,0 +1,129 @@
+import re
+from collections import deque
+from typing import List, Tuple
+
+from .utils import Utils
+
+
+class Split:
+    def __init__(self, base_url: str = None):
+        """
+        Initializes the Split object with default or user-defined thresholds.
+        """
+        self.utils = Utils(base_url=base_url)
+
+    def _custom_split(self, text: str):
+        stripped = text.strip()
+        # Use blank lines as the paragraph split points
+        raw_paragraphs = re.split(r'\n\s*\n+', stripped)
+
+        result = []
+        for para in raw_paragraphs:
+            # Collapse every line break inside the paragraph into a space
+            single_line = para.replace('\r\n', ' ').replace('\r', ' ').replace('\n', ' ')
+            cleaned = single_line.strip()
+            if cleaned:
+                result.append(cleaned)
+        return result
+
+    def _split_large_chunk(self, paragraphs: List[str], embeddings: List[List[float]]) -> (List[str], List[str]):
+        """
+        Splits 'paragraphs' by finding the least similar boundary using 'embeddings'
+        (which are precomputed for these paragraphs only). Returns (left_part, right_part).
+        """
+        # If there are 0 or 1 paragraphs, no need to split
+        if len(paragraphs) < 2:
+            return paragraphs, []
+
+        # We'll assume 'embeddings' is already the same length as 'paragraphs'.
+        if len(embeddings) < 2:
+            # Can't compute consecutive similarities with fewer than 2 embeddings
+            return paragraphs, []
+
+        # Find the least similar consecutive boundary
+        window_size = 3
+        split_index = self.utils.get_windowed_least_similar(embeddings, window_size=window_size)
+
+        left_part = paragraphs[:split_index + 1]
+        right_part = paragraphs[split_index + 1:]
+        return left_part, right_part
+
+    def split(
+        self,
+        extracted_text: str,
+        min_chunk_size: int = 300,
+        max_chunk_size: int = 500
+    ) -> List[str]:
+
+        # 1) Build a deque of triples, so we never mutate three separate lists:
+        # paras = [p.strip() for p in extracted_text.split("\n\n") if p.strip()]
+        paras = self._custom_split(extracted_text)
+
+        if not paras:
+            return []
+
+        tokens = [self.utils.count_tokens(p) for p in paras]
+        embs = self.utils.list_embeddings(paras)
+        D: deque[Tuple[str,List[float],int]] = deque(
+            zip(paras, embs, tokens)
+        )
+
+        final_chunks: List[str] = []
+
+        # 2) As long as there's anything left in D, build one chunk at a time:
+        while D:
+            cur_paras: List[str] = []
+            cur_embs: List[List[float]] = []
+            cur_tokens: List[int] = []
+            total_tokens = 0
+
+            # 2a) Guarantee we hit at least min_chunk_size
+            while D and total_tokens < min_chunk_size:
+                p, e, t = D.popleft()
+                # if even this one p would bust max, you might choose to take it alone
+                if total_tokens + t > max_chunk_size and total_tokens > 0:
+                    # push it back for the next round
+                    D.appendleft((p,e,t))
+                    break
+                cur_paras.append(p)
+                cur_embs .append(e)
+                cur_tokens.append(t)
+                total_tokens += t
+
+            # if we ran out before min and have something -> emit it
+            if total_tokens < min_chunk_size and not D:
+                final_chunks.append(" ".join(cur_paras))
+                break
+
+            # 2b) Greedily fill until just under max_chunk_size
+            while D and total_tokens + D[0][2] <= max_chunk_size:
+                p, e, t = D.popleft()
+                cur_paras.append(p)
+                cur_embs .append(e)
+                cur_tokens.append(t)
+                total_tokens += t
+
+            # 3) Now we have between min and max tokens: split at the least-similar boundary
+            if cur_paras:
+                left, right = self._split_large_chunk(cur_paras, cur_embs)
+
+                # Count tokens in "left" to see if it meets min_chunk_size
+                left_token_count = sum(self.utils.count_tokens(p) for p in left)
+
+                if left_token_count >= min_chunk_size:
+                    # If left is big enough, emit it
+                    final_chunks.append(" ".join(left))
+
+                    # Push "right" (the remainder) back onto D for subsequent chunks
+                    for rp, re, rt in reversed(list(zip(
+                        cur_paras[len(left):],
+                        cur_embs [len(left):],
+                        cur_tokens[len(left):]
+                    ))):
+                        D.appendleft((rp, re, rt))
+                else:
+                    # If "left" is too small, just emit the entire cur_paras as one chunk
+                    final_chunks.append(" ".join(cur_paras))
+                    # (We do NOT push anything back, because cur_paras is fully consumed.)
+
+        return final_chunks
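The new `_custom_split` replaces the old plain `"\n\n"` split: blank lines (including ones that contain stray whitespace) mark paragraph boundaries, and newlines inside a paragraph collapse into spaces. A self-contained sanity check of that rule, on a made-up string and without any embedding service, might look like:

```python
import re

# Made-up input: paragraphs separated by blank lines, with wrapped lines inside.
text = "Title line\nwraps onto a second line.\n\nNext paragraph here.\n\n\nLast one.\n"

stripped = text.strip()
# Blank lines (possibly containing whitespace) are the paragraph boundaries.
raw_paragraphs = re.split(r'\n\s*\n+', stripped)

paragraphs = []
for para in raw_paragraphs:
    # Line breaks inside a paragraph become single spaces.
    single_line = para.replace('\r\n', ' ').replace('\r', ' ').replace('\n', ' ')
    cleaned = single_line.strip()
    if cleaned:
        paragraphs.append(cleaned)

print(paragraphs)
# ['Title line wraps onto a second line.', 'Next paragraph here.', 'Last one.']
```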
{hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/tree_index.py

@@ -10,8 +10,8 @@ from .utils import Utils


 class TreeIndex:
-    def __init__(self, base_url: str):
-        self.client = ChatOllama(model=
+    def __init__(self, base_url: str, model="llama3.2:latest"):
+        self.client = ChatOllama(model=model)
         self.utils = Utils(base_url=base_url)

     def _convert_to_string(self, chunk_metadata: dict) -> str:

@@ -32,8 +32,8 @@ class TreeIndex:

         NOTE:
         1. 請輸出繁體中文
-        2. The summary should be concise
-        3. The summary should be
+        2. The summary should be concise with details and better than the individual summaries.
+        3. The summary should be long enough to cover all the main points of the text.

         Summaries:
         {summaries}
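Together with the process.py hunk above, both LLM-backed classes now accept an optional Ollama model name, defaulting to "llama3.2:latest". A hypothetical override might look like the following; the base_url value is only a placeholder and nothing here is prescribed by the diff itself:

```python
from hie_rag.process import Process
from hie_rag.tree_index import TreeIndex

# Placeholder endpoint; the default model is used whenever `model` is omitted.
process = Process(base_url="http://localhost:11434", model="llama3.1:8b")
tree_index = TreeIndex(base_url="http://localhost:11434", model="llama3.1:8b")
```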
{hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/utils.py

@@ -12,7 +12,7 @@ from .ai_client import AiClient


 class Utils:
-    def __init__(self, base_url
+    def __init__(self, base_url: str):
         # self.client = OpenAI(api_key=api_key)
         self.client = AiClient(base_url=base_url)


@@ -24,13 +24,16 @@ class Utils:
         # Accept both raw bytes and file-like objects with `.read()`
         if isinstance(uploaded_file, bytes):
             file_bytes = uploaded_file
+            suffix = ".bin"  # fallback generic extension
         elif hasattr(uploaded_file, "read"):
             file_bytes = uploaded_file.read()
+            filename = getattr(uploaded_file, "name", None) or getattr(uploaded_file, "filename", None)
+            suffix = os.path.splitext(filename)[-1] if filename else ".bin"
         else:
             raise TypeError("Unsupported file type: must be bytes or file-like object")

         # Write to temp file for MarkItDown to process
-        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
             temp_file_path = temp_file.name
             temp_file.write(file_bytes)

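The suffix handling matters because the upload is written to a temporary file before conversion, and the converter (MarkItDown) presumably keys off the file extension; previously every upload landed in an extension-less temp file. The standalone sketch below mirrors the same suffix selection through a hypothetical pick_suffix helper:

```python
import io
import os

def pick_suffix(uploaded_file):
    # Mirrors the logic added above: raw bytes fall back to ".bin",
    # file-like objects keep the extension of their reported name.
    if isinstance(uploaded_file, bytes):
        return ".bin"
    filename = getattr(uploaded_file, "name", None) or getattr(uploaded_file, "filename", None)
    return os.path.splitext(filename)[-1] if filename else ".bin"

pdf_like = io.BytesIO(b"%PDF-1.7 ...")
pdf_like.name = "report.pdf"                # e.g. an upload object exposing .name

print(pick_suffix(b"raw bytes"))            # .bin
print(pick_suffix(pdf_like))                # .pdf
print(pick_suffix(io.BytesIO(b"no name")))  # .bin
```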
{hie_rag-0.2.0 → hie_rag-0.2.2}/hie_rag/utils.py (continued)

@@ -71,4 +74,49 @@ class Utils:
         # Find the index where consecutive similarity is lowest
         split_index = np.argmin(consecutive_similarities)

-        return split_index
+        return split_index
+
+    def get_windowed_least_similar(
+        self,
+        embeddings: list,
+        window_size: int = 3
+    ) -> int:
+        """
+        Slide a window over the embeddings: for every candidate split position i (0 <= i < len-1),
+        average the vectors of the window_size sentences in [max(0, i-window_size+1) .. i]
+        and the window_size sentences in [i+1 .. min(len, i+window_size)], take the cosine similarity
+        between the two averaged vectors, and return the i with the lowest similarity.
+        """
+        if len(embeddings) < 2:
+            # Nothing to split at all
+            return 0
+
+        # Convert the list-of-lists into a numpy array (shape: [n_sentences, dim_emb])
+        embs = np.array(embeddings)
+        n = embs.shape[0]
+
+        best_index = 0
+        lowest_sim = float('inf')
+
+        for i in range(n - 1):
+            # Preceding window: from pre_start to i (inclusive)
+            pre_start = max(0, i - window_size + 1)
+            pre_group = embs[pre_start : i + 1]  # shape: (<=window_size, dim)
+
+            # Following window: from i+1 to post_end-1
+            post_end = min(n, i + 1 + window_size)
+            post_group = embs[i + 1 : post_end]  # shape: (<=window_size, dim)
+
+            # Average each window
+            # (a sum, np.sum(...), would also work; the mean is more common and keeps the scale consistent)
+            pre_avg = np.mean(pre_group, axis=0).reshape(1, -1)  # shape: (1, dim)
+            post_avg = np.mean(post_group, axis=0).reshape(1, -1)  # shape: (1, dim)

+            # Cosine similarity between the two averaged vectors
+            sim = float(cosine_similarity(pre_avg, post_avg)[0][0])
+
+            if sim < lowest_sim:
+                lowest_sim = sim
+                best_index = i
+
+        return best_index
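As a standalone illustration of what this windowed scan returns, the toy example below applies the same mean-and-cosine comparison to six two-dimensional stand-in embeddings with an obvious break after the third vector:

```python
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Toy stand-ins for sentence embeddings: the first three point one way,
# the last three point another, so the best split boundary is index 2.
embeddings = np.array([
    [1.0, 0.0], [0.9, 0.1], [1.0, 0.1],
    [0.0, 1.0], [0.1, 0.9], [0.0, 1.1],
])

window_size = 3
best_index, lowest_sim = 0, float("inf")
for i in range(len(embeddings) - 1):
    pre = embeddings[max(0, i - window_size + 1): i + 1].mean(axis=0).reshape(1, -1)
    post = embeddings[i + 1: i + 1 + window_size].mean(axis=0).reshape(1, -1)
    sim = float(cosine_similarity(pre, post)[0][0])
    if sim < lowest_sim:
        lowest_sim, best_index = sim, i

print(best_index)  # 2: the boundary between the two groups
```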
hie_rag-0.2.2/test/test-utils.py (new file)

@@ -0,0 +1,56 @@
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from dotenv import load_dotenv
+
+from hie_rag.utils import Utils
+
+load_dotenv()
+
+utils = Utils(base_url=os.getenv("BASE_URL"))
+
+with open("test2.pdf", "rb") as uploaded_file:
+    extracted_text = utils.extract_text(uploaded_file)
+
+# Count tokens for the first 100 words
+# result_count_tokens = utils.count_tokens(extracted_text[:100])
+result_count_tokens = utils.count_tokens(extracted_text)
+print(f"Token count: {result_count_tokens}")
+
+# # Get embeddings for the text slices
+# result_list_embeddings = utils.list_embeddings([
+#     extracted_text[:100],
+#     extracted_text[100:200],
+#     extracted_text[200:300]
+# ])
+
+# # Get the embedding for the first 100 words
+# result_get_embedding = utils.get_embedding(extracted_text[:100])
+
+# # Find the index of least similar consecutive embeddings
+# result_get_consecutive_least_similar = utils.get_consecutive_least_similar(result_list_embeddings)
+
+# # Write results to the text file
+# with open("test-utils-result-new", "w", encoding="utf-8") as file:
+#     file.write("Extracted Text:\n")
+#     file.write(extracted_text + "\n\n")
+#     file.write("====================================\n\n")
+
+#     file.write("Count of Tokens (First 100 words):\n")
+#     file.write(str(result_count_tokens) + "\n\n")
+#     file.write("====================================\n\n")
+
+#     file.write("List of Embeddings:\n")
+#     file.write(str(result_list_embeddings) + "\n\n")
+#     file.write("====================================\n\n")
+
+#     file.write("Embedding of First 100 words:\n")
+#     file.write(str(result_get_embedding) + "\n\n")
+#     file.write("====================================\n\n")
+
+#     file.write("Index of Least Similar Consecutive Embeddings:\n")
+#     file.write(str(result_get_consecutive_least_similar) + "\n")
+
+# print("Results written to a txt file.")
hie_rag-0.2.0/hie_rag/split.py DELETED

@@ -1,120 +0,0 @@
-from typing import List
-
-from .utils import Utils
-
-
-class Split:
-    def __init__(self, base_url: str = None):
-        """
-        Initializes the Split object with default or user-defined thresholds.
-        """
-        self.utils = Utils(base_url=base_url)
-
-    def _split_large_chunk(self, paragraphs: List[str], embeddings: List[List[float]]) -> (List[str], List[str]):
-        """
-        Splits 'paragraphs' by finding the least similar boundary using 'embeddings'
-        (which are precomputed for these paragraphs only). Returns (left_part, right_part).
-        """
-        # If there are 0 or 1 paragraphs, no need to split
-        if len(paragraphs) < 2:
-            return paragraphs, []
-
-        # We'll assume 'embeddings' is already the same length as 'paragraphs'.
-        if len(embeddings) < 2:
-            # Can't compute consecutive similarities with fewer than 2 embeddings
-            return paragraphs, []
-
-        # Find the least similar consecutive boundary
-        split_index = self.utils.get_consecutive_least_similar(embeddings)
-
-        left_part = paragraphs[:split_index + 1]
-        right_part = paragraphs[split_index + 1:]
-        return left_part, right_part
-
-    def split(
-        self,
-        extracted_text: str,
-        min_chunk_size: int = 300,
-        max_chunk_size: int = 500
-    ) -> List[str]:
-        """
-        Splits the input text into chunks of token-size between [min_chunk_size, max_chunk_size].
-        Once a chunk is in that range, we find the "least similar" boundary, store the left side,
-        and re-insert the right side for further splitting.
-        """
-        paragraphs = [p.strip() for p in extracted_text.split("\n\n") if p.strip()]
-        if not paragraphs:
-            return []
-
-        # Precompute once
-        paragraphs_tokens = [self.utils.count_tokens(p) for p in paragraphs]
-        paragraphs_embeddings = self.utils.list_embeddings(paragraphs)
-
-        final_chunks = []
-        idx = 0
-        n = len(paragraphs)
-
-        while idx < n:
-            chunk_paragraphs = []
-            chunk_embeddings = []
-            chunk_tokens = []  # Keep track of tokens in this chunk
-            current_tokens = 0
-
-            # 1) Accumulate until we at least exceed min_chunk_size or run out
-            while idx < n and current_tokens < min_chunk_size:
-                if current_tokens + paragraphs_tokens[idx] <= max_chunk_size:
-                    chunk_paragraphs.append(paragraphs[idx])
-                    chunk_embeddings.append(paragraphs_embeddings[idx])
-                    chunk_tokens.append(paragraphs_tokens[idx])
-                    current_tokens += paragraphs_tokens[idx]
-                    idx += 1
-                else:
-                    # This paragraph alone might exceed max_chunk_size -> handle as you see fit
-                    break
-
-            # If we haven't hit min_chunk_size but are out of paragraphs, store remainder and quit
-            if current_tokens < min_chunk_size and idx >= n:
-                if chunk_paragraphs:
-                    final_chunks.append(" ".join(chunk_paragraphs))
-                break
-
-            # 2) Keep adding while we're under max_chunk_size
-            while idx < n:
-                if current_tokens + paragraphs_tokens[idx] <= max_chunk_size:
-                    chunk_paragraphs.append(paragraphs[idx])
-                    chunk_embeddings.append(paragraphs_embeddings[idx])
-                    chunk_tokens.append(paragraphs_tokens[idx])
-                    current_tokens += paragraphs_tokens[idx]
-                    idx += 1
-                else:
-                    break
-
-            # Now we have between min_chunk_size and max_chunk_size tokens in 'chunk_paragraphs'
-            if chunk_paragraphs:
-                # 3) Split at the "least similar" boundary
-                left_part, right_part = self._split_large_chunk(
-                    chunk_paragraphs, chunk_embeddings
-                )
-
-                # We'll figure out how many paragraphs ended up in the left part
-                used_count = len(left_part)
-                leftover_count = len(right_part)
-
-                # Store left side
-                final_chunks.append(" ".join(left_part))
-
-                # If there's leftover, reinsert it into the main lists
-                if leftover_count > 0:
-                    # Slices for leftover
-                    leftover_embeddings = chunk_embeddings[used_count:]
-                    leftover_tokens = chunk_tokens[used_count:]
-
-                    # Re-insert them at index=idx
-                    paragraphs[idx:idx] = right_part
-                    paragraphs_embeddings[idx:idx] = leftover_embeddings
-                    paragraphs_tokens[idx:idx] = leftover_tokens
-
-                    # Recompute n, in case the paragraphs list has grown
-                    n = len(paragraphs)
-
-        return final_chunks
hie_rag-0.2.0/test/test-utils.py DELETED

@@ -1,54 +0,0 @@
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-from dotenv import load_dotenv
-
-from hie_rag.utils import Utils
-
-load_dotenv()
-
-utils = Utils(base_url=os.getenv("BASE_URL"))
-
-with open("test.pdf", "rb") as uploaded_file:
-    extracted_text = utils.extract_text(uploaded_file)
-
-# Count tokens for the first 100 words
-result_count_tokens = utils.count_tokens(extracted_text[:100])
-
-# Get embeddings for the text slices
-result_list_embeddings = utils.list_embeddings([
-    extracted_text[:100],
-    extracted_text[100:200],
-    extracted_text[200:300]
-])
-
-# Get the embedding for the first 100 words
-result_get_embedding = utils.get_embedding(extracted_text[:100])
-
-# Find the index of least similar consecutive embeddings
-result_get_consecutive_least_similar = utils.get_consecutive_least_similar(result_list_embeddings)
-
-# Write results to the text file
-with open("test-utils-result-new", "w", encoding="utf-8") as file:
-    file.write("Extracted Text:\n")
-    file.write(extracted_text + "\n\n")
-    file.write("====================================\n\n")
-
-    file.write("Count of Tokens (First 100 words):\n")
-    file.write(str(result_count_tokens) + "\n\n")
-    file.write("====================================\n\n")
-
-    file.write("List of Embeddings:\n")
-    file.write(str(result_list_embeddings) + "\n\n")
-    file.write("====================================\n\n")
-
-    file.write("Embedding of First 100 words:\n")
-    file.write(str(result_get_embedding) + "\n\n")
-    file.write("====================================\n\n")
-
-    file.write("Index of Least Similar Consecutive Embeddings:\n")
-    file.write(str(result_get_consecutive_least_similar) + "\n")
-
-print("Results written to a txt file.")
The remaining 16 files listed above with +0 -0 are unchanged between 0.2.0 and 0.2.2.