semantic-chunker-langchain 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/PKG-INFO +7 -5
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/README.md +6 -4
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/pyproject.toml +1 -1
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/LICENSE +0 -0
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/semantic_chunker_langchain/__init__.py +0 -0
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/semantic_chunker_langchain/chunker.py +0 -0
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/semantic_chunker_langchain/extractors/pdf.py +0 -0
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/semantic_chunker_langchain/outputs/formatter.py +0 -0
- {semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/semantic_chunker_langchain/utils.py +0 -0
{semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: semantic-chunker-langchain
-Version: 0.1.2
+Version: 0.1.4
 Summary: Token-aware, LangChain-compatible semantic chunker with PDF and layout support
 License: MIT
 Author: Prajwal Shivaji Mandale
@@ -22,7 +22,8 @@ Description-Content-Type: text/markdown
 
 # Semantic Chunker for LangChain
 
-A **token-aware**, **LangChain-compatible** chunker that splits text (from PDF, markdown, or plain text) into semantically coherent chunks while respecting model token limits.
+Hitting limits when passing a large context to an LLM with a tight token limit? Not anymore: this chunker solves that problem.
+It is a **token-aware**, **LangChain-compatible** chunker that splits text (from PDF, markdown, or plain text) into semantically coherent chunks while respecting model token limits.
 
 ---
 
@@ -42,7 +43,7 @@ A **token-aware**, **LangChain-compatible** chunker that splits text (from PDF,
 
 ---
 
-## 
+## 📆 Installation
 
 ```bash
 pip install semantic-chunker-langchain
@@ -62,6 +63,7 @@ semantic-chunker sample.pdf --txt chunks.txt --json chunks.json
 
 ### 🔸 From Code
 
+```python
 from semantic_chunker_langchain.chunker import SemanticChunker, SimpleSemanticChunker
 from semantic_chunker_langchain.extractors.pdf import extract_pdf
 from semantic_chunker_langchain.outputs.formatter import write_to_txt
@@ -79,7 +81,7 @@ write_to_txt(chunks, "output.txt")
 # Using SimpleSemanticChunker
 simple_chunker = SimpleSemanticChunker(model_name="gpt-3.5-turbo")
 simple_chunks = simple_chunker.split_documents(docs)
-
+```
 
 ### 🔸 Convert to Retriever
 
@@ -90,7 +92,7 @@ retriever = chunker.to_retriever(chunks, embedding=OpenAIEmbeddings())
 
 ---
 
-## 
+## 📊 Testing
 
 ```bash
 poetry run pytest tests/
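For orientation, the snippet that this hunk wraps in a Python code fence assembles into roughly the sketch below. Only the three imports, the write_to_txt(chunks, "output.txt") call, and the SimpleSemanticChunker lines appear in the diff; the extract_pdf call shape, the SemanticChunker constructor arguments, and the sample.pdf path are illustrative assumptions, not the package's documented API.

```python
from semantic_chunker_langchain.chunker import SemanticChunker, SimpleSemanticChunker
from semantic_chunker_langchain.extractors.pdf import extract_pdf
from semantic_chunker_langchain.outputs.formatter import write_to_txt

# Assumption: extract_pdf returns LangChain Document objects for the given file path.
docs = extract_pdf("sample.pdf")

# Assumption: SemanticChunker takes a model name and exposes split_documents,
# mirroring the SimpleSemanticChunker lines that are visible in the diff.
chunker = SemanticChunker(model_name="gpt-3.5-turbo")
chunks = chunker.split_documents(docs)
write_to_txt(chunks, "output.txt")  # shown verbatim in the hunk header above

# Using SimpleSemanticChunker (these lines appear in the diff)
simple_chunker = SimpleSemanticChunker(model_name="gpt-3.5-turbo")
simple_chunks = simple_chunker.split_documents(docs)
```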
{semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/README.md

@@ -1,6 +1,7 @@
 # Semantic Chunker for LangChain
 
-A **token-aware**, **LangChain-compatible** chunker that splits text (from PDF, markdown, or plain text) into semantically coherent chunks while respecting model token limits.
+Hitting limits when passing a large context to an LLM with a tight token limit? Not anymore: this chunker solves that problem.
+It is a **token-aware**, **LangChain-compatible** chunker that splits text (from PDF, markdown, or plain text) into semantically coherent chunks while respecting model token limits.
 
 ---
 
@@ -20,7 +21,7 @@ A **token-aware**, **LangChain-compatible** chunker that splits text (from PDF,
 
 ---
 
-## 
+## 📆 Installation
 
 ```bash
 pip install semantic-chunker-langchain
@@ -40,6 +41,7 @@ semantic-chunker sample.pdf --txt chunks.txt --json chunks.json
 
 ### 🔸 From Code
 
+```python
 from semantic_chunker_langchain.chunker import SemanticChunker, SimpleSemanticChunker
 from semantic_chunker_langchain.extractors.pdf import extract_pdf
 from semantic_chunker_langchain.outputs.formatter import write_to_txt
@@ -57,7 +59,7 @@ write_to_txt(chunks, "output.txt")
 # Using SimpleSemanticChunker
 simple_chunker = SimpleSemanticChunker(model_name="gpt-3.5-turbo")
 simple_chunks = simple_chunker.split_documents(docs)
-
+```
 
 ### 🔸 Convert to Retriever
 
@@ -68,7 +70,7 @@ retriever = chunker.to_retriever(chunks, embedding=OpenAIEmbeddings())
 
 ---
 
-## 
+## 📊 Testing
 
 ```bash
 poetry run pytest tests/
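The "Convert to Retriever" section appears here only through its hunk context line, retriever = chunker.to_retriever(chunks, embedding=OpenAIEmbeddings()). A minimal continuation of the earlier sketch might look like the following, assuming OpenAIEmbeddings comes from the langchain-openai package and that to_retriever returns a standard LangChain retriever; only the to_retriever call itself comes from the diff.

```python
# Continuation of the sketch above (chunker and chunks defined there).
# Assumptions: OpenAIEmbeddings lives in the langchain-openai package, an
# OPENAI_API_KEY is set in the environment, and to_retriever returns a
# standard LangChain retriever.
from langchain_openai import OpenAIEmbeddings

retriever = chunker.to_retriever(chunks, embedding=OpenAIEmbeddings())
results = retriever.invoke("What does the chunker do with PDF layout?")
for doc in results:
    print(doc.page_content[:120])
```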
{semantic_chunker_langchain-0.1.2 → semantic_chunker_langchain-0.1.4}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "semantic-chunker-langchain"
-version = "0.1.2"
+version = "0.1.4"
 description = "Token-aware, LangChain-compatible semantic chunker with PDF and layout support"
 authors = ["Prajwal Shivaji Mandale <prajwal.mandale333@gmail.com>","Sudhnwa Ghorpade <sudhnwa.ghorpade@gmail.com>"]
 license = "MIT"
The remaining files (LICENSE, semantic_chunker_langchain/__init__.py, chunker.py, extractors/pdf.py, outputs/formatter.py, utils.py) are unchanged between 0.1.2 and 0.1.4.