omni_split-0.0.1rc0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of omni-split might be problematic.
Files changed (31)
  1. omni_split-0.0.1rc0/LICENSE +21 -0
  2. omni_split-0.0.1rc0/PKG-INFO +139 -0
  3. omni_split-0.0.1rc0/README.md +109 -0
  4. omni_split-0.0.1rc0/omni_split/__init__.py +16 -0
  5. omni_split-0.0.1rc0/omni_split/base/__init__.py +0 -0
  6. omni_split-0.0.1rc0/omni_split/base/chonkie_base.py +139 -0
  7. omni_split-0.0.1rc0/omni_split/base/chonkie_tokenizer.py +285 -0
  8. omni_split-0.0.1rc0/omni_split/base/chonkie_types.py +519 -0
  9. omni_split-0.0.1rc0/omni_split/base/md2json_list.py +303 -0
  10. omni_split-0.0.1rc0/omni_split/base/md_json_list2chunk.py +310 -0
  11. omni_split-0.0.1rc0/omni_split/base/native_text_split_utils4content2.py +306 -0
  12. omni_split-0.0.1rc0/omni_split/main.py +73 -0
  13. omni_split-0.0.1rc0/omni_split/model/text_chunker_tokenizer/qwen_tokenizer.json +303282 -0
  14. omni_split-0.0.1rc0/omni_split/omni_split.py +93 -0
  15. omni_split-0.0.1rc0/omni_split/sub_chunker/__init__.py +0 -0
  16. omni_split-0.0.1rc0/omni_split/sub_chunker/document_split.py +32 -0
  17. omni_split-0.0.1rc0/omni_split/sub_chunker/markdown_split.py +47 -0
  18. omni_split-0.0.1rc0/omni_split/sub_chunker/text_split.py +343 -0
  19. omni_split-0.0.1rc0/omni_split/test.py +80 -0
  20. omni_split-0.0.1rc0/omni_split/utils/__init__.py +0 -0
  21. omni_split-0.0.1rc0/omni_split/utils/base_utils.py +181 -0
  22. omni_split-0.0.1rc0/omni_split/utils/download_test_doc.py +61 -0
  23. omni_split-0.0.1rc0/omni_split.egg-info/PKG-INFO +139 -0
  24. omni_split-0.0.1rc0/omni_split.egg-info/SOURCES.txt +29 -0
  25. omni_split-0.0.1rc0/omni_split.egg-info/dependency_links.txt +1 -0
  26. omni_split-0.0.1rc0/omni_split.egg-info/requires.txt +6 -0
  27. omni_split-0.0.1rc0/omni_split.egg-info/top_level.txt +1 -0
  28. omni_split-0.0.1rc0/pyproject.toml +3 -0
  29. omni_split-0.0.1rc0/setup.cfg +4 -0
  30. omni_split-0.0.1rc0/setup.py +32 -0
  31. omni_split-0.0.1rc0/tests/test.py +86 -0
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 M5Stack Technology CO LTD
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,139 @@
+ Metadata-Version: 2.4
+ Name: omni_split
+ Version: 0.0.1rc0
+ Summary: A comprehensive document splitting toolkit
+ Home-page: https://github.com/dinobot22/omni_split
+ Author: dinobot22
+ Author-email: 2802701695yyb@gmail.com
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: mistletoe
+ Requires-Dist: transformers
+ Requires-Dist: markitdown[docx,pptx,xls,xlsx]
+ Requires-Dist: python-docx
+ Requires-Dist: loguru
+ Requires-Dist: wand
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+
+
+
+ # omni_split: split commonly used document formats (md, docx, etc.) into chunks for LLM-based RAG
+ ---
+ <img src="./docs/icon.png" alt="omni_split" >
+
+ ---
+ ### Note: we strongly recommend converting all other text formats to Markdown first; our chunking optimizations target Markdown documents.
+ ---
+ # Usage
+ ### Install
+ ```bash
+ pip install omni_split
+ ```
+ ### Use case
+ ```python
+ import json
+ from omni_split import OmniSplit
+ from omni_split import word_preprocessing_and_return_bytesIO
+ from omni_split import download_files_to_test_doc
+
+ ### == step 2: download test_doc files ==
+
+ doc_dict = download_files_to_test_doc()
+ text_doc_file_path = doc_dict["text_test.txt"]
+ json_list_doc_file_path = doc_dict["json_list_test.json"]
+ markdown_doc_file_path = doc_dict["markdown_test.md"]
+ word_doc_file_path = doc_dict["docx_test.docx"]
+
+
+ ### == step 3: split into chunks ==
+
+ omni_spliter = OmniSplit()
+
+ ## note: test text split
+ test_text = True
+ if test_text:
+     with open(text_doc_file_path, "r") as f:
+         text_content = f.read()
+     res = omni_spliter.text_chunk_func(text_content, txt_chunk_size=1000)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ## note: test markdown json split
+ test_markdown = True
+ if test_markdown:
+     with open(json_list_doc_file_path, "r") as f:
+         md_content_json = json.load(f)
+     res = omni_spliter.markdown_json_chunk_func(md_content_json)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.markdown_json_chunk_func(md_content_json, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ## note: test markdown split
+ test_markdown = True
+ if test_markdown:
+     with open(markdown_doc_file_path, "r") as f:
+         md_content = f.read()
+     res = omni_spliter.markdown_chunk_func(md_content)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.markdown_chunk_func(md_content, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+
+ ## note: test word split
+ test_document = True
+ if test_document:
+
+     new_doc_io = word_preprocessing_and_return_bytesIO(word_doc_file_path)
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=False)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=False, save_local_images_dir="./images")
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ```
+ # Dependency reminder:
+ To automatically convert binary metafiles (e.g. x-wmf) embedded in Word documents to PNG, ImageMagick must be installed on Linux.
+ Installation guide:
+ https://docs.wand-py.org/en/latest/guide/install.html
@@ -0,0 +1,109 @@
+
+
+
+ # omni_split: split commonly used document formats (md, docx, etc.) into chunks for LLM-based RAG
+ ---
+ <img src="./docs/icon.png" alt="omni_split" >
+
+ ---
+ ### Note: we strongly recommend converting all other text formats to Markdown first; our chunking optimizations target Markdown documents.
+ ---
+ # Usage
+ ### Install
+ ```bash
+ pip install omni_split
+ ```
+ ### Use case
+ ```python
+ import json
+ from omni_split import OmniSplit
+ from omni_split import word_preprocessing_and_return_bytesIO
+ from omni_split import download_files_to_test_doc
+
+ ### == step 2: download test_doc files ==
+
+ doc_dict = download_files_to_test_doc()
+ text_doc_file_path = doc_dict["text_test.txt"]
+ json_list_doc_file_path = doc_dict["json_list_test.json"]
+ markdown_doc_file_path = doc_dict["markdown_test.md"]
+ word_doc_file_path = doc_dict["docx_test.docx"]
+
+
+ ### == step 3: split into chunks ==
+
+ omni_spliter = OmniSplit()
+
+ ## note: test text split
+ test_text = True
+ if test_text:
+     with open(text_doc_file_path, "r") as f:
+         text_content = f.read()
+     res = omni_spliter.text_chunk_func(text_content, txt_chunk_size=1000)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ## note: test markdown json split
+ test_markdown = True
+ if test_markdown:
+     with open(json_list_doc_file_path, "r") as f:
+         md_content_json = json.load(f)
+     res = omni_spliter.markdown_json_chunk_func(md_content_json)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.markdown_json_chunk_func(md_content_json, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ## note: test markdown split
+ test_markdown = True
+ if test_markdown:
+     with open(markdown_doc_file_path, "r") as f:
+         md_content = f.read()
+     res = omni_spliter.markdown_chunk_func(md_content)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.markdown_chunk_func(md_content, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+
+ ## note: test word split
+ test_document = True
+ if test_document:
+
+     new_doc_io = word_preprocessing_and_return_bytesIO(word_doc_file_path)
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=False)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=False, save_local_images_dir="./images")
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+     res = omni_spliter.document_chunk_func(new_doc_io, txt_chunk_size=1000, clear_model=True)
+     for item in res:
+         print(item)
+         print("------------")
+     print("=" * 10)
+
+ ```
+ # Dependency reminder:
+ To automatically convert binary metafiles (e.g. x-wmf) embedded in Word documents to PNG, ImageMagick must be installed on Linux.
+ Installation guide:
+ https://docs.wand-py.org/en/latest/guide/install.html
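For reference, here is a minimal sketch of the WMF-to-PNG conversion that Wand performs once ImageMagick (with its WMF delegate) is installed. The file names are placeholders, not part of omni_split, and this is only an illustration of the dependency the README warns about:

```python
# Minimal sketch, assuming ImageMagick and the `wand` package are installed;
# "drawing.wmf" is a placeholder input file, not shipped with omni_split.
from wand.image import Image

with open("drawing.wmf", "rb") as f:
    wmf_bytes = f.read()

# Wand detects the input format from the blob; we only pin the output format.
with Image(blob=wmf_bytes) as img:
    img.format = "png"
    with open("drawing.png", "wb") as out:
        out.write(img.make_blob())
```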
@@ -0,0 +1,16 @@
+ from .omni_split import OmniSplit
+ from .utils.base_utils import word_preprocessing_and_return_bytesIO
+ from .utils.download_test_doc import download_files_to_test_doc
+
+ __version__ = "0.0.1"
+ __name__ = "omni_split"
+ __author__ = "dinobot22"
+
+ __all__ = [
+     "__name__",
+     "__version__",
+     "__author__",
+     "OmniSplit",
+     "word_preprocessing_and_return_bytesIO",
+     "download_files_to_test_doc"
+ ]
File without changes
@@ -0,0 +1,139 @@
+ """Base classes for chunking text."""
+
+ import warnings
+ from abc import ABC, abstractmethod
+ from multiprocessing import Pool, cpu_count
+ from typing import Any, Callable, List, Union
+
+ from tqdm import tqdm
+
+ from .chonkie_tokenizer import Tokenizer
+ from .chonkie_types import Chunk
+
+
+ class BaseChunker(ABC):
+     """Abstract base class for all chunker implementations.
+
+     All chunker implementations should inherit from this class and implement
+     the chunk() method according to their specific chunking strategy.
+     """
+
+     def __init__(self, tokenizer_or_token_counter: Union[str, Any, Callable[[str], int]]):
+         """Initialize the chunker with a tokenizer.
+
+         Args:
+             tokenizer_or_token_counter (Union[str, Any]): String, tokenizer object, or token counter object
+
+         """
+         self.tokenizer = Tokenizer(tokenizer_or_token_counter)
+
+         # Set whether to use multiprocessing or not
+         self._use_multiprocessing = True
+
+     @abstractmethod
+     def chunk(self, text: str) -> List[Chunk]:
+         """Split text into chunks according to the implementation strategy.
+
+         Args:
+             text: Input text to be chunked
+
+         Returns:
+             List of Chunk objects containing the chunked text and metadata
+
+         """
+         pass
+
+     def _determine_optimal_workers(self) -> int:
+         """Determine the optimal number of workers based on system resources."""
+         try:
+             # Get CPU cores
+             cpu_cores = cpu_count()
+
+             # Never use more than 75% of available cores
+             max_workers = max(1, int(cpu_cores * 0.75))
+
+             # Cap at 8 workers
+             return min(max_workers, 8)
+
+         except Exception as e:
+             warnings.warn(f"Error determining optimal workers: {e}. Using single process.")
+             return 1
+
+     def _process_batch_sequential(self, texts: List[str], show_progress_bar: bool = True) -> List[List[Chunk]]:
+         """Process a batch of texts sequentially."""
+         return [
+             self.chunk(t)
+             for t in tqdm(
+                 texts,
+                 desc="🦛",
+                 disable=not show_progress_bar,
+                 unit="doc",
+                 bar_format="{desc} ch{bar:20}nk {percentage:3.0f}% • {n_fmt}/{total_fmt} docs chunked [{elapsed}<{remaining}, {rate_fmt}] 🌱",
+                 ascii=" o",
+             )
+         ]
+
+     def _process_batch_multiprocessing(self, texts: List[str], show_progress_bar: bool = True) -> List[List[Chunk]]:
+         """Process a batch of texts using multiprocessing."""
+         num_workers = self._determine_optimal_workers()
+         total = len(texts)
+         chunksize = max(1, min(total // (num_workers * 16), 10))  # Optimize chunk size
+
+         with Pool(processes=num_workers) as pool:
+             results = []
+             with tqdm(
+                 total=total,
+                 desc="🦛",
+                 disable=not show_progress_bar,
+                 unit="doc",
+                 bar_format="{desc} ch{bar:20}nk {percentage:3.0f}% • {n_fmt}/{total_fmt} docs chunked [{elapsed}<{remaining}, {rate_fmt}] 🌱",
+                 ascii=" o",
+             ) as pbar:
+                 for result in pool.imap(self.chunk, texts, chunksize=chunksize):
+                     results.append(result)
+                     pbar.update()
+             return results
+
+     def chunk_batch(
+         self,
+         texts: List[str],
+         show_progress_bar: bool = True,
+     ) -> List[List[Chunk]]:
+         """Split a list of texts into their respective chunks.
+
+         By default, this method uses multiprocessing to parallelize the chunking process.
+
+         Args:
+             texts: List of input texts to be chunked.
+             show_progress_bar: Whether to show a progress bar.
+
+         Returns:
+             List of lists of Chunk objects containing the chunked text and metadata
+
+         """
+         if self._use_multiprocessing:
+             return self._process_batch_multiprocessing(texts, show_progress_bar)
+         else:
+             return self._process_batch_sequential(texts, show_progress_bar)
+
+     def __call__(self, text: Union[str, List[str]], show_progress_bar: bool = True) -> Union[List[Chunk], List[List[Chunk]]]:
+         """Make the chunker callable directly.
+
+         Args:
+             text: Input text or list of texts to be chunked
+             show_progress_bar: Whether to show a progress bar (for batch chunking)
+
+         Returns:
+             List of Chunk objects or list of lists of Chunk
+
+         """
+         if isinstance(text, str):
+             return self.chunk(text)
+         elif isinstance(text, list):
+             return self.chunk_batch(text, show_progress_bar)
+         else:
+             raise ValueError("Input must be a string or a list of strings.")
+
+     def __repr__(self) -> str:
+         """Return string representation of the chunker."""
+         return f"{self.__class__.__name__}()"