bulk-chain 0.25.1__tar.gz → 0.25.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/PKG-INFO +32 -16
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/README.md +31 -15
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/llm_base.py +1 -1
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_llm.py +5 -14
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/demo.py +1 -2
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/infer.py +0 -1
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/PKG-INFO +32 -16
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/setup.py +1 -1
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/LICENSE +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/__init__.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/api.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/__init__.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_args.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_batch.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_data.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_dict.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_json.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_schema.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/utils.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/utils_logger.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/SOURCES.txt +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/dependency_links.txt +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/requires.txt +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/top_level.txt +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/setup.cfg +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/test/test.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/test/test_api.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/test/test_args_seeking.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/test/test_cmdargs.py +0 -0
- {bulk_chain-0.25.1 → bulk_chain-0.25.2}/test/test_provider_batching.py +0 -0
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: bulk_chain
-Version: 0.25.1
+Version: 0.25.2
 Summary: A lightweight, no-strings-attached Chain-of-Thought framework for your LLM, ensuring reliable results for bulk input requests.
 Home-page: https://github.com/nicolay-r/bulk-chain
 Author: Nicolay Rusnachenko
@@ -18,7 +18,7 @@ License-File: LICENSE
 Requires-Dist: tqdm
 Requires-Dist: source-iter==0.24.3
 
-# bulk-chain 0.25.1
+# bulk-chain 0.25.2
 
 [](https://colab.research.google.com/github/nicolay-r/bulk-chain/blob/master/bulk_chain_tutorial.ipynb)
 [](https://x.com/nicolayr_/status/1847969224636961033)
@@ -30,6 +30,8 @@ Requires-Dist: source-iter==0.24.3
 
 <p align="center">
   <a href="https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm"><b>Third-party providers hosting</b>↗️</a>
+  <br>
+  <a href="https://github.com/nicolay-r/bulk-chain/blob/master/README.md#demo-mode">👉<b>demo</b>👈</a>
 </p>
 
 A no-strings-attached **framework** for your LLM that allows applying Chain-of-Thought-alike [prompt `schema`](#chain-of-thought-schema) towards a massive textual collections using custom **[third-party providers ↗️](https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm)**.
@@ -86,11 +88,32 @@ Preliminary steps:
 1. Define your [schema](#chain-of-thought-schema) ([Example for Sentiment Analysis](/ext/schema/thor_cot_schema.json)))
 2. Wrap or pick **LLM model** from the [<b>Third-party providers hosting</b>↗️](https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm).
 
-## 
+## Shell
 
-
+### Demo Mode
 
-
+**demo mode** to interact with LLM via command line with LLM output streaming support.
+The video below illustrates an example of application for sentiment analysis on author opinion extraction towards mentioned object in text.
+
+Quck start with launching demo:
+1. ⬇️ Download [replicate](https://replicate.com/) provider for `bulk-chain`:
+2. 📜 Setup your reasoning `thor_cot_schema.json` according to the [following example ↗️](test/schema/thor_cot_schema.json)
+3. 🚀 Launch `demo.py` as follows:
+```bash
+python3 -m bulk_chain.demo \
+    --schema "test/schema/thor_cot_schema.json" \
+    --adapter "dynamic:replicate_104.py:Replicate" \
+    %%m \
+    --model_name "meta/meta-llama-3-70b-instruct" \
+    --api_token "<REPLICATE-API-TOKEN>" \
+    --stream
+```
+
+📺 This video showcase application of the [↗️ Sentiment Analysis Schema](https://github.com/nicolay-r/bulk-chain/blob/master/test/schema/thor_cot_schema.json) towards [LLaMA-3-70B-Instruct](https://replicate.com/meta/meta-llama-3-70b-instruct) hosted by Replicate for reasoning over submitted texts
+
+
+
+### Inference Mode
 
 > **NOTE:** You have to install `source-iter` and `tqdm` packages that actual [dependencies](dependencies.txt) of this project
 
@@ -110,17 +133,10 @@ python3 -m bulk_chain.infer \
     --api_token "<REPLICATE-API-TOKEN>"
 ```
 
-
-
-
-
-    --schema "test/schema/thor_cot_schema.json" \
-    --adapter "dynamic:replicate_104.py:Replicate" \
-    %%m \
-    --model_name "meta/meta-llama-3-70b-instruct" \
-    --api_token "<REPLICATE-API-TOKEN>" \
-    --stream
-```
+## API
+
+Please take a look at the [**related Wiki page**](https://github.com/nicolay-r/bulk-chain/wiki)
+
 
 # Embed your LLM
 
````
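One detail in the new demo command deserves a note: the bare `%%m` token reads as a separator between bulk-chain's own flags and the flags forwarded to the model provider, which matches the `preset_dict[key] = value` loop visible in the `demo.py` hunk later in this diff. Below is a minimal, hypothetical sketch of such a split; the helper name and exact semantics are assumptions, not the package's documented behavior.

```python
# Hypothetical sketch (not bulk-chain's actual parser): split argv at the
# "%%m" marker into framework arguments and model-provider arguments.
def split_model_args(argv, marker="%%m"):
    if marker in argv:
        at = argv.index(marker)
        return argv[:at], argv[at + 1:]
    return list(argv), []

framework_args, model_args = split_model_args([
    "--schema", "test/schema/thor_cot_schema.json",
    "--adapter", "dynamic:replicate_104.py:Replicate",
    "%%m",
    "--model_name", "meta/meta-llama-3-70b-instruct",
    "--api_token", "<REPLICATE-API-TOKEN>",
    "--stream",
])
print(framework_args)  # ['--schema', ..., '--adapter', ...]
print(model_args)      # ['--model_name', ..., '--api_token', ..., '--stream']
```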
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/README.md

````diff
@@ -1,4 +1,4 @@
-# bulk-chain 0.25.1
+# bulk-chain 0.25.2
 
 [](https://colab.research.google.com/github/nicolay-r/bulk-chain/blob/master/bulk_chain_tutorial.ipynb)
 [](https://x.com/nicolayr_/status/1847969224636961033)
@@ -10,6 +10,8 @@
 
 <p align="center">
   <a href="https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm"><b>Third-party providers hosting</b>↗️</a>
+  <br>
+  <a href="https://github.com/nicolay-r/bulk-chain/blob/master/README.md#demo-mode">👉<b>demo</b>👈</a>
 </p>
 
 A no-strings-attached **framework** for your LLM that allows applying Chain-of-Thought-alike [prompt `schema`](#chain-of-thought-schema) towards a massive textual collections using custom **[third-party providers ↗️](https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm)**.
@@ -66,11 +68,32 @@ Preliminary steps:
 1. Define your [schema](#chain-of-thought-schema) ([Example for Sentiment Analysis](/ext/schema/thor_cot_schema.json)))
 2. Wrap or pick **LLM model** from the [<b>Third-party providers hosting</b>↗️](https://github.com/nicolay-r/nlp-thirdgate?tab=readme-ov-file#llm).
 
-## 
+## Shell
 
-
+### Demo Mode
 
-
+**demo mode** to interact with LLM via command line with LLM output streaming support.
+The video below illustrates an example of application for sentiment analysis on author opinion extraction towards mentioned object in text.
+
+Quck start with launching demo:
+1. ⬇️ Download [replicate](https://replicate.com/) provider for `bulk-chain`:
+2. 📜 Setup your reasoning `thor_cot_schema.json` according to the [following example ↗️](test/schema/thor_cot_schema.json)
+3. 🚀 Launch `demo.py` as follows:
+```bash
+python3 -m bulk_chain.demo \
+    --schema "test/schema/thor_cot_schema.json" \
+    --adapter "dynamic:replicate_104.py:Replicate" \
+    %%m \
+    --model_name "meta/meta-llama-3-70b-instruct" \
+    --api_token "<REPLICATE-API-TOKEN>" \
+    --stream
+```
+
+📺 This video showcase application of the [↗️ Sentiment Analysis Schema](https://github.com/nicolay-r/bulk-chain/blob/master/test/schema/thor_cot_schema.json) towards [LLaMA-3-70B-Instruct](https://replicate.com/meta/meta-llama-3-70b-instruct) hosted by Replicate for reasoning over submitted texts
+
+
+
+### Inference Mode
 
 > **NOTE:** You have to install `source-iter` and `tqdm` packages that actual [dependencies](dependencies.txt) of this project
 
@@ -90,17 +113,10 @@ python3 -m bulk_chain.infer \
     --api_token "<REPLICATE-API-TOKEN>"
 ```
 
-
-
-
-
-    --schema "test/schema/thor_cot_schema.json" \
-    --adapter "dynamic:replicate_104.py:Replicate" \
-    %%m \
-    --model_name "meta/meta-llama-3-70b-instruct" \
-    --api_token "<REPLICATE-API-TOKEN>" \
-    --stream
-```
+## API
+
+Please take a look at the [**related Wiki page**](https://github.com/nicolay-r/bulk-chain/wiki)
+
 
 # Embed your LLM
 
````
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/core/service_llm.py

````diff
@@ -8,21 +8,12 @@ def pad_str(text, pad):
     return text.rjust(len(text) + pad, ' ')
 
 
-def 
-    lines = []
-    for text in content.split('\n'):
-        for i in range(0, len(text), width):
-            line = handle_line(text[i:i + width])
-            lines.append(line)
-    return '\n'.join(lines)
-
-
-def nice_output(text, width, pad=4, remove_new_line=False):
+def nice_output(text, remove_new_line=False):
     short_text = text.replace("\n", "") if remove_new_line else text
-    return 
+    return short_text
 
 
-def chat_with_lm(lm, preset_dict=None, chain=None, model_name=None,
+def chat_with_lm(lm, preset_dict=None, chain=None, model_name=None, pad=0):
     assert (isinstance(lm, BaseLM))
     assert (isinstance(chain, list))
     assert (isinstance(model_name, str) or model_name is None)
@@ -81,7 +72,7 @@ def chat_with_lm(lm, preset_dict=None, chain=None, model_name=None, line_width=8
         # Returning meta information, passed to LLM.
         streamed_logger.info(pad_str(f"{model_name} (ask [{chain_ind+1}/{len(chain)}]) ->", pad=pad))
         streamed_logger.info("\n")
-        streamed_logger.info(nice_output(actual_prompt,
+        streamed_logger.info(nice_output(actual_prompt, remove_new_line=True))
         streamed_logger.info("\n\n")
 
         # Response.
@@ -89,7 +80,7 @@ def chat_with_lm(lm, preset_dict=None, chain=None, model_name=None, line_width=8
         streamed_logger.info(pad_str(f"{model_name} (resp [{chain_ind+1}/{len(chain)}])->", pad=pad))
         streamed_logger.info("\n")
         if isinstance(response, str):
-            streamed_logger.info(nice_output(response,
+            streamed_logger.info(nice_output(response, remove_new_line=False))
             buffer = [response]
         else:
             buffer = []
````
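Taken together, these hunks drop width-based line wrapping from the logging path: `nice_output` loses its `width` and `pad` parameters and keeps only the optional newline stripping, while `chat_with_lm` takes an explicit `pad` argument in place of `line_width`. A self-contained sketch of the new helper's behavior, reconstructed from the added lines above (the sample strings are mine):

```python
# Reconstructed from the 0.25.2 hunk: the pre-0.25.2 helper also wrapped
# text to a fixed width; that behavior is gone.
def nice_output(text, remove_new_line=False):
    # Optionally collapse the text onto a single line before logging.
    return text.replace("\n", "") if remove_new_line else text

prompt = "What is the author's attitude\ntowards the mentioned object?"
print(nice_output(prompt, remove_new_line=True))   # one line, as used for prompts
print(nice_output(prompt, remove_new_line=False))  # unchanged, as used for responses
```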
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/demo.py

````diff
@@ -81,5 +81,4 @@ if __name__ == '__main__':
         preset_dict[key] = value
 
     # Launch Demo.
-    chat_with_lm(llm, preset_dict=preset_dict, chain=schema.chain, model_name=llm_model_name,
-                 line_width=120)
+    chat_with_lm(llm, preset_dict=preset_dict, chain=schema.chain, model_name=llm_model_name)
````
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain/infer.py

````diff
@@ -68,7 +68,6 @@ if __name__ == '__main__':
 
     parser = argparse.ArgumentParser(description="Infer Instruct LLM inference based on CoT schema")
    parser.add_argument('--adapter', dest='adapter', type=str, default=None)
-    parser.add_argument('--attempts', dest='attempts', type=int, default=None)
     parser.add_argument('--id-col', dest='id_col', type=str, default="uid")
     parser.add_argument('--src', dest='src', type=str, nargs="?", default=None)
     parser.add_argument('--schema', dest='schema', type=str, default=None,
````
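Since `--attempts` is gone from `bulk_chain.infer`, wrapper scripts that still pass it will fail argument parsing in 0.25.2. A hypothetical compatibility shim (the helper and approach are mine, not part of the package) that strips the flag and its value before delegating:

```python
# Hypothetical shim, not part of bulk-chain: forward argv to
# bulk_chain.infer minus the "--attempts" flag removed in 0.25.2.
import subprocess
import sys

def drop_flag_with_value(argv, flag="--attempts"):
    cleaned, skip_next = [], False
    for arg in argv:
        if skip_next:                    # skip the value of the dropped flag
            skip_next = False
            continue
        if arg == flag:                  # "--attempts 3" form
            skip_next = True
            continue
        if arg.startswith(flag + "="):   # "--attempts=3" form
            continue
        cleaned.append(arg)
    return cleaned

if __name__ == "__main__":
    subprocess.run(
        [sys.executable, "-m", "bulk_chain.infer",
         *drop_flag_with_value(sys.argv[1:])],
        check=True)
```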
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/bulk_chain.egg-info/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: bulk_chain
-Version: 0.25.1
+Version: 0.25.2
 Summary: A lightweight, no-strings-attached Chain-of-Thought framework for your LLM, ensuring reliable results for bulk input requests.
 Home-page: https://github.com/nicolay-r/bulk-chain
 Author: Nicolay Rusnachenko
````

The remaining hunks in this file (the packaged copy of the README body) are identical to the {bulk_chain-0.25.1 → bulk_chain-0.25.2}/PKG-INFO diff shown above.
{bulk_chain-0.25.1 → bulk_chain-0.25.2}/setup.py

````diff
@@ -15,7 +15,7 @@ def get_requirements(filenames):
 
 setup(
     name='bulk_chain',
-    version='0.25.1',
+    version='0.25.2',
     python_requires=">=3.6",
     description='A lightweight, no-strings-attached Chain-of-Thought framework for your LLM, '
                 'ensuring reliable results for bulk input requests.',
````
All remaining files, listed above with +0 -0, are without changes.