llm-ie 1.0.0-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- llm_ie/__init__.py +4 -4
- llm_ie/asset/prompt_guide/AttributeExtractor_prompt_guide.txt +52 -0
- llm_ie/engines.py +497 -250
- llm_ie/extractors.py +479 -681
- llm_ie/prompt_editor.py +13 -13
- {llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/METADATA +2 -2
- {llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/RECORD +8 -7
- {llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/WHEEL +0 -0
llm_ie/prompt_editor.py
CHANGED

@@ -67,7 +67,7 @@ class PromptEditor:
         return prompt


-    def rewrite(self, draft:str
+    def rewrite(self, draft:str) -> str:
         """
         This method inputs a prompt draft and rewrites it following the extractor's guideline.
         """
@@ -79,10 +79,10 @@ class PromptEditor:
                                           prompt_template=rewrite_prompt_template)
         messages = [{"role": "system", "content": self.system_prompt},
                     {"role": "user", "content": prompt}]
-        res = self.inference_engine.chat(messages, verbose=True
+        res = self.inference_engine.chat(messages, verbose=True)
         return res

-    def comment(self, draft:str
+    def comment(self, draft:str) -> str:
         """
         This method inputs a prompt draft and comment following the extractor's guideline.
         """
@@ -94,11 +94,11 @@ class PromptEditor:
                                           prompt_template=comment_prompt_template)
         messages = [{"role": "system", "content": self.system_prompt},
                     {"role": "user", "content": prompt}]
-        res = self.inference_engine.chat(messages, verbose=True
+        res = self.inference_engine.chat(messages, verbose=True)
         return res


-    def _terminal_chat(self
+    def _terminal_chat(self):
         """
         This method runs an interactive chat session in the terminal to help users write prompt templates.
         """
@@ -126,11 +126,11 @@ class PromptEditor:
             # Chat
             messages.append({"role": "user", "content": user_input})
             print(f"{Fore.BLUE}Assistant: {Style.RESET_ALL}", end="")
-            response = self.inference_engine.chat(messages, verbose=True
+            response = self.inference_engine.chat(messages, verbose=True)
             messages.append({"role": "assistant", "content": response})


-    def _IPython_chat(self
+    def _IPython_chat(self):
        """
        This method runs an interactive chat session in Jupyter/IPython using ipywidgets to help users write prompt templates.
        """
@@ -186,7 +186,7 @@ class PromptEditor:

         # Get assistant's response and append it to conversation
         print("Assistant: ", end="")
-        response = self.inference_engine.chat(messages, verbose=True
+        response = self.inference_engine.chat(messages, verbose=True)
         messages.append({"role": "assistant", "content": response})

         # Display the assistant's response
@@ -200,16 +200,16 @@ class PromptEditor:
         display(input_box)
         display(output_area)

-    def chat(self
+    def chat(self):
         """
         External method that detects the environment and calls the appropriate chat method.
         """
         if 'ipykernel' in sys.modules:
-            self._IPython_chat(
+            self._IPython_chat()
         else:
-            self._terminal_chat(
+            self._terminal_chat()

-    def chat_stream(self, messages: List[Dict[str, str]]
+    def chat_stream(self, messages: List[Dict[str, str]]) -> Generator[str, None, None]:
         """
         This method processes messages and yields response chunks from the inference engine.
         This is for frontend App.
@@ -239,5 +239,5 @@ class PromptEditor:
                     {"role": "user", "content": prompt}] + messages


-        stream_generator = self.inference_engine.chat(messages, stream=True
+        stream_generator = self.inference_engine.chat(messages, stream=True)
         yield from stream_generator
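As the new side of the diff above shows (the old-side lines are truncated in this view), the 1.2.0 signatures carry explicit return annotations: rewrite() and comment() return str, chat() dispatches by environment, and chat_stream() returns a generator of text chunks. Below is a minimal usage sketch against these signatures; the demo wrapper and the draft text are illustrative, and since constructing a PromptEditor (engine and extractor arguments) is not shown in this diff, it is left abstract:

    from typing import Dict, List

    from llm_ie.prompt_editor import PromptEditor

    def demo(editor: PromptEditor) -> None:
        # Assumes `editor` was constructed elsewhere with an inference engine;
        # the constructor is not part of this diff.
        draft = "Extract medication names from the clinical note below."

        rewritten: str = editor.rewrite(draft)   # rewritten per the extractor's guideline
        feedback: str = editor.comment(draft)    # comments on the draft
        print(rewritten, feedback, sep="\n---\n")

        # chat() detects the frontend itself: the ipywidgets session when
        # 'ipykernel' is in sys.modules, the terminal loop otherwise.
        editor.chat()

        # chat_stream() is meant for app frontends: it yields response chunks
        # as the inference engine streams them.
        messages: List[Dict[str, str]] = [{"role": "user", "content": draft}]
        for chunk in editor.chat_stream(messages):
            print(chunk, end="", flush=True)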
{llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/METADATA
CHANGED

@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: llm-ie
-Version: 1.0.0
-Summary:
+Version: 1.2.0
+Summary: A comprehensive toolkit that provides building blocks for LLM-based named entity recognition, attribute extraction, and relation extraction pipelines.
 License: MIT
 Author: Enshuo (David) Hsu
 Requires-Python: >=3.11,<4.0
{llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-llm_ie/__init__.py,sha256=
+llm_ie/__init__.py,sha256=rLP01qXkIisX0WLzZOv6y494Braw89g5JLmA6ZyrGGA,1590
 llm_ie/asset/PromptEditor_prompts/chat.txt,sha256=Fq62voV0JQ8xBRcxS1Nmdd7DkHs1fGYb-tmNwctZZK0,118
 llm_ie/asset/PromptEditor_prompts/comment.txt,sha256=C_lxx-dlOlFJ__jkHKosZ8HsNAeV1aowh2B36nIipBY,159
 llm_ie/asset/PromptEditor_prompts/rewrite.txt,sha256=JAwY9vm1jSmKf2qcLBYUvrSmME2EJH36bALmkwZDWYQ,178
@@ -9,6 +9,7 @@ llm_ie/asset/default_prompts/ReviewFrameExtractor_addition_review_prompt.txt,sha
 llm_ie/asset/default_prompts/ReviewFrameExtractor_revision_review_prompt.txt,sha256=lGGjdeFpzZEc56w-EtQDMyYFs7A3DQAM32sT42Nf_08,293
 llm_ie/asset/default_prompts/SentenceReviewFrameExtractor_addition_review_prompt.txt,sha256=Of11LFuXLB249oekFelzlIeoAB0cATReqWgFTvhNz_8,329
 llm_ie/asset/default_prompts/SentenceReviewFrameExtractor_revision_review_prompt.txt,sha256=kNJQK7NdoCx13TXGY8HYGrW_v4SEaErK8j9qIzd70CM,291
+llm_ie/asset/prompt_guide/AttributeExtractor_prompt_guide.txt,sha256=w2amKipinuJtCiyPsgWsjaJRwTpS1qOBDuPPtPCMeQA,2120
 llm_ie/asset/prompt_guide/BasicFrameExtractor_prompt_guide.txt,sha256=-Cli7rwu4wM4vSmkG0nInNkpStUhRqKESQ3oqD38pbE,10395
 llm_ie/asset/prompt_guide/BasicReviewFrameExtractor_prompt_guide.txt,sha256=-Cli7rwu4wM4vSmkG0nInNkpStUhRqKESQ3oqD38pbE,10395
 llm_ie/asset/prompt_guide/BinaryRelationExtractor_prompt_guide.txt,sha256=Z6Yc2_QRqroWcJ13owNJbo78I0wpS4XXDsOjXFR-aPk,2166
@@ -19,9 +20,9 @@ llm_ie/asset/prompt_guide/SentenceFrameExtractor_prompt_guide.txt,sha256=97_-y_v
 llm_ie/asset/prompt_guide/SentenceReviewFrameExtractor_prompt_guide.txt,sha256=97_-y_vHMLG4Kb8fLsGgibLxB-3mest8k3LHfLo5h-I,10465
 llm_ie/chunkers.py,sha256=24h9l-Ldyx3EgfYicFqGhV_b-XofUS3yovC1nBWdDoo,5143
 llm_ie/data_types.py,sha256=72-3bzzYpo7KZpD9bjoroWT2eiM0zmWyDkBr2nHoBV0,18559
-llm_ie/engines.py,sha256=
-llm_ie/extractors.py,sha256=
-llm_ie/prompt_editor.py,sha256=
-llm_ie-1.
-llm_ie-1.
-llm_ie-1.
+llm_ie/engines.py,sha256=uE5sag1YeKBYBFF4gY7rYZK9e1ttatf9T7bV_xSg9Pk,36075
+llm_ie/extractors.py,sha256=aCRqKhjSoKTAWZ3WhX_O6V-S_rIvYhPsk78nZLDpQw8,95149
+llm_ie/prompt_editor.py,sha256=zh7Es5Ta2qSTgHtfF9Y9ZKXs4DMue6XlyRt9O6_Uk6c,10962
+llm_ie-1.2.0.dist-info/METADATA,sha256=X9zsMDwBAq1QzIkX8SSbmwLsEFiiAVeNeA0GTiNkAkQ,728
+llm_ie-1.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llm_ie-1.2.0.dist-info/RECORD,,
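A note on reading the RECORD entries above: per the wheel format (PEP 376/PEP 427), each line is path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file with "=" padding stripped and the last field is the file size in bytes; RECORD's own entry (llm_ie-1.2.0.dist-info/RECORD,,) leaves the hash and size empty because the file cannot contain its own digest. A short sketch that reproduces an entry:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> str:
        # "<path>,sha256=<urlsafe-b64 digest, no padding>,<size in bytes>"
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode('ascii')},{len(data)}"

    # Run from an unpacked 1.2.0 wheel, record_entry("llm_ie/prompt_editor.py")
    # should reproduce the line ending in ",10962" above.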
{llm_ie-1.0.0.dist-info → llm_ie-1.2.0.dist-info}/WHEEL
File without changes