llm-ie 1.2.0__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_ie/engines.py
CHANGED
@@ -185,10 +185,11 @@ class Qwen3LLMConfig(LLMConfig):
 
 
 class OpenAIReasoningLLMConfig(LLMConfig):
-    def __init__(self, reasoning_effort:str=
+    def __init__(self, reasoning_effort:str=None, **kwargs):
         """
         The OpenAI "o" series configuration.
-        1. The reasoning effort
+        1. The reasoning effort as one of {"low", "medium", "high"}.
+           For models that do not support setting reasoning effort (e.g., o1-mini, o1-preview), set to None.
         2. The temperature parameter is not supported and will be ignored.
         3. The system prompt is not supported and will be concatenated to the next user prompt.
 
@@ -198,11 +199,12 @@ class OpenAIReasoningLLMConfig(LLMConfig):
             the reasoning effort. Must be one of {"low", "medium", "high"}. Default is "low".
         """
         super().__init__(**kwargs)
-        if reasoning_effort not
-
+        if reasoning_effort is not None:
+            if reasoning_effort not in ["low", "medium", "high"]:
+                raise ValueError("reasoning_effort must be one of {'low', 'medium', 'high'}.")
 
-
-
+            self.reasoning_effort = reasoning_effort
+            self.params["reasoning_effort"] = self.reasoning_effort
 
         if "temperature" in self.params:
             warnings.warn("Reasoning models do not support temperature parameter. Will be ignored.", UserWarning)
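The practical effect: `reasoning_effort` is now optional, and the config only validates the value and writes it into `params` when one is supplied. A minimal usage sketch (assuming, per the diff, that `LLMConfig.__init__` collects extra keyword arguments into `self.params`):

```python
from llm_ie.engines import OpenAIReasoningLLMConfig

# Models that support the knob: pass one of "low" / "medium" / "high".
config = OpenAIReasoningLLMConfig(reasoning_effort="high")
assert config.params["reasoning_effort"] == "high"

# o1-mini / o1-preview do not support it: pass None (the new default)
# and no "reasoning_effort" key is written into params.
legacy = OpenAIReasoningLLMConfig(reasoning_effort=None)
assert "reasoning_effort" not in legacy.params

# Invalid values are still rejected up front.
try:
    OpenAIReasoningLLMConfig(reasoning_effort="extreme")
except ValueError as e:
    print(e)  # reasoning_effort must be one of {'low', 'medium', 'high'}.
```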
llm_ie/prompt_editor.py
CHANGED
@@ -1,9 +1,11 @@
 import sys
+import warnings
 from typing import List, Dict, Generator
 import importlib.resources
 from llm_ie.engines import InferenceEngine
 from llm_ie.extractors import FrameExtractor
 import re
+import json
 from colorama import Fore, Style
 
 
@@ -40,7 +42,9 @@ class PromptEditor:
         file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('system.txt')
         with open(file_path, 'r') as f:
             self.system_prompt = f.read()
-
+
+        # internal memory (history messages) for the `chat` method
+        self.messages = []
 
     def _apply_prompt_template(self, text_content:Dict[str,str], prompt_template:str) -> str:
         """
@@ -70,6 +74,7 @@ class PromptEditor:
     def rewrite(self, draft:str) -> str:
         """
         This method inputs a prompt draft and rewrites it following the extractor's guideline.
+        This method is stateless.
         """
         file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('rewrite.txt')
         with open(file_path, 'r') as f:
@@ -85,6 +90,7 @@ class PromptEditor:
     def comment(self, draft:str) -> str:
         """
         This method inputs a prompt draft and comment following the extractor's guideline.
+        This method is stateless.
         """
         file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('comment.txt')
         with open(file_path, 'r') as f:
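Both `rewrite` and `comment` are now explicitly documented as stateless: each fills a packaged prompt template (`rewrite.txt` / `comment.txt`) with the draft and makes a one-shot call, leaving `self.messages` untouched. A hedged sketch of how they are typically called (the engine and extractor arguments are illustrative placeholders, not defined in this diff):

```python
from llm_ie.prompt_editor import PromptEditor

# `my_engine` (an InferenceEngine) and `MyFrameExtractor` (a FrameExtractor)
# are placeholders; constructing them is outside the scope of this diff.
editor = PromptEditor(my_engine, MyFrameExtractor)

draft = "Extract each medication name and its dosage from the clinical note."

improved = editor.rewrite(draft)   # one-shot rewrite; editor.messages unchanged
feedback = editor.comment(draft)   # one-shot critique; editor.messages unchanged
```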
@@ -97,22 +103,64 @@ class PromptEditor:
         res = self.inference_engine.chat(messages, verbose=True)
         return res
 
+    def clear_messages(self):
+        """
+        Clears the current chat history.
+        """
+        self.messages = []
 
-    def
+    def export_chat(self, file_path: str):
         """
-
+        Exports the current chat history to a JSON file.
+
+        Parameters
+        ----------
+        file_path : str
+            path to the file where the chat history will be saved.
+            Should have a .json extension.
         """
-
-
-            chat_prompt_template = f.read()
+        if not self.messages:
+            raise ValueError("Chat history is empty. Nothing to export.")
 
-
-
+        with open(file_path, 'w', encoding='utf-8') as f:
+            json.dump(self.messages, f, indent=4)
 
-
-
+    def import_chat(self, file_path: str):
+        """
+        Imports a chat history from a JSON file, overwriting the current history.
+
+        Parameters
+        ----------
+        file_path : str
+            The path to the .json file containing the chat history.
+        """
+        with open(file_path, 'r', encoding='utf-8') as f:
+            loaded_messages = json.load(f)
+
+        # Validate the loaded messages format.
+        if not isinstance(loaded_messages, list):
+            raise TypeError("Invalid format: The file should contain a JSON list of messages.")
+        for message in loaded_messages:
+            if not (isinstance(message, dict) and 'role' in message and 'content' in message):
+                raise ValueError("Invalid format: Each message must be a dictionary with 'role' and 'content' keys.")
 
+        self.messages = loaded_messages
+
+
+    def _terminal_chat(self):
+        """
+        This method runs an interactive chat session in the terminal to help users write prompt templates.
+        """
         print(f'Welcome to the interactive chat! Type "{Fore.RED}exit{Style.RESET_ALL}" or {Fore.YELLOW}control + C{Style.RESET_ALL} to end the conversation.')
+        if len(self.messages) > 0:
+            print(f"\nPrevious conversation:")
+            for message in self.messages:
+                if message["role"] == "user":
+                    print(f"{Fore.GREEN}\nUser: {Style.RESET_ALL}")
+                    print(message["content"])
+                elif message["role"] == "assistant":
+                    print(f"{Fore.BLUE}Assistant: {Style.RESET_ALL}", end="")
+                    print(message["content"])
 
         while True:
             # Get user input
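The new `clear_messages` / `export_chat` / `import_chat` trio makes the chat history a first-class, persistable object. The file format `import_chat` validates is just a JSON list of `{"role", "content"}` dictionaries, so a history can be prepared or inspected with nothing but the standard library. A sketch (the `editor` calls are commented out because constructing a `PromptEditor` is outside this diff):

```python
import json

# The exact shape import_chat() accepts: a JSON list of message dicts,
# each with "role" and "content" keys (anything else raises).
history = [
    {"role": "system", "content": "You are a prompt engineering assistant."},
    {"role": "user", "content": "Help me draft an extraction prompt."},
    {"role": "assistant", "content": "Sure - which entities should it capture?"},
]
with open("chat_history.json", "w", encoding="utf-8") as f:
    json.dump(history, f, indent=4)

# With a constructed editor:
# editor.import_chat("chat_history.json")  # overwrites editor.messages
# editor.chat()                            # resumes, replaying prior turns
# editor.export_chat("chat_history.json")  # ValueError if history is empty
# editor.clear_messages()                  # back to a fresh session
```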
@@ -124,10 +172,10 @@ class PromptEditor:
                 break
 
             # Chat
-            messages.append({"role": "user", "content": user_input})
+            self.messages.append({"role": "user", "content": user_input})
             print(f"{Fore.BLUE}Assistant: {Style.RESET_ALL}", end="")
-            response = self.inference_engine.chat(messages, verbose=True)
-            messages.append({"role": "assistant", "content": response})
+            response = self.inference_engine.chat(self.messages, verbose=True)
+            self.messages.append({"role": "assistant", "content": response})
 
 
     def _IPython_chat(self):
@@ -144,19 +192,6 @@ class PromptEditor:
             raise ImportError("IPython not found. Please install IPython (```pip install ipython```).")
         from IPython.display import display, HTML
 
-        # Load the chat prompt template from the resources
-        file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
-        with open(file_path, 'r') as f:
-            chat_prompt_template = f.read()
-
-        # Prepare the initial system message with the prompt guideline
-        prompt = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide},
-                                             prompt_template=chat_prompt_template)
-
-        # Initialize conversation messages
-        messages = [{"role": "system", "content": self.system_prompt},
-                    {"role": "user", "content": prompt}]
-
         # Widgets for user input and chat output
         input_box = widgets.Text(placeholder="Type your message here...")
         output_area = widgets.Output()
@@ -164,6 +199,13 @@ class PromptEditor:
         # Display initial instructions
         with output_area:
             display(HTML('Welcome to the interactive chat! Type "<span style="color: red;">exit</span>" to end the conversation.'))
+            if len(self.messages) > 0:
+                display(HTML(f'<p style="color: red;">Previous messages:</p>'))
+                for message in self.messages:
+                    if message["role"] == "user":
+                        display(HTML(f'<p style="color: green;">User: {message["content"]}</p>'))
+                    elif message["role"] == "assistant":
+                        display(HTML(f'<p style="color: blue;">Assistant: {message["content"]}</p>'))
 
         def handle_input(sender):
            user_input = input_box.value
@@ -177,7 +219,7 @@ class PromptEditor:
                 return
 
             # Append user message to conversation
-            messages.append({"role": "user", "content": user_input})
+            self.messages.append({"role": "user", "content": user_input})
             print(f"User: {user_input}")
 
             # Display the user message
@@ -186,8 +228,8 @@ class PromptEditor:
 
             # Get assistant's response and append it to conversation
             print("Assistant: ", end="")
-            response = self.inference_engine.chat(messages, verbose=True)
-            messages.append({"role": "assistant", "content": response})
+            response = self.inference_engine.chat(self.messages, verbose=True)
+            self.messages.append({"role": "assistant", "content": response})
 
             # Display the assistant's response
             with output_area:
@@ -203,7 +245,20 @@ class PromptEditor:
     def chat(self):
         """
         External method that detects the environment and calls the appropriate chat method.
+        This method use and updates the `messages` list (internal memory).
+        This method is stateful.
         """
+        # Check if the conversation is empty, if so, load the initial chat prompt template.
+        if len(self.messages) == 0:
+            file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
+            with open(file_path, 'r') as f:
+                chat_prompt_template = f.read()
+
+            guideline = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide},
+                                                    prompt_template=chat_prompt_template)
+
+            self.messages = [{"role": "system", "content": self.system_prompt + guideline}]
+
         if 'ipykernel' in sys.modules:
             self._IPython_chat()
         else:
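`chat()` therefore seeds the system message (system prompt plus the rendered `chat.txt` guideline) only on the first call; every later call finds a non-empty `self.messages` and resumes the session instead. A self-contained mimic of that seeding check (the string values stand in for the real prompt assets):

```python
# Stand-ins for the real assets read via importlib.resources.
system_prompt = "You are a prompt engineering assistant."
guideline = "\nPrompt guideline: <rendered chat.txt template>"

messages = []                 # like editor.messages on a brand-new editor

def start_chat():
    global messages
    if len(messages) == 0:    # seed only an empty session
        messages = [{"role": "system", "content": system_prompt + guideline}]
    # ...hand off to the terminal / IPython chat loop...

start_chat()                  # first call: seeds the system message
start_chat()                  # second call: history survives, no re-seeding
assert len(messages) == 1 and messages[0]["role"] == "system"
```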
@@ -213,6 +268,7 @@ class PromptEditor:
         """
         This method processes messages and yields response chunks from the inference engine.
         This is for frontend App.
+        This method is stateless.
 
         Parameters:
         ----------
@@ -232,12 +288,10 @@ class PromptEditor:
         with open(file_path, 'r') as f:
             chat_prompt_template = f.read()
 
-
-
+        guideline = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide},
+                                                prompt_template=chat_prompt_template)
 
-        messages = [{"role": "system", "content": self.system_prompt}
-                    {"role": "user", "content": prompt}] + messages
-
+        messages = [{"role": "system", "content": self.system_prompt + guideline}] + messages
 
         stream_generator = self.inference_engine.chat(messages, stream=True)
         yield from stream_generator
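In contrast to `chat()`, this streaming entry point stays stateless: the caller owns the history, and the method only prepends a single system message (system prompt plus guideline, now concatenated into one message) before yielding chunks. A hedged consumption sketch; the method name `chat_stream` is a placeholder, since the `def` line falls outside the visible hunks:

```python
# `editor` is a constructed PromptEditor (not shown in this diff), and
# `chat_stream` is a placeholder name: the def line is outside the hunk.
history = [{"role": "user", "content": "Draft a prompt for extracting lab values."}]

# The caller keeps ownership of `history`; the method only prepends
# one system message and yields response chunks from the engine.
# for chunk in editor.chat_stream(history):
#     print(chunk, end="", flush=True)
```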
llm_ie-1.2.1.dist-info/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-ie
-Version: 1.2.0
+Version: 1.2.1
 Summary: A comprehensive toolkit that provides building blocks for LLM-based named entity recognition, attribute extraction, and relation extraction pipelines.
 License: MIT
 Author: Enshuo (David) Hsu
llm_ie-1.2.1.dist-info/RECORD
CHANGED
@@ -20,9 +20,9 @@ llm_ie/asset/prompt_guide/SentenceFrameExtractor_prompt_guide.txt,sha256=97_-y_v
 llm_ie/asset/prompt_guide/SentenceReviewFrameExtractor_prompt_guide.txt,sha256=97_-y_vHMLG4Kb8fLsGgibLxB-3mest8k3LHfLo5h-I,10465
 llm_ie/chunkers.py,sha256=24h9l-Ldyx3EgfYicFqGhV_b-XofUS3yovC1nBWdDoo,5143
 llm_ie/data_types.py,sha256=72-3bzzYpo7KZpD9bjoroWT2eiM0zmWyDkBr2nHoBV0,18559
-llm_ie/engines.py,sha256=
+llm_ie/engines.py,sha256=x8ZAzYkSfnFGyZP9fBjKp3eHSkuGneq94PDP4Zinvq0,36250
 llm_ie/extractors.py,sha256=aCRqKhjSoKTAWZ3WhX_O6V-S_rIvYhPsk78nZLDpQw8,95149
-llm_ie/prompt_editor.py,sha256=
-llm_ie-1.2.
-llm_ie-1.2.
-llm_ie-1.2.
+llm_ie/prompt_editor.py,sha256=x900oDyAozdKlXrF8SDz7wAQ-ilE7orcKLdk_aM25fM,13150
+llm_ie-1.2.1.dist-info/METADATA,sha256=F-dAtCt3xjBX3yh1qm9E07fWWG9q0VAp0cZx9pBfbZU,728
+llm_ie-1.2.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llm_ie-1.2.1.dist-info/RECORD,,