llm-ie 1.4.0__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_ie/extractors.py
CHANGED
|
@@ -205,7 +205,6 @@ class StructExtractor(Extractor):
|
|
|
205
205
|
gen_text = self.inference_engine.chat(
|
|
206
206
|
messages=messages,
|
|
207
207
|
verbose=verbose,
|
|
208
|
-
stream=False,
|
|
209
208
|
messages_logger=messages_logger
|
|
210
209
|
)
|
|
211
210
|
|
|
@@ -289,9 +288,8 @@ class StructExtractor(Extractor):
|
|
|
289
288
|
|
|
290
289
|
current_gen_text = ""
|
|
291
290
|
|
|
292
|
-
response_stream = self.inference_engine.
|
|
293
|
-
messages=messages
|
|
294
|
-
stream=True
|
|
291
|
+
response_stream = self.inference_engine.chat_stream(
|
|
292
|
+
messages=messages
|
|
295
293
|
)
|
|
296
294
|
for chunk in response_stream:
|
|
297
295
|
yield chunk
|
|
@@ -865,7 +863,6 @@ class DirectFrameExtractor(FrameExtractor):
|
|
|
865
863
|
gen_text = self.inference_engine.chat(
|
|
866
864
|
messages=messages,
|
|
867
865
|
verbose=verbose,
|
|
868
|
-
stream=False,
|
|
869
866
|
messages_logger=messages_logger
|
|
870
867
|
)
|
|
871
868
|
|
|
@@ -949,9 +946,8 @@ class DirectFrameExtractor(FrameExtractor):
|
|
|
949
946
|
|
|
950
947
|
current_gen_text = ""
|
|
951
948
|
|
|
952
|
-
response_stream = self.inference_engine.
|
|
953
|
-
messages=messages
|
|
954
|
-
stream=True
|
|
949
|
+
response_stream = self.inference_engine.chat_stream(
|
|
950
|
+
messages=messages
|
|
955
951
|
)
|
|
956
952
|
for chunk in response_stream:
|
|
957
953
|
yield chunk
|
|
@@ -1348,7 +1344,6 @@ class ReviewFrameExtractor(DirectFrameExtractor):
|
|
|
1348
1344
|
initial = self.inference_engine.chat(
|
|
1349
1345
|
messages=messages,
|
|
1350
1346
|
verbose=verbose,
|
|
1351
|
-
stream=False,
|
|
1352
1347
|
messages_logger=messages_logger
|
|
1353
1348
|
)
|
|
1354
1349
|
|
|
@@ -1362,7 +1357,6 @@ class ReviewFrameExtractor(DirectFrameExtractor):
|
|
|
1362
1357
|
review = self.inference_engine.chat(
|
|
1363
1358
|
messages=messages,
|
|
1364
1359
|
verbose=verbose,
|
|
1365
|
-
stream=False,
|
|
1366
1360
|
messages_logger=messages_logger
|
|
1367
1361
|
)
|
|
1368
1362
|
|
|
@@ -1450,9 +1444,8 @@ class ReviewFrameExtractor(DirectFrameExtractor):
|
|
|
1450
1444
|
|
|
1451
1445
|
yield f"{Fore.BLUE}Extraction:{Style.RESET_ALL}\n"
|
|
1452
1446
|
|
|
1453
|
-
response_stream = self.inference_engine.
|
|
1454
|
-
messages=messages
|
|
1455
|
-
stream=True
|
|
1447
|
+
response_stream = self.inference_engine.chat_stream(
|
|
1448
|
+
messages=messages
|
|
1456
1449
|
)
|
|
1457
1450
|
|
|
1458
1451
|
initial = ""
|
|
@@ -1466,9 +1459,8 @@ class ReviewFrameExtractor(DirectFrameExtractor):
|
|
|
1466
1459
|
messages.append({'role': 'assistant', 'content': initial})
|
|
1467
1460
|
messages.append({'role': 'user', 'content': self.review_prompt})
|
|
1468
1461
|
|
|
1469
|
-
response_stream = self.inference_engine.
|
|
1470
|
-
messages=messages
|
|
1471
|
-
stream=True
|
|
1462
|
+
response_stream = self.inference_engine.chat_stream(
|
|
1463
|
+
messages=messages
|
|
1472
1464
|
)
|
|
1473
1465
|
|
|
1474
1466
|
for chunk in response_stream:
|
|
@@ -1910,7 +1902,6 @@ class AttributeExtractor(Extractor):
|
|
|
1910
1902
|
gen_text = self.inference_engine.chat(
|
|
1911
1903
|
messages=messages,
|
|
1912
1904
|
verbose=verbose,
|
|
1913
|
-
stream=False,
|
|
1914
1905
|
messages_logger=messages_logger
|
|
1915
1906
|
)
|
|
1916
1907
|
|
llm_ie/prompt_editor.py
CHANGED
|
@@ -270,5 +270,5 @@ class PromptEditor:
|
|
|
270
270
|
|
|
271
271
|
messages = [{"role": "system", "content": self.system_prompt + guideline}] + messages
|
|
272
272
|
|
|
273
|
-
stream_generator = self.inference_engine.
|
|
273
|
+
stream_generator = self.inference_engine.chat_stream(messages)
|
|
274
274
|
yield from stream_generator
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: llm-ie
|
|
3
|
-
Version: 1.4.0
|
|
3
|
+
Version: 1.4.1
|
|
4
4
|
Summary: A comprehensive toolkit that provides building blocks for LLM-based named entity recognition, attribute extraction, and relation extraction pipelines.
|
|
5
5
|
License: MIT
|
|
6
6
|
Author: Enshuo (David) Hsu
|
|
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.11
|
|
|
11
11
|
Classifier: Programming Language :: Python :: 3.12
|
|
12
12
|
Requires-Dist: colorama (>=0.4.6,<0.5.0)
|
|
13
13
|
Requires-Dist: json_repair (>=0.30)
|
|
14
|
-
Requires-Dist: llm-inference-engine (>=0.1.
|
|
14
|
+
Requires-Dist: llm-inference-engine (>=0.1.5)
|
|
15
15
|
Requires-Dist: nest_asyncio (>=1.6.0,<2.0.0)
|
|
16
16
|
Requires-Dist: nltk (>=3.8,<4.0)
|
|
17
17
|
Description-Content-Type: text/markdown
|
|
@@ -23,9 +23,9 @@ llm_ie/asset/prompt_guide/StructExtractor_prompt_guide.txt,sha256=x8L4n_LVl6ofQu
|
|
|
23
23
|
llm_ie/chunkers.py,sha256=b4APRwaLMU40QXVEhOK8m1DZi_jr-VCHAFwbMjqVBgA,11308
|
|
24
24
|
llm_ie/data_types.py,sha256=iG_jdqhpBi33xnsfFQYayCXNBK-2N-8u1xIhoKfJzRI,18294
|
|
25
25
|
llm_ie/engines.py,sha256=Lxzj0gfbUjaU8TpWWM7MqS71Vmpqdq_mIHoLiXqOmXs,1089
|
|
26
|
-
llm_ie/extractors.py,sha256=
|
|
27
|
-
llm_ie/prompt_editor.py,sha256=
|
|
26
|
+
llm_ie/extractors.py,sha256=hA7VoWZU2z6aWXfg5rTwFAmK5L0weILbCGWUaUNJU9w,114859
|
|
27
|
+
llm_ie/prompt_editor.py,sha256=ZAr6A9HRbqKWumVa5kRgcnH2rXdHSmPhYP1Hdp3Ic2o,12049
|
|
28
28
|
llm_ie/utils.py,sha256=k6M4l8GsKOMcmO6UwONQ353Zk-TeoBj6HXGjlAn-JE0,3679
|
|
29
|
-
llm_ie-1.4.
|
|
30
|
-
llm_ie-1.4.
|
|
31
|
-
llm_ie-1.4.
|
|
29
|
+
llm_ie-1.4.1.dist-info/METADATA,sha256=Tfp40uGnIbgQa-1IxtPYJpiViOaeXbOKGYi08yCCUzg,768
|
|
30
|
+
llm_ie-1.4.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
|
31
|
+
llm_ie-1.4.1.dist-info/RECORD,,
|
|
File without changes
|