llm-ie 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_ie/prompt_editor.py CHANGED
@@ -1,8 +1,14 @@
-import os
+import sys
+from typing import Dict
 import importlib.resources
 from llm_ie.engines import InferenceEngine
 from llm_ie.extractors import FrameExtractor
+import re
+from colorama import Fore, Style
+import ipywidgets as widgets
+from IPython.display import display, HTML
 
+
 class PromptEditor:
     def __init__(self, inference_engine:InferenceEngine, extractor:FrameExtractor):
         """
@@ -18,16 +24,48 @@ class PromptEditor:
         self.inference_engine = inference_engine
         self.prompt_guide = extractor.get_prompt_guide()
 
+        file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('system.txt')
+        with open(file_path, 'r') as f:
+            self.system_prompt = f.read()
+
+
+    def _apply_prompt_template(self, text_content:Dict[str,str], prompt_template:str) -> str:
+        """
+        This method applies text_content to prompt_template and returns a prompt.
+
+        Parameters
+        ----------
+        text_content : Dict[str,str]
+            the input text content to put in prompt template.
+            all the keys must be included in the prompt template placeholder {{<placeholder name>}}.
+
+        Returns : str
+            a prompt.
+        """
+        pattern = re.compile(r'{{(.*?)}}')
+        placeholders = pattern.findall(prompt_template)
+        if len(placeholders) != len(text_content):
+            raise ValueError(f"Expect text_content ({len(text_content)}) and prompt template placeholder ({len(placeholders)}) to have equal size.")
+        if not all([k in placeholders for k, _ in text_content.items()]):
+            raise ValueError(f"All keys in text_content ({text_content.keys()}) must match placeholders in prompt template ({placeholders}).")
+
+        prompt = pattern.sub(lambda match: re.sub(r'\\', r'\\\\', text_content[match.group(1)]), prompt_template)
+
+        return prompt
+
+
     def rewrite(self, draft:str) -> str:
         """
         This method inputs a prompt draft and rewrites it following the extractor's guideline.
         """
         file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('rewrite.txt')
         with open(file_path, 'r') as f:
-            prompt = f.read()
+            rewrite_prompt_template = f.read()
 
-        prompt = prompt.replace("{{draft}}", draft).replace("{{prompt_guideline}}", self.prompt_guide)
-        messages = [{"role": "user", "content": prompt}]
+        prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide},
+                                             prompt_template=rewrite_prompt_template)
+        messages = [{"role": "system", "content": self.system_prompt},
+                    {"role": "user", "content": prompt}]
         res = self.inference_engine.chat(messages, stream=True)
         return res
 
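The `_apply_prompt_template` method added here drives all prompt construction in this file: it collects the `{{name}}` placeholders with a regex, checks that they match the supplied keys one-to-one, and substitutes the values. A minimal standalone sketch of that behavior (simplified: it omits the backslash escaping the shipped code applies to substituted values):

```python
import re
from typing import Dict

def apply_prompt_template(text_content: Dict[str, str], prompt_template: str) -> str:
    # Collect every {{...}} placeholder name in the template.
    pattern = re.compile(r'{{(.*?)}}')
    placeholders = pattern.findall(prompt_template)
    # Keys and placeholders must match one-to-one, mirroring the shipped checks.
    if len(placeholders) != len(text_content) or not all(k in placeholders for k in text_content):
        raise ValueError("text_content keys must match prompt template placeholders.")
    # Replace each placeholder with its value.
    return pattern.sub(lambda m: text_content[m.group(1)], prompt_template)

template = "Rewrite this draft:\n{{draft}}\n\nFollow this guideline:\n{{prompt_guideline}}"
print(apply_prompt_template({"draft": "Extract diagnoses.", "prompt_guideline": "<guide>"}, template))
```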
@@ -37,9 +75,113 @@ class PromptEditor:
         """
         file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('comment.txt')
         with open(file_path, 'r') as f:
-            prompt = f.read()
+            comment_prompt_template = f.read()
 
-        prompt = prompt.replace("{{draft}}", draft).replace("{{prompt_guideline}}", self.prompt_guide)
-        messages = [{"role": "user", "content": prompt}]
+        prompt = self._apply_prompt_template(text_content={"draft": draft, "prompt_guideline": self.prompt_guide},
+                                             prompt_template=comment_prompt_template)
+        messages = [{"role": "system", "content": self.system_prompt},
+                    {"role": "user", "content": prompt}]
         res = self.inference_engine.chat(messages, stream=True)
-        return res
+        return res
+
+
+    def _terminal_chat(self):
+        """
+        This method runs an interactive chat session in the terminal to help users write prompt templates.
+        """
+        file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
+        with open(file_path, 'r') as f:
+            chat_prompt_template = f.read()
+
+        prompt = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide},
+                                             prompt_template=chat_prompt_template)
+
+        messages = [{"role": "system", "content": self.system_prompt},
+                    {"role": "user", "content": prompt}]
+
+        print(f'Welcome to the interactive chat! Type "{Fore.RED}exit{Style.RESET_ALL}" or {Fore.YELLOW}control + C{Style.RESET_ALL} to end the conversation.')
+
+        while True:
+            # Get user input
+            user_input = input(f"{Fore.GREEN}\nUser: {Style.RESET_ALL}")
+
+            # Exit condition
+            if user_input.lower() == 'exit':
+                print(f"{Fore.YELLOW}Interactive chat ended. Goodbye!{Style.RESET_ALL}")
+                break
+
+            # Chat
+            messages.append({"role": "user", "content": user_input})
+            print(f"{Fore.BLUE}Assistant: {Style.RESET_ALL}", end="")
+            response = self.inference_engine.chat(messages, stream=True)
+            messages.append({"role": "assistant", "content": response})
+
+
+    def _IPython_chat(self):
+        """
+        This method runs an interactive chat session in Jupyter/IPython using ipywidgets to help users write prompt templates.
+        """
+        # Load the chat prompt template from the resources
+        file_path = importlib.resources.files('llm_ie.asset.PromptEditor_prompts').joinpath('chat.txt')
+        with open(file_path, 'r') as f:
+            chat_prompt_template = f.read()
+
+        # Prepare the initial system message with the prompt guideline
+        prompt = self._apply_prompt_template(text_content={"prompt_guideline": self.prompt_guide},
+                                             prompt_template=chat_prompt_template)
+
+        # Initialize conversation messages
+        messages = [{"role": "system", "content": self.system_prompt},
+                    {"role": "user", "content": prompt}]
+
+        # Widgets for user input and chat output
+        input_box = widgets.Text(placeholder="Type your message here...")
+        output_area = widgets.Output()
+
+        # Display initial instructions
+        with output_area:
+            display(HTML('Welcome to the interactive chat! Type "<span style="color: red;">exit</span>" to end the conversation.'))
+
+        def handle_input(sender):
+            user_input = input_box.value
+            input_box.value = ''  # Clear the input box after submission
+
+            # Exit condition
+            if user_input.strip().lower() == 'exit':
+                with output_area:
+                    display(HTML('<p style="color: orange;">Interactive chat ended. Goodbye!</p>'))
+                input_box.disabled = True  # Disable the input box after exiting
+                return
+
+            # Append user message to conversation
+            messages.append({"role": "user", "content": user_input})
+            print(f"User: {user_input}")
+
+            # Display the user message
+            with output_area:
+                display(HTML(f'<pre><span style="color: green;">User: </span>{user_input}</pre>'))
+
+            # Get assistant's response and append it to conversation
+            print("Assistant: ", end="")
+            response = self.inference_engine.chat(messages, stream=True)
+            messages.append({"role": "assistant", "content": response})
+
+            # Display the assistant's response
+            with output_area:
+                display(HTML(f'<pre><span style="color: blue;">Assistant: </span>{response}</pre>'))
+
+        # Bind the user input to the handle_input function
+        input_box.on_submit(handle_input)
+
+        # Display the input box and output area
+        display(input_box)
+        display(output_area)
+
+    def chat(self):
+        """
+        External method that detects the environment and calls the appropriate chat method.
+        """
+        if 'ipykernel' in sys.modules:
+            self._IPython_chat()
+        else:
+            self._terminal_chat()
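The `chat()` entry point above dispatches on a common heuristic: a Jupyter/IPython kernel imports `ipykernel` at startup, so its presence in `sys.modules` separates notebook sessions from plain terminals. The same check in isolation:

```python
import sys

def running_in_ipython_kernel() -> bool:
    # ipykernel is loaded by Jupyter/IPython kernels at startup,
    # so it appears in sys.modules there but not in a terminal session.
    return 'ipykernel' in sys.modules

print(running_in_ipython_kernel())  # False when run as `python script.py`
```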
llm_ie-0.2.2.dist-info/METADATA → llm_ie-0.3.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-ie
-Version: 0.2.2
+Version: 0.3.1
 Summary: An LLM-powered tool that transforms everyday language into robust information extraction pipelines.
 License: MIT
 Author: Enshuo (David) Hsu
@@ -9,6 +9,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: colorama (>=0.4.6,<0.5.0)
 Requires-Dist: nltk (>=3.8,<4.0)
 Description-Content-Type: text/markdown
 
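The new `colorama` requirement backs the colored terminal chat added in `prompt_editor.py` above. A minimal sketch of the calls it supplies (`init()` is what enables ANSI colors on Windows consoles; it is harmless elsewhere):

```python
from colorama import Fore, Style, init

init()  # enable ANSI color handling on Windows consoles
print(f"{Fore.GREEN}User: {Style.RESET_ALL}hello")
print(f"{Fore.BLUE}Assistant: {Style.RESET_ALL}hi there")
```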
@@ -20,6 +21,13 @@ Description-Content-Type: text/markdown
 
 An LLM-powered tool that transforms everyday language into robust information extraction pipelines.
 
+| Features | Support |
+|----------|----------|
+| **LLM Agent for prompt writing** | :white_check_mark: Interactive chat, Python functions |
+| **Named Entity Recognition (NER)** | :white_check_mark: Document-level, Sentence-level |
+| **Entity Attributes Extraction** | :white_check_mark: Flexible formats |
+| **Relation Extraction (RE)** | :white_check_mark: Binary & Multiclass relations |
+
 ## Table of Contents
 - [Overview](#overview)
 - [Prerequisite](#prerequisite)
@@ -35,12 +43,12 @@ An LLM-powered tool that transforms everyday language into robust information ex
 - [RelationExtractor](#relationextractor)
 
 ## Overview
-LLM-IE is a toolkit that provides robust information extraction utilities for frame-based information extraction. Since prompt design has a significant impact on generative information extraction with LLMs, it also provides a built-in LLM editor to help with prompt writing. The flowchart below demonstrates the workflow starting from a casual language request.
+LLM-IE is a toolkit that provides robust information extraction utilities for named entity, entity attribute, and entity relation extraction. Since prompt design has a significant impact on generative information extraction with LLMs, it has a built-in LLM agent ("editor") to help with prompt writing. The flowchart below demonstrates the workflow from a casual language request to output visualization.
 
 <div align="center"><img src="doc_asset/readme_img/LLM-IE flowchart.png" width=800 ></div>
 
 ## Prerequisite
-At least one LLM inference engine is required. There are built-in supports for 🦙 [Llama-cpp-python](https://github.com/abetlen/llama-cpp-python), <img src="https://avatars.githubusercontent.com/u/151674099?s=48&v=4" alt="Icon" width="20"/> [Ollama](https://github.com/ollama/ollama), 🤗 [Huggingface_hub](https://github.com/huggingface/huggingface_hub), <img src=doc_asset/readme_img/openai-logomark.png width=16 /> [OpenAI API](https://platform.openai.com/docs/api-reference/introduction), and <img src=doc_asset/readme_img/vllm-logo.png width=20 /> vLLM. For installation guides, please refer to those projects. Other inference engines can be configured through the [InferenceEngine](src/llm_ie/engines.py) abstract class. See [LLM Inference Engine](#llm-inference-engine) section below.
+At least one LLM inference engine is required. There is built-in support for 🦙 [Llama-cpp-python](https://github.com/abetlen/llama-cpp-python), <img src="https://avatars.githubusercontent.com/u/151674099?s=48&v=4" alt="Icon" width="20"/> [Ollama](https://github.com/ollama/ollama), 🤗 [Huggingface_hub](https://github.com/huggingface/huggingface_hub), <img src=doc_asset/readme_img/openai-logomark.png width=16 /> [OpenAI API](https://platform.openai.com/docs/api-reference/introduction), and <img src=doc_asset/readme_img/vllm-logo.png width=20 /> [vLLM](https://github.com/vllm-project/vllm). For installation guides, please refer to those projects. Other inference engines can be configured through the [InferenceEngine](src/llm_ie/engines.py) abstract class. See the [LLM Inference Engine](#llm-inference-engine) section below.
 
 ## Installation
 The Python package is available on PyPI.
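The install step itself is elided from this diff; given the package name above, the usual command is:

```
pip install llm-ie
```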
@@ -125,21 +133,26 @@ We start with a casual description:
 
 *"Extract diagnosis from the clinical note. Make sure to include diagnosis date and status."*
 
-The ```PromptEditor``` rewrites it following the schema required by the ```BasicFrameExtractor```.
-
-```python
+Define the AI prompt editor:
+```python
+from llm_ie.engines import OllamaInferenceEngine
 from llm_ie.extractors import BasicFrameExtractor
 from llm_ie.prompt_editor import PromptEditor
 
-# Describe the task in casual language
-prompt_draft = "Extract diagnosis from the clinical note. Make sure to include diagnosis date and status."
-
-# Use LLM editor to generate a formal prompt template with standard extraction schema
+# Define an LLM inference engine
+llm = OllamaInferenceEngine(model_name="llama3.1:8b-instruct-q8_0")
+# Define the LLM prompt editor
 editor = PromptEditor(llm, BasicFrameExtractor)
-prompt_template = editor.rewrite(prompt_draft)
+# Start chat
+editor.chat()
 ```
 
-The editor generates a prompt template as below:
+This opens an interactive session:
+<div align="left"><img src=doc_asset/readme_img/terminal_chat.PNG width=1000 ></div>
+
+
+The ```PromptEditor``` drafts a prompt template following the schema required by the ```BasicFrameExtractor```:
+
 ```
 # Task description
 The paragraph below contains a clinical note with diagnoses listed. Please carefully review it and extract the diagnoses, including the diagnosis date and status.
@@ -165,6 +178,8 @@ If there is no specific date or status, just omit those keys.
 Below is the clinical note:
 {{input}}
 ```
+
+
 #### Information extraction pipeline
 Now we apply the prompt template to build an information extraction pipeline.
 
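The extraction call that produces the `frames` used below is elided from this hunk. A sketch of what sits there, assuming the `BasicFrameExtractor` interface shown earlier in this README; the parameter names (`text_content`, `entity_key`) are assumptions, not confirmed by this diff:

```python
from llm_ie.extractors import BasicFrameExtractor

# Assumed usage: pair the engine defined earlier with the generated prompt template.
extractor = BasicFrameExtractor(llm, prompt_template)
# Assumed call and parameter names; adjust to the shipped signature.
frames = extractor.extract_frames(text_content=note_text, entity_key="Diagnosis", stream=True)
```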
@@ -202,15 +217,33 @@ from llm_ie.data_types import LLMInformationExtractionDocument
 doc = LLMInformationExtractionDocument(doc_id="Synthesized medical note",
                                        text=note_text)
 # Add frames to a document
-for frame in frames:
-    doc.add_frame(frame, valid_mode="span", create_id=True)
+doc.add_frames(frames, create_id=True)
 
 # Save document to file (.llmie)
 doc.save("<your filename>.llmie")
 ```
 
+To visualize the extracted frames, we use the ```viz_serve()``` method.
+```python
+doc.viz_serve()
+```
+A Flask app starts on port 5000 (default).
+```
+ * Serving Flask app 'ie_viz.utilities'
+ * Debug mode: off
+WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
+ * Running on all addresses (0.0.0.0)
+ * Running on http://127.0.0.1:5000
+Press CTRL+C to quit
+127.0.0.1 - - [03/Oct/2024 23:36:22] "GET / HTTP/1.1" 200 -
+```
+
+<div align="left"><img src="doc_asset/readme_img/llm-ie_demo.PNG" width=1000 ></div>
+
+
 ## Examples
-- [Write prompt templates with AI editors](demo/prompt_template_writing.ipynb)
+- [Interactive chat with LLM prompt editors](demo/prompt_template_writing_via_chat.ipynb)
+- [Write prompt templates with LLM prompt editors](demo/prompt_template_writing.ipynb)
 - [NER + RE for Drug, Strength, Frequency](demo/medication_relation_extraction.ipynb)
 
 ## User Guide
@@ -435,7 +468,30 @@ print(BasicFrameExtractor.get_prompt_guide())
 ```
 
 ### Prompt Editor
-The prompt editor is an LLM agent that reviews, comments and rewrites a prompt following the defined schema of each extractor. It is recommended to use prompt editor iteratively:
+The prompt editor is an LLM agent that helps users write prompt templates following the defined schema and guidelines of each extractor. Chat with the prompt editor:
+
+```python
+from llm_ie.prompt_editor import PromptEditor
+from llm_ie.extractors import BasicFrameExtractor
+from llm_ie.engines import OllamaInferenceEngine
+
+# Define an LLM inference engine
+ollama = OllamaInferenceEngine(model_name="llama3.1:8b-instruct-q8_0")
+
+# Define editor
+editor = PromptEditor(ollama, BasicFrameExtractor)
+
+editor.chat()
+```
+
+In a terminal environment, an interactive chat session will start:
+<div align="left"><img src=doc_asset/readme_img/terminal_chat.PNG width=1000 ></div>
+
+In a Jupyter/IPython environment, an ipywidgets session will start:
+<div align="left"><img src=doc_asset/readme_img/IPython_chat.PNG width=1000 ></div>
+
+
+We can also interact with the prompt editor programmatically via the `rewrite()` and `comment()` methods:
 1. start with a casual description of the task
 2. have the prompt editor generate a prompt template as the starting point
 3. manually revise the prompt template
@@ -581,40 +637,29 @@ print(BasicFrameExtractor.get_prompt_guide())
 ```
 
 ```
-Prompt template design:
-1. Task description
-2. Schema definition
-3. Output format definition
-4. Additional hints
-5. Input placeholder
+Prompt Template Design:
 
-Example:
+1. Task Description:
+Provide a detailed description of the task, including the background and the type of task (e.g., named entity recognition).
 
-# Task description
-The paragraph below is from the Food and Drug Administration (FDA) Clinical Pharmacology Section of Labeling for Human Prescription Drug and Biological Products, Adverse reactions section. Please carefully review it and extract the adverse reactions and percentages. Note that each adverse reaction is nested under a clinical trial and potentially an arm. Your output should take that into consideration.
+2. Schema Definition:
+List the key concepts that should be extracted, and provide clear definitions for each one.
 
-# Schema definition
-Your output should contain:
-"ClinicalTrial" which is the name of the trial,
-If applicable, "Arm" which is the arm within the clinical trial,
-"AdverseReaction" which is the name of the adverse reaction,
-If applicable, "Percentage" which is the occurance of the adverse reaction within the trial and arm,
-"Evidence" which is the EXACT sentence in the text where you found the AdverseReaction from
+3. Output Format Definition:
+The output should be a JSON list, where each element is a dictionary representing a frame (an entity along with its attributes). Each dictionary must include a key that holds the entity text. This key can be named "entity_text" or anything else depending on the context. The attributes can either be flat (e.g., {"entity_text": "<entity_text>", "attr1": "<attr1>", "attr2": "<attr2>"}) or nested (e.g., {"entity_text": "<entity_text>", "attributes": {"attr1": "<attr1>", "attr2": "<attr2>"}}).
 
-# Output format definition
-Your output should follow JSON format, for example:
-[
-{"ClinicalTrial": "<Clinical trial name or number>", "Arm": "<name of arm>", "AdverseReaction": "<Adverse reaction text>", "Percentage": "<a percent>", "Evidence": "<exact sentence from the text>"},
-{"ClinicalTrial": "<Clinical trial name or number>", "Arm": "<name of arm>", "AdverseReaction": "<Adverse reaction text>", "Percentage": "<a percent>", "Evidence": "<exact sentence from the text>"}
-]
+4. Optional: Hints:
+Provide itemized hints for the information extractors to guide the extraction process.
+
+5. Optional: Examples:
+Include examples in the format:
+Input: ...
+Output: ...
 
-# Additional hints
-Your output should be 100% based on the provided content. DO NOT output fake numbers.
-If there is no specific arm, just omit the "Arm" key. If the percentage is not reported, just omit the "Percentage" key. The "Evidence" should always be provided.
+6. Input Placeholder:
+The template must include a placeholder in the format {{<placeholder_name>}} for the input text. The placeholder name can be customized as needed.
 
-# Input placeholder
-Below is the Adverse reactions section:
-{{input}}
+......
 ```
 </details>
 
llm_ie-0.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,23 @@
+llm_ie/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llm_ie/asset/PromptEditor_prompts/chat.txt,sha256=Fq62voV0JQ8xBRcxS1Nmdd7DkHs1fGYb-tmNwctZZK0,118
+llm_ie/asset/PromptEditor_prompts/comment.txt,sha256=C_lxx-dlOlFJ__jkHKosZ8HsNAeV1aowh2B36nIipBY,159
+llm_ie/asset/PromptEditor_prompts/rewrite.txt,sha256=JAwY9vm1jSmKf2qcLBYUvrSmME2EJH36bALmkwZDWYQ,178
+llm_ie/asset/PromptEditor_prompts/system.txt,sha256=QwGTIJvp-5u2P8CkGt_rabttlN1puHQwIBNquUm1ZHo,730
+llm_ie/asset/default_prompts/ReviewFrameExtractor_addition_review_prompt.txt,sha256=pKes8BOAoJJgmo_IQh2ISKiMh_rDPl_rDUU_VgDQ4o4,273
+llm_ie/asset/default_prompts/ReviewFrameExtractor_revision_review_prompt.txt,sha256=9Nwkr2U_3ZSk01xDtgiFJVABi6FkC8Izdq7zrzFfLRg,235
+llm_ie/asset/default_prompts/SentenceReviewFrameExtractor_addition_review_prompt.txt,sha256=Of11LFuXLB249oekFelzlIeoAB0cATReqWgFTvhNz_8,329
+llm_ie/asset/default_prompts/SentenceReviewFrameExtractor_revision_review_prompt.txt,sha256=kNJQK7NdoCx13TXGY8HYGrW_v4SEaErK8j9qIzd70CM,291
+llm_ie/asset/prompt_guide/BasicFrameExtractor_prompt_guide.txt,sha256=m7iX4Qjsf1N2V1mbjE-x4F-qPGZA2qGJbUCdpets394,9293
+llm_ie/asset/prompt_guide/BinaryRelationExtractor_prompt_guide.txt,sha256=Z6Yc2_QRqroWcJ13owNJbo78I0wpS4XXDsOjXFR-aPk,2166
+llm_ie/asset/prompt_guide/MultiClassRelationExtractor_prompt_guide.txt,sha256=EQ9Jmh0CQmlfkWqXx6_apuEZUKK3WIrdpAvfbTX2_No,3011
+llm_ie/asset/prompt_guide/ReviewFrameExtractor_prompt_guide.txt,sha256=m7iX4Qjsf1N2V1mbjE-x4F-qPGZA2qGJbUCdpets394,9293
+llm_ie/asset/prompt_guide/SentenceCoTFrameExtractor_prompt_guide.txt,sha256=T4NsO33s3KSJml-klzXAJiYox0kiuxGo-ou2a2Ig2SY,14225
+llm_ie/asset/prompt_guide/SentenceFrameExtractor_prompt_guide.txt,sha256=oKH_QeDgpw771ZdHk3L7DYz2Jvfm7OolUoTiJyMJI30,9541
+llm_ie/asset/prompt_guide/SentenceReviewFrameExtractor_prompt_guide.txt,sha256=oKH_QeDgpw771ZdHk3L7DYz2Jvfm7OolUoTiJyMJI30,9541
+llm_ie/data_types.py,sha256=hPz3WOeAzfn2QKmb0CxHmRdQWZQ4G9zq8U-RJBVFdYk,14329
+llm_ie/engines.py,sha256=PTYs7s_iCPmI-yFUCVCPY_cMGS77ma2VGoz4rdNkODI,9308
+llm_ie/extractors.py,sha256=xgkicRzBPRaQPiKWmQJ5b_aiNv9VEc85jzBA7cQXic8,58331
+llm_ie/prompt_editor.py,sha256=3h_2yIe7OV4auv4Vb9Zdx2q26UhC0xp9c4tt_yDr78I,8144
+llm_ie-0.3.1.dist-info/METADATA,sha256=eJCzg7G_ivz0CcP9KycSeHo986se6tqA8cKLtQyTtw4,41266
+llm_ie-0.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llm_ie-0.3.1.dist-info/RECORD,,
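The RECORD hashes above follow the wheel format (PEP 376/427): urlsafe base64 of the SHA-256 digest with `=` padding stripped. A quick check against the zero-byte `llm_ie/__init__.py` entry:

```python
import base64
import hashlib

def record_hash(data: bytes) -> str:
    # Wheel RECORD entries store sha256=<urlsafe base64 of the digest, '=' padding removed>.
    digest = hashlib.sha256(data).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

print(record_hash(b""))  # sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
```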
llm_ie-0.2.2.dist-info/RECORD DELETED
@@ -1,15 +0,0 @@
-llm_ie/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_ie/asset/PromptEditor_prompts/comment.txt,sha256=C_lxx-dlOlFJ__jkHKosZ8HsNAeV1aowh2B36nIipBY,159
-llm_ie/asset/PromptEditor_prompts/rewrite.txt,sha256=bYLOix7DUBlcWv-Q0JZ5kDnZ9OEXBt_AGDN0TydLB8o,191
-llm_ie/asset/prompt_guide/BasicFrameExtractor_prompt_guide.txt,sha256=XbnU8byLGGUA3A3lT0bb2Hw-ggzhcqD3ZuKzduod2ww,1944
-llm_ie/asset/prompt_guide/BinaryRelationExtractor_prompt_guide.txt,sha256=z9Xg0fdFbVVwnTYcUTcAUvEIWhF075W8qGxN-Vj7xdo,1548
-llm_ie/asset/prompt_guide/MultiClassRelationExtractor_prompt_guide.txt,sha256=D5DphUHw8SUERUVdcIjUynuTmYJa6-PwBlF7FzxNsvQ,2276
-llm_ie/asset/prompt_guide/ReviewFrameExtractor_prompt_guide.txt,sha256=XbnU8byLGGUA3A3lT0bb2Hw-ggzhcqD3ZuKzduod2ww,1944
-llm_ie/asset/prompt_guide/SentenceFrameExtractor_prompt_guide.txt,sha256=8nj9OLPJMtr9Soi5JU3Xk-HC7pKNoI54xA_A4u7I5j4,2620
-llm_ie/data_types.py,sha256=hPz3WOeAzfn2QKmb0CxHmRdQWZQ4G9zq8U-RJBVFdYk,14329
-llm_ie/engines.py,sha256=m9ytGUX61jEy9SmVHbb90mrfGMAwC6dV-v7Jke1U7Ho,9296
-llm_ie/extractors.py,sha256=EVuHqW1lW0RpGmnRNmX4ih6ppfvy2gYOAOgc8Pngfkw,44103
-llm_ie/prompt_editor.py,sha256=dbu7A3O7O7Iw2v-xCgrTFH1-wTLAGf4SHDqdeS-He2Q,1869
-llm_ie-0.2.2.dist-info/METADATA,sha256=V7gi1wZN_FlaW2ZfdsGKtEGi2s-AjIvssrkjjd7ubWU,40052
-llm_ie-0.2.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llm_ie-0.2.2.dist-info/RECORD,,
llm_ie-0.3.1.dist-info/WHEEL File without changes (identical sha256 in both versions)