tasks-prompts-chain 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasks_prompts_chain/tasks_prompts_chain.py +18 -53
- {tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/METADATA +36 -20
- tasks_prompts_chain-0.0.4.dist-info/RECORD +6 -0
- tasks_prompts_chain-0.0.2.dist-info/RECORD +0 -6
- {tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/WHEEL +0 -0
- {tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/licenses/LICENSE +0 -0
tasks_prompts_chain/tasks_prompts_chain.py

@@ -89,6 +89,7 @@ class TasksPromptsChain:
         self.final_result_placeholder = final_result_placeholder or "final_result"
         self._results = {}
         self._output_template = None
+        self._final_output_template = None
         self._current_stream_buffer = ""
 
     def set_output_template(self, template: str) -> None:
@@ -117,9 +118,10 @@
             output = output.replace(f"{{{{{placeholder}}}}}", value or "")
         # Replace current streaming placeholder
         output = output.replace(f"{{{{{self.final_result_placeholder}}}}}", self._current_stream_buffer)
+        self._final_output_template = output
         return output
 
-    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]
+    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
         """
         Execute a chain of prompts sequentially, with placeholder replacement.
 
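The five consecutive braces in the `replace` calls above are easy to misread. As a standalone sketch (not part of the diff), this is what that f-string evaluates to; the placeholder name and replacement value are illustrative:

```python
# In an f-string, "{{" and "}}" are escapes for literal braces, so five
# braces around a name yield the literal token {{name}}.
placeholder = "design_concept"
token = f"{{{{{placeholder}}}}}"
print(token)  # -> {{design_concept}}

# The template pass then substitutes the stored result for that token:
template = "### Design Concept:\n{{design_concept}}"
print(template.replace(token, "A warm, earthy brand identity"))
```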
@@ -130,8 +132,6 @@
                     "output_format": str,
                     "output_placeholder": str
                 }
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
         Returns:
             List[str]: List of responses for each prompt
         """
@@ -181,14 +181,13 @@
                         delta = chunk.choices[0].delta.content
                         response_content += delta
                         self._current_stream_buffer = response_content
-
-
-
-
-
-
-
-                self._results[prompt_template.output_placeholder] = response_content
+                        self._format_current_stream()
+                responses.append(response_content)
+                # Store response with placeholder if specified
+                if prompt_template.output_placeholder:
+                    placeholder_values[prompt_template.output_placeholder] = response_content
+                    self._results[prompt_template.output_placeholder] = response_content
+                yield response_content
 
         except Exception as e:
             raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
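With `execute_chain` now typed as an `AsyncGenerator`, callers consume it with `async for`. A minimal consumption sketch, not from the package itself; the `"prompt"` key in the dict is an assumption, since only `output_format` and `output_placeholder` are visible in the docstring above:

```python
import asyncio

async def run_chain(chain):
    # Prompt dicts follow the shape documented in execute_chain's docstring;
    # the "prompt" key is assumed, the other two keys appear in this diff.
    prompts = [{
        "prompt": "Suggest a color palette for a coffee brand",
        "output_format": "markdown",
        "output_placeholder": "color_palette",
    }]
    # Per the hunk above, each iteration yields a prompt's accumulated
    # response once that step of the chain has streamed to completion.
    async for response in chain.execute_chain(prompts):
        print(response)

# asyncio.run(run_chain(chain))  # given a configured TasksPromptsChain instance
```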
@@ -209,6 +208,14 @@
         """
         return self._results.get(placeholder)
 
+    def get_final_result_within_template(self) -> Optional[str]:
+        """
+        Get the final result
+        Returns:
+            Optional[str]: The final output rendered in the template if available, None otherwise
+        """
+        return self._final_output_template
+
     def template_output(self, template: str) -> None:
         """
         Set the output template for streaming responses.
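Note that `_final_output_template` starts as `None` (first hunk) and is only populated while streamed output is being formatted, so the new accessor returns `None` until the chain has run. A small usage sketch under that assumption, with the chain and prompts passed in:

```python
async def render_report(chain, prompts):
    # Consume the stream; per-placeholder results and the rendered
    # template accumulate inside the chain as it runs.
    async for _ in chain.execute_chain(prompts):
        pass
    # None if template_output() was never called or nothing was produced;
    # otherwise the template with its placeholders filled in.
    return chain.get_final_result_within_template()
```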
@@ -224,45 +231,3 @@
             raise Exception("template_output must be called before execute_chain")
         self.set_output_template(template)
 
-    def execute_chain_with_system(self, prompts: List[str], system_prompt: str, temperature: float = 0.7) -> List[str]:
-        """
-        Execute a chain of prompts with a system prompt included.
-
-        Args:
-            prompts (List[str]): List of prompts to process in sequence
-            system_prompt (str): System prompt to set context for all interactions
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
-        Returns:
-            List[str]: List of responses for each prompt
-        """
-        responses = []
-        context = ""
-
-        try:
-            for i, prompt in enumerate(prompts):
-                # Combine previous context with current prompt if not the first prompt
-                full_prompt = f"{context}\n{prompt}" if context else prompt
-
-                response = self.client.chat.completions.create(
-                    model=self.model,
-                    messages=[
-                        {"role": "system", "content": system_prompt},
-                        {"role": "user", "content": full_prompt}
-                    ],
-                    temperature=temperature,
-                    max_tokens=4120,
-                    stream=True
-                )
-
-                # Extract the response content
-                response_content = response.choices[0].message.content
-                responses.append(response_content)
-
-                # Update context for next iteration
-                context = response_content
-
-        except Exception as e:
-            raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
-
-        return responses
{tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.0.2
+Version: 0.0.4
 Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -33,6 +33,10 @@ Please install typing-extensions and openai python packages
 pip install typing-extensions
 pip install openai
 ```
+To install the library:
+```
+pip install tasks_prompts_chain
+```
 
 ## Installation from source code
 
@@ -85,25 +89,6 @@ async def main():
 
 ## Advanced Usage
 
-### Using Templates
-
-```python
-# Set output template before execution
-chain.template_output("""
-<result>
-    <design>
-    ### Design Concept:
-    {{design_concept}}
-    </design>
-
-    <colors>
-    ### Color Palette:
-    {{color_palette}}
-    </colors>
-</result>
-""")
-```
-
 ### Using System Prompts
 
 ```python
@@ -127,6 +112,34 @@ chain = TasksPromptsChain(
 )
 ```
 
+### Using Templates
+
+You must call this set method before executing the prompting query (chain.execute_chain(prompts)):
+
+```python
+# Set output template before execution
+chain.template_output("""
+<result>
+    <design>
+    ### Design Concept:
+    {{design_concept}}
+    </design>
+
+    <colors>
+    ### Color Palette:
+    {{color_palette}}
+    </colors>
+</result>
+""")
+```
+Then retrieve the final result within the template:
+
+```python
+# Print out the final result in the well-formatted template
+print(chain.get_final_result_within_template())
+```
+
+
 ## API Reference
 
 ### TasksPromptsChain Class
@@ -148,6 +161,9 @@ chain = TasksPromptsChain(
 - `template_output(template: str) -> None`
   - Sets the output template format
 
+- `get_final_result_within_template() -> Optional[str]`
+  - Retrieves the final query result rendered with the template defined via template_output()
+
 - `get_result(placeholder: str) -> Optional[str]`
   - Retrieves a specific result by placeholder
 
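A short sketch contrasting the two accessors, reusing the placeholder name from the template example above (chain construction is elided):

```python
def collect_outputs(chain):
    # Per-prompt result, keyed by that prompt's output_placeholder.
    design = chain.get_result("design_concept")
    # The full output rendered into the template given to template_output().
    rendered = chain.get_final_result_within_template()
    return design, rendered
```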
tasks_prompts_chain-0.0.4.dist-info/RECORD

@@ -0,0 +1,6 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=NwF9PmIHXbEzEusx7B5NTph3zgrJ-SuN74eCc2dRTPI,9986
+tasks_prompts_chain-0.0.4.dist-info/METADATA,sha256=pfN6RkU9RnR7fJm0i4_PnBKBGknzjd6URaXm0184HwA,5334
+tasks_prompts_chain-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.0.4.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.0.4.dist-info/RECORD,,
tasks_prompts_chain-0.0.2.dist-info/RECORD

@@ -1,6 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=epD997ELkacml3pJibbgKEo-IhlKwLx8J5uJtuJ6uWw,11416
-tasks_prompts_chain-0.0.2.dist-info/METADATA,sha256=u88TuESsoYaag4rhQ9JqFFM8hOikSazIWHTgqv-c740,4841
-tasks_prompts_chain-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.0.2.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.0.2.dist-info/RECORD,,
{tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/WHEEL
RENAMED
File without changes

{tasks_prompts_chain-0.0.2.dist-info → tasks_prompts_chain-0.0.4.dist-info}/licenses/LICENSE
RENAMED
File without changes