tasks-prompts-chain 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasks_prompts_chain/tasks_prompts_chain.py +7 -53
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/METADATA +32 -20
- tasks_prompts_chain-0.0.4.dist-info/RECORD +6 -0
- tasks_prompts_chain-0.0.3.dist-info/RECORD +0 -6
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/WHEEL +0 -0
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/licenses/LICENSE +0 -0
tasks_prompts_chain/tasks_prompts_chain.py CHANGED

@@ -121,7 +121,7 @@ class TasksPromptsChain:
         self._final_output_template=output
         return output

-    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]
+    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
         """
         Execute a chain of prompts sequentially, with placeholder replacement.

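The change above turns execute_chain into an async generator: 0.0.4 callers consume responses with `async for` instead of awaiting a returned list. A minimal consumer sketch, assuming a `chain` and `prompts` configured as in the package README:

```python
import asyncio

async def consume(chain, prompts):
    # execute_chain now yields strings as they become available,
    # so iterate the async generator instead of awaiting a list.
    async for response in chain.execute_chain(prompts):
        print(response)

# asyncio.run(consume(chain, prompts))  # with a configured TasksPromptsChain
```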
@@ -132,8 +132,6 @@ class TasksPromptsChain:
                     "output_format": str,
                     "output_placeholder": str
                 }
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
         Returns:
             List[str]: List of responses for each prompt
         """
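The docstring above documents dict-style prompt specs. A sketch of the implied shape; only `output_format` and `output_placeholder` appear in this hunk, so the `prompt` key and its value are assumptions for illustration:

```python
# Prompt spec shape implied by the docstring above; the "prompt" key is
# an assumed example, only the two keys below appear in this hunk.
prompt_spec = {
    "prompt": "Propose a design concept for a coffee brand",  # assumed key
    "output_format": "markdown",
    "output_placeholder": "design_concept",
}
```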
@@ -184,14 +182,12 @@ class TasksPromptsChain:
                         response_content += delta
                         self._current_stream_buffer = response_content
                         self._format_current_stream()
-
-
-
-
-
-
-                placeholder_values[prompt_template.output_placeholder] = response_content
-                self._results[prompt_template.output_placeholder] = response_content
+                responses.append(response_content)
+                # Store response with placeholder if specified
+                if prompt_template.output_placeholder:
+                    placeholder_values[prompt_template.output_placeholder] = response_content
+                    self._results[prompt_template.output_placeholder] = response_content
+                yield response_content

             except Exception as e:
                 raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
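This rework is where chaining happens in 0.0.4: once a prompt finishes streaming, the full response is stored under its `output_placeholder` and yielded, so later prompts can reference earlier outputs. A sketch of that flow reusing the placeholder names from the README template; keys other than `output_placeholder` are assumptions:

```python
# Two chained prompt specs: {{design_concept}} in the second prompt is
# replaced with the stored response of the first. The "prompt" and
# "output_format" keys are assumed for illustration.
prompts = [
    {
        "prompt": "Propose a design concept for a coffee brand",
        "output_format": "markdown",
        "output_placeholder": "design_concept",
    },
    {
        "prompt": "Suggest a color palette matching: {{design_concept}}",
        "output_format": "markdown",
        "output_placeholder": "color_palette",
    },
]
```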
@@ -235,45 +231,3 @@ class TasksPromptsChain:
             raise Exception("template_output must be called before execute_chain")
         self.set_output_template(template)

-    def execute_chain_with_system(self, prompts: List[str], system_prompt: str, temperature: float = 0.7) -> List[str]:
-        """
-        Execute a chain of prompts with a system prompt included.
-
-        Args:
-            prompts (List[str]): List of prompts to process in sequence
-            system_prompt (str): System prompt to set context for all interactions
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
-        Returns:
-            List[str]: List of responses for each prompt
-        """
-        responses = []
-        context = ""
-
-        try:
-            for i, prompt in enumerate(prompts):
-                # Combine previous context with current prompt if not the first prompt
-                full_prompt = f"{context}\n{prompt}" if context else prompt
-
-                response = self.client.chat.completions.create(
-                    model=self.model,
-                    messages=[
-                        {"role": "system", "content": system_prompt},
-                        {"role": "user", "content": full_prompt}
-                    ],
-                    temperature=temperature,
-                    max_tokens=4120,
-                    stream=True
-                )
-
-                # Extract the response content
-                response_content = response.choices[0].message.content
-                responses.append(response_content)
-
-                # Update context for next iteration
-                context = response_content
-
-        except Exception as e:
-            raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
-
-        return responses
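With the synchronous execute_chain_with_system removed, the remaining route for system-level context in 0.0.4 is the constructor-based setup shown in the README's "Using System Prompts" section, combined with the streaming execute_chain. A migration sketch; the constructor keyword names below are illustrative assumptions, not confirmed by this diff:

```python
# Before (0.0.3): responses = chain.execute_chain_with_system(prompts, system_prompt)
# After (0.0.4): set the system prompt on the chain itself, then stream.
chain = TasksPromptsChain(
    model="gpt-4o",                              # assumed parameter name
    system_prompt="You are a branding expert.",  # assumed parameter name
)

async def run(prompts):
    async for response in chain.execute_chain(prompts):
        print(response)
```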
{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.0.3
+Version: 0.0.4
 Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -89,25 +89,6 @@ async def main():

 ## Advanced Usage

-### Using Templates
-
-```python
-# Set output template before execution
-chain.template_output("""
-<result>
-    <design>
-    ### Design Concept:
-    {{design_concept}}
-    </design>
-
-    <colors>
-    ### Color Palette:
-    {{color_palette}}
-    </colors>
-</result>
-""")
-```
-
 ### Using System Prompts

 ```python
@@ -131,6 +112,34 @@ chain = TasksPromptsChain(
 )
 ```

+### Using Templates
+
+You must call this setter method before executing the prompt chain (chain.execute_chain(prompts)):
+
+```python
+# Set output template before execution
+chain.template_output("""
+<result>
+    <design>
+    ### Design Concept:
+    {{design_concept}}
+    </design>
+
+    <colors>
+    ### Color Palette:
+    {{color_palette}}
+    </colors>
+</result>
+""")
+```
+Then retrieve the final result rendered within the template:
+
+```python
+# Print out the final result in the well-formatted template
+print(chain.get_final_result_within_template())
+```
+
+
 ## API Reference

 ### TasksPromptsChain Class
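One detail worth noting about the "Using Templates" section added above: the `{{design_concept}}` and `{{color_palette}}` tokens must match the `output_placeholder` values of the executed prompts, and template_output must be called before execute_chain (the library raises otherwise, per the hunk earlier in this diff). A usage sketch under those assumptions:

```python
# template_output must run before execute_chain; the {{...}} tokens must
# match the prompts' output_placeholder values.
chain.template_output("<result>{{design_concept}} / {{color_palette}}</result>")

async def run(prompts):
    async for response in chain.execute_chain(prompts):
        pass  # stream consumed; results accumulate inside the chain

    # Render every stored placeholder into the template set above
    print(chain.get_final_result_within_template())
```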
@@ -152,6 +161,9 @@ chain = TasksPromptsChain(
 - `template_output(template: str) -> None`
   - Sets the output template format

+- `get_final_result_within_template() -> Optional[str]`
+  - Retrieves the final result rendered with the template defined in template_output()
+
 - `get_result(placeholder: str) -> Optional[str]`
   - Retrieves a specific result by placeholder
tasks_prompts_chain-0.0.4.dist-info/RECORD ADDED

@@ -0,0 +1,6 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=NwF9PmIHXbEzEusx7B5NTph3zgrJ-SuN74eCc2dRTPI,9986
+tasks_prompts_chain-0.0.4.dist-info/METADATA,sha256=pfN6RkU9RnR7fJm0i4_PnBKBGknzjd6URaXm0184HwA,5334
+tasks_prompts_chain-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.0.4.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.0.4.dist-info/RECORD,,
tasks_prompts_chain-0.0.3.dist-info/RECORD DELETED

@@ -1,6 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=P6vJpuuwteXItmudhOnsiArojINeXB5lJnHgxTsXjCA,11816
-tasks_prompts_chain-0.0.3.dist-info/METADATA,sha256=MhEBZjJQ_7m8jH9RxxtEGSNn2ppEIR0DBoGIUmo25bA,4906
-tasks_prompts_chain-0.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.0.3.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.0.3.dist-info/RECORD,,
{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/WHEEL RENAMED
File without changes

{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.4.dist-info}/licenses/LICENSE RENAMED
File without changes