tasks-prompts-chain 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasks_prompts_chain/tasks_prompts_chain.py +7 -53
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/METADATA +49 -22
- tasks_prompts_chain-0.0.5.dist-info/RECORD +6 -0
- tasks_prompts_chain-0.0.3.dist-info/RECORD +0 -6
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/WHEEL +0 -0
- {tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/licenses/LICENSE +0 -0
tasks_prompts_chain/tasks_prompts_chain.py

@@ -121,7 +121,7 @@ class TasksPromptsChain:
         self._final_output_template=output
         return output
 
-    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]
+    async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
         """
         Execute a chain of prompts sequentially, with placeholder replacement.
 
@@ -132,8 +132,6 @@ class TasksPromptsChain:
                     "output_format": str,
                     "output_placeholder": str
                 }
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
         Returns:
             List[str]: List of responses for each prompt
         """
@@ -184,14 +182,12 @@ class TasksPromptsChain:
                     response_content += delta
                     self._current_stream_buffer = response_content
                     self._format_current_stream()
-
-
-
-
-
-
-                    placeholder_values[prompt_template.output_placeholder] = response_content
-                    self._results[prompt_template.output_placeholder] = response_content
+                    responses.append(response_content)
+                    # Store response with placeholder if specified
+                    if prompt_template.output_placeholder:
+                        placeholder_values[prompt_template.output_placeholder] = response_content
+                        self._results[prompt_template.output_placeholder] = response_content
+                    yield delta
 
         except Exception as e:
             raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
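With these two hunks, `execute_chain` becomes an async generator: it yields every streamed delta, records each prompt's full response under its placeholder, and drops the per-call `temperature` argument in favour of constructor options. A minimal consumption sketch, assuming the `model_options` constructor documented in the 0.0.5 README below; the `prompt` key in the prompt dict is an assumption, since the docstring only shows `output_format` and `output_placeholder`:

```python
import asyncio
from tasks_prompts_chain import TasksPromptsChain

async def main():
    chain = TasksPromptsChain(
        model_options={
            "model": "gpt-3.5-turbo",
            "api_key": "your-api-key",
            "temperature": 0.1,  # replaces the removed per-call temperature argument
            "max_tokens": 4120,
            "stream": True
        },
        final_result_placeholder="result",
    )
    prompts = [{
        "prompt": "Suggest a name for a luxury watch",  # hypothetical key, not confirmed by the diff
        "output_format": "text",
        "output_placeholder": "watch_name",
    }]
    # execute_chain now yields deltas as they stream in
    async for delta in chain.execute_chain(prompts):
        print(delta, end="", flush=True)
    # the accumulated response is stored under its placeholder
    print(chain.get_result("watch_name"))

asyncio.run(main())
```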
@@ -235,45 +231,3 @@ class TasksPromptsChain:
             raise Exception("template_output must be called before execute_chain")
         self.set_output_template(template)
 
-    def execute_chain_with_system(self, prompts: List[str], system_prompt: str, temperature: float = 0.7) -> List[str]:
-        """
-        Execute a chain of prompts with a system prompt included.
-
-        Args:
-            prompts (List[str]): List of prompts to process in sequence
-            system_prompt (str): System prompt to set context for all interactions
-            temperature (float): Temperature parameter for response generation (0.0 to 1.0)
-
-        Returns:
-            List[str]: List of responses for each prompt
-        """
-        responses = []
-        context = ""
-
-        try:
-            for i, prompt in enumerate(prompts):
-                # Combine previous context with current prompt if not the first prompt
-                full_prompt = f"{context}\n{prompt}" if context else prompt
-
-                response = self.client.chat.completions.create(
-                    model=self.model,
-                    messages=[
-                        {"role": "system", "content": system_prompt},
-                        {"role": "user", "content": full_prompt}
-                    ],
-                    temperature=temperature,
-                    max_tokens=4120,
-                    stream=True
-                )
-
-                # Extract the response content
-                response_content = response.choices[0].message.content
-                responses.append(response_content)
-
-                # Update context for next iteration
-                context = response_content
-
-        except Exception as e:
-            raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
-
-        return responses
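The deleted `execute_chain_with_system` helper was a blocking method that threaded context along by string concatenation; in 0.0.5 its role is covered by the constructor-level system-prompt options added to the README below. A sketch of the replacement pattern, using only keyword arguments the new METADATA confirms:

```python
# System prompts now ride on the constructor instead of a dedicated method;
# the keyword arguments below are taken from the 0.0.5 README in this diff.
chain = TasksPromptsChain(
    model_options={
        "model": "gpt-3.5-turbo",
        "api_key": "your-api-key",
        "temperature": 0.1,
        "max_tokens": 4120,
        "stream": True
    },
    final_result_placeholder="result",
    system_prompt="You are a professional design expert specialized in luxury products",
    system_apply_to_all_prompts=True  # apply the system prompt to every prompt in the chain
)
```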
{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.0.3
+Version: 0.0.5
 Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -59,8 +59,13 @@ from tasks_prompts_chain import TasksPromptsChain
 async def main():
     # Initialize the chain
     chain = TasksPromptsChain(
-        model="gpt-3.5-turbo",
-        api_key="your-api-key",
+        model_options={
+            "model": "gpt-3.5-turbo",
+            "api_key": "your-api-key",
+            "temperature": 0.1,
+            "max_tokens": 4120,
+            "stream": True
+        },
         final_result_placeholder="design_result"
     )
 
@@ -89,8 +94,43 @@ async def main():
 
 ## Advanced Usage
 
+### Using System Prompts
+
+```python
+chain = TasksPromptsChain(
+    model_options={
+        "model": "gpt-3.5-turbo",
+        "api_key": "your-api-key",
+        "temperature": 0.1,
+        "max_tokens": 4120,
+        "stream": True
+    },
+    final_result_placeholder="result",
+    system_prompt="You are a professional design expert specialized in luxury products",
+    system_apply_to_all_prompts=True
+)
+```
+
+### Custom API Endpoint
+
+```python
+chain = TasksPromptsChain(
+    model_options={
+        "model": "your-custom-model",
+        "api_key": "your-api-key",
+        "base_url": "https://your-custom-endpoint.com/v1",
+        "temperature": 0.1,
+        "max_tokens": 4120,
+        "stream": True
+    },
+    final_result_placeholder="result",
+)
+```
+
 ### Using Templates
 
+You must call this setter before executing the prompt chain (chain.execute_chain(prompts)):
+
 ```python
 # Set output template before execution
 chain.template_output("""
@@ -107,29 +147,13 @@ chain.template_output("""
 </result>
 """)
 ```
-
-### Using System Prompts
+Then retrieve the final result within the template:
 
 ```python
-chain = TasksPromptsChain(
-    model="gpt-3.5-turbo",
-    api_key="your-api-key",
-    final_result_placeholder="result",
-    system_prompt="You are a professional design expert specialized in luxury products",
-    system_apply_to_all_prompts=True
-)
+# Print out the final result in the well-formatted template
+print(chain.get_final_result_within_template())
 ```
 
-### Custom API Endpoint
-
-```python
-chain = TasksPromptsChain(
-    model="gpt-3.5-turbo",
-    api_key="your-api-key",
-    final_result_placeholder="result",
-    base_url="https://your-custom-endpoint.com/v1"
-)
-```
 
 ## API Reference
 
@@ -152,6 +176,9 @@ chain = TasksPromptsChain(
 - `template_output(template: str) -> None`
   - Sets the output template format
 
+- `get_final_result_within_template() -> Optional[str]`
+  - Retrieves the final result formatted with the template defined in template_output()
+
 - `get_result(placeholder: str) -> Optional[str]`
   - Retrieves a specific result by placeholder
 
tasks_prompts_chain-0.0.5.dist-info/RECORD
ADDED

@@ -0,0 +1,6 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=Ng1r8GvpAEKcOxMriX35lrmB74xHv2lZBMn1Mxs3AqE,9975
+tasks_prompts_chain-0.0.5.dist-info/METADATA,sha256=Cg_LG_ZN60YK-MPAkoIeblVwDVO7ATdmBRvbZKZQZgs,5778
+tasks_prompts_chain-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.0.5.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.0.5.dist-info/RECORD,,
tasks_prompts_chain-0.0.3.dist-info/RECORD
DELETED

@@ -1,6 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=P6vJpuuwteXItmudhOnsiArojINeXB5lJnHgxTsXjCA,11816
-tasks_prompts_chain-0.0.3.dist-info/METADATA,sha256=MhEBZjJQ_7m8jH9RxxtEGSNn2ppEIR0DBoGIUmo25bA,4906
-tasks_prompts_chain-0.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.0.3.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.0.3.dist-info/RECORD,,
{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/WHEEL
RENAMED
File without changes

{tasks_prompts_chain-0.0.3.dist-info → tasks_prompts_chain-0.0.5.dist-info}/licenses/LICENSE
RENAMED
File without changes