tasks-prompts-chain 0.0.4-py3-none-any.whl → 0.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -49,7 +49,7 @@ class ModelOptions(TypedDict, total=False):
 class PromptTemplate:
     def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
         self.prompt = prompt
-        self.output_format = OutputFormat(output_format.upper())
+        self.output_format = OutputFormat(output_format.upper()).value
         self.output_placeholder = output_placeholder
 
 class TasksPromptsChain:
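
Note: the change above swaps the stored attribute from an `OutputFormat` enum member to its plain string value, which is what lets the next hunk compare it directly against `OutputFormat.JSON.value`. A minimal sketch of the difference, assuming the enum values mirror the uppercase member names (consistent with `OutputFormat(output_format.upper())`):

```python
from enum import Enum

class OutputFormat(Enum):
    # Values assumed to mirror the member names, per OutputFormat(output_format.upper())
    TEXT = "TEXT"
    JSON = "JSON"

# 0.0.4: the attribute held the enum member, which never equals a plain string
member = OutputFormat("json".upper())
print(member == OutputFormat.JSON.value)   # False (enum member vs. "JSON")

# 0.0.6: .value stores the raw string, so string comparisons work as intended
value = OutputFormat("json".upper()).value
print(value == OutputFormat.JSON.value)    # True
```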
@@ -155,39 +155,62 @@ class TasksPromptsChain:
                for placeholder, value in placeholder_values.items():
                    current_prompt = current_prompt.replace(f"{{{{{placeholder}}}}}", value)
 
+               wants_json_output = prompt_template.output_format == OutputFormat.JSON.value
                # Format system message based on output format
                format_instruction = ""
-               if prompt_template.output_format != OutputFormat.TEXT:
-                   format_instruction = f"\nPlease provide your response in {prompt_template.output_format.value} format."
+               if wants_json_output:
+                   format_instruction = f"\nPlease provide your response in {prompt_template.output_format}."
 
                messages = []
                if self.system_prompt and (i == 0 or self.system_apply_to_all_prompts):
                    messages.append({"role": "system", "content": self.system_prompt})
+
                messages.append({"role": "user", "content": current_prompt + format_instruction})
 
-               stream = await self.client.chat.completions.create(
-                   model=self.model,
-                   messages=messages,
-                   temperature=self.temperature,
-                   max_tokens=self.max_tokens,
-                   stream=self.stream
-               )
+               # Default completions.create parameters
+               create_kwargs = {
+                   "model": self.model,
+                   "messages": messages,
+                   "temperature": self.temperature,
+                   "max_tokens": self.max_tokens,
+                   "stream": self.stream
+               }
+
+               # Check whether the model in use is a GPT model
+               is_gpt_model = self.model.lower().startswith("gpt")
+               # GPT models: text={"format": {"type": "json_object"}}
+               if is_gpt_model and wants_json_output:
+                   create_kwargs["text"] = {
+                       "format": {
+                           "type": "json_object"
+                       }
+                   }
+
+               # Non-GPT models: response_format={"type": "json_object|text"}
+               elif not is_gpt_model:
+                   create_kwargs["response_format"] = {
+                       "type": "json_object" if wants_json_output else "text"
+                   }
+
+               stream = await self.client.chat.completions.create(**create_kwargs)
 
                response_content = ""
                self._current_stream_buffer = ""
 
                async for chunk in stream:
-                   if chunk.choices[0].delta.content is not None:
-                       delta = chunk.choices[0].delta.content
-                       response_content += delta
-                       self._current_stream_buffer = response_content
-                       self._format_current_stream()
-               responses.append(response_content)
-               # Store response with placeholder if specified
-               if prompt_template.output_placeholder:
-                   placeholder_values[prompt_template.output_placeholder] = response_content
-                   self._results[prompt_template.output_placeholder] = response_content
-               yield response_content
+                   if not chunk.choices or not chunk.choices[0].delta or not chunk.choices[0].delta.content:
+                       continue
+
+                   delta = chunk.choices[0].delta.content
+                   response_content += delta
+                   self._current_stream_buffer = response_content
+                   self._format_current_stream()
+                   responses.append(response_content)
+                   # Store response with placeholder if specified
+                   if prompt_template.output_placeholder:
+                       placeholder_values[prompt_template.output_placeholder] = response_content
+                       self._results[prompt_template.output_placeholder] = response_content
+                   yield delta
 
            except Exception as e:
                raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.0.4
+Version: 0.0.6
 Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -59,8 +59,13 @@ from tasks_prompts_chain import TasksPromptsChain
 async def main():
     # Initialize the chain
     chain = TasksPromptsChain(
-        model="gpt-3.5-turbo",
-        api_key="your-api-key",
+        model_options={
+            "model": "gpt-3.5-turbo",
+            "api_key": "your-api-key",
+            "temperature": 0.1,
+            "max_tokens": 4120,
+            "stream": True
+        },
         final_result_placeholder="design_result"
     )
 
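
The constructor now takes a single `model_options` dict in place of the separate `model`/`api_key` arguments; the first code hunk shows it typed as `ModelOptions(TypedDict, total=False)`. A sketch of that shape, with the keys inferred from the README examples (the package may define additional keys):

```python
from typing import TypedDict

class ModelOptions(TypedDict, total=False):
    # total=False per the diff header; keys inferred from the README examples
    model: str
    api_key: str
    base_url: str
    temperature: float
    max_tokens: int
    stream: bool
```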
@@ -93,8 +98,13 @@ async def main():
 
 ```python
 chain = TasksPromptsChain(
-    model="gpt-3.5-turbo",
-    api_key="your-api-key",
+    model_options={
+        "model": "gpt-3.5-turbo",
+        "api_key": "your-api-key",
+        "temperature": 0.1,
+        "max_tokens": 4120,
+        "stream": True
+    },
     final_result_placeholder="result",
     system_prompt="You are a professional design expert specialized in luxury products",
     system_apply_to_all_prompts=True
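
The `system_apply_to_all_prompts` flag maps directly onto the condition in the execution hunk above: the system message is attached to the first prompt only (`i == 0`) unless the flag is set. A minimal sketch of that message assembly, reusing the names from the diff:

```python
def build_messages(i: int, system_prompt: str, apply_to_all: bool,
                   current_prompt: str, format_instruction: str) -> list[dict]:
    messages = []
    # System prompt on the first call only, or on every call when the flag is set
    if system_prompt and (i == 0 or apply_to_all):
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": current_prompt + format_instruction})
    return messages
```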
@@ -105,10 +115,15 @@ chain = TasksPromptsChain(
 
 ```python
 chain = TasksPromptsChain(
-    model="gpt-3.5-turbo",
-    api_key="your-api-key",
+    model_options={
+        "model": "your-custom-model",
+        "api_key": "your-api-key",
+        "base_url": "https://your-custom-endpoint.com/v1",
+        "temperature": 0.1,
+        "max_tokens": 4120,
+        "stream": True
+    },
     final_result_placeholder="result",
-    base_url="https://your-custom-endpoint.com/v1"
 )
 ```
 
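With `"stream": True`, 0.0.6 yields each incremental delta rather than the final accumulated text (see the `yield delta` change in the execution hunk), so consumers should concatenate the chunks themselves. A sketch of consuming the stream; the `execute_chain` generator name is assumed from the package README, which this diff does not show:

```python
# Hypothetical consumer: `chain` and `prompts` as in the README examples above;
# the execute_chain method name is an assumption, not confirmed by this diff.
async def run(chain, prompts) -> str:
    full_response = ""
    async for delta in chain.execute_chain(prompts):
        full_response += delta           # accumulate the per-chunk deltas
        print(delta, end="", flush=True)
    return full_response
```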
@@ -195,6 +210,13 @@ Errors are raised with descriptive messages indicating the specific issue and pr
 4. Consider temperature settings based on your use case
 5. Use system prompts for consistent context
 
+## Compatible Models
+
+- Llama3.3
+- llama-4-scout
+- claude-3-7-sonnet-20250219 (and other Claude versions)
+- ChatGPT models
+
 ## License
 
 MIT License
@@ -0,0 +1,6 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=DUKe5PseHKA9ownVXQpawcdAKGcwHvHtx1U_st2hT_8,10924
+tasks_prompts_chain-0.0.6.dist-info/METADATA,sha256=RNClygTof_WT8IryvMw4SeoVgWZeMYSKY5SlfLUwk4I,5892
+tasks_prompts_chain-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.0.6.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.0.6.dist-info/RECORD,,
@@ -1,6 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=NwF9PmIHXbEzEusx7B5NTph3zgrJ-SuN74eCc2dRTPI,9986
-tasks_prompts_chain-0.0.4.dist-info/METADATA,sha256=pfN6RkU9RnR7fJm0i4_PnBKBGknzjd6URaXm0184HwA,5334
-tasks_prompts_chain-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.0.4.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.0.4.dist-info/RECORD,,