tasks-prompts-chain 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -49,7 +49,7 @@ class ModelOptions(TypedDict, total=False):
49
49
  class PromptTemplate:
50
50
  def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
51
51
  self.prompt = prompt
52
- self.output_format = OutputFormat(output_format.upper())
52
+ self.output_format = OutputFormat(output_format.upper()).value
53
53
  self.output_placeholder = output_placeholder
54
54
 
55
55
  class TasksPromptsChain:
@@ -155,39 +155,62 @@ class TasksPromptsChain:
155
155
  for placeholder, value in placeholder_values.items():
156
156
  current_prompt = current_prompt.replace(f"{{{{{placeholder}}}}}", value)
157
157
 
158
+ wants_json_output = prompt_template.output_format == OutputFormat.JSON.value
158
159
  # Format system message based on output format
159
160
  format_instruction = ""
160
- if prompt_template.output_format != OutputFormat.TEXT:
161
- format_instruction = f"\nPlease provide your response in {prompt_template.output_format.value} format."
161
+ if wants_json_output:
162
+ format_instruction = f"\nPlease provide your response in {prompt_template.output_format}."
162
163
 
163
164
  messages = []
164
165
  if self.system_prompt and (i == 0 or self.system_apply_to_all_prompts):
165
166
  messages.append({"role": "system", "content": self.system_prompt})
167
+
166
168
  messages.append({"role": "user", "content": current_prompt + format_instruction})
167
169
 
168
- stream = await self.client.chat.completions.create(
169
- model=self.model,
170
- messages=messages,
171
- temperature=self.temperature,
172
- max_tokens=self.max_tokens,
173
- stream=self.stream
174
- )
170
+ # Default completion.create parameters
171
+ create_kwargs = {
172
+ "model": self.model,
173
+ "messages": messages,
174
+ "temperature": self.temperature,
175
+ "max_tokens": self.max_tokens,
176
+ "stream": self.stream
177
+ }
178
+
179
+ # Check if used model is GPT
180
+ is_gpt_model = self.model.lower().startswith("gpt")
181
+ # GPT model text = {"format" : {"type": "json_object"}}
182
+ if is_gpt_model and wants_json_output:
183
+ create_kwargs["text"] = {
184
+ "format": {
185
+ "type": "json_object"
186
+ }
187
+ }
188
+
189
+ # Non-GPT model response_format={"type": "json_object|text"}
190
+ elif not is_gpt_model:
191
+ create_kwargs["response_format"] = {
192
+ "type": "json_object" if wants_json_output else "text"
193
+ }
194
+
195
+ stream = await self.client.chat.completions.create(**create_kwargs)
175
196
 
176
197
  response_content = ""
177
198
  self._current_stream_buffer = ""
178
199
 
179
200
  async for chunk in stream:
180
- if chunk.choices[0].delta.content is not None:
181
- delta = chunk.choices[0].delta.content
182
- response_content += delta
183
- self._current_stream_buffer = response_content
184
- self._format_current_stream()
185
- responses.append(response_content)
186
- # Store response with placeholder if specified
187
- if prompt_template.output_placeholder:
188
- placeholder_values[prompt_template.output_placeholder] = response_content
189
- self._results[prompt_template.output_placeholder] = response_content
190
- yield delta
201
+ if not chunk.choices or not chunk.choices[0].delta or not chunk.choices[0].delta.content:
202
+ continue
203
+
204
+ delta = chunk.choices[0].delta.content
205
+ response_content += delta
206
+ self._current_stream_buffer = response_content
207
+ self._format_current_stream()
208
+ responses.append(response_content)
209
+ # Store response with placeholder if specified
210
+ if prompt_template.output_placeholder:
211
+ placeholder_values[prompt_template.output_placeholder] = response_content
212
+ self._results[prompt_template.output_placeholder] = response_content
213
+ yield delta
191
214
 
192
215
  except Exception as e:
193
216
  raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tasks_prompts_chain
3
- Version: 0.0.5
3
+ Version: 0.0.6
4
4
  Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
5
5
  Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
6
  Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -210,6 +210,13 @@ Errors are raised with descriptive messages indicating the specific issue and pr
210
210
  4. Consider temperature settings based on your use case
211
211
  5. Use system prompts for consistent context
212
212
 
213
+ ## Compatible Models
214
+
215
+ - Llama3.3
216
+ - llama-4-scout
217
+ - claude-3-7-sonnet-20250219 (and other Claude versions)
218
+ - ChatGPT models
219
+
213
220
  ## License
214
221
 
215
222
  MIT License
@@ -0,0 +1,6 @@
1
+ tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
+ tasks_prompts_chain/tasks_prompts_chain.py,sha256=DUKe5PseHKA9ownVXQpawcdAKGcwHvHtx1U_st2hT_8,10924
3
+ tasks_prompts_chain-0.0.6.dist-info/METADATA,sha256=RNClygTof_WT8IryvMw4SeoVgWZeMYSKY5SlfLUwk4I,5892
4
+ tasks_prompts_chain-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
+ tasks_prompts_chain-0.0.6.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
6
+ tasks_prompts_chain-0.0.6.dist-info/RECORD,,
@@ -1,6 +0,0 @@
1
- tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
- tasks_prompts_chain/tasks_prompts_chain.py,sha256=Ng1r8GvpAEKcOxMriX35lrmB74xHv2lZBMn1Mxs3AqE,9975
3
- tasks_prompts_chain-0.0.5.dist-info/METADATA,sha256=Cg_LG_ZN60YK-MPAkoIeblVwDVO7ATdmBRvbZKZQZgs,5778
4
- tasks_prompts_chain-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
- tasks_prompts_chain-0.0.5.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
6
- tasks_prompts_chain-0.0.5.dist-info/RECORD,,