markdown-flow 0.2.17__tar.gz → 0.2.19__tar.gz

This diff shows the content of publicly released package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the two published versions.
Files changed (19)
  1. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/PKG-INFO +1 -1
  2. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/__init__.py +5 -5
  3. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/constants.py +46 -19
  4. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/core.py +44 -44
  5. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/llm.py +4 -5
  6. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow.egg-info/PKG-INFO +1 -1
  7. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/tests/test_preserved_simple.py +28 -31
  8. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/LICENSE +0 -0
  9. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/README.md +0 -0
  10. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/enums.py +0 -0
  11. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/exceptions.py +0 -0
  12. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/models.py +0 -0
  13. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow/utils.py +0 -0
  14. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow.egg-info/SOURCES.txt +0 -0
  15. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow.egg-info/dependency_links.txt +0 -0
  16. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/markdown_flow.egg-info/top_level.txt +0 -0
  17. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/pyproject.toml +0 -0
  18. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/setup.cfg +0 -0
  19. {markdown_flow-0.2.17 → markdown_flow-0.2.19}/tests/test_dynamic_interaction.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: markdown-flow
- Version: 0.2.17
+ Version: 0.2.19
  Summary: An agent library designed to parse and process MarkdownFlow documents
  Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
  Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
markdown_flow/__init__.py
@@ -32,12 +32,12 @@ Basic Usage:
      blocks = mf.get_all_blocks()

      # Process blocks using unified interface
-     result = await mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
+     result = mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)

      # Different processing modes
-     prompt_result = await mf.process(0, mode=ProcessMode.PROMPT_ONLY)
-     complete_result = await mf.process(0, mode=ProcessMode.COMPLETE)
-     stream_result = await mf.process(0, mode=ProcessMode.STREAM)
+     prompt_result = mf.process(0, mode=ProcessMode.PROMPT_ONLY)
+     complete_result = mf.process(0, mode=ProcessMode.COMPLETE)
+     stream_result = mf.process(0, mode=ProcessMode.STREAM)

  Variable System:
  - {{variable}} - Regular variables, replaced with actual values
@@ -83,4 +83,4 @@ __all__ = [
      "replace_variables_in_text",
  ]

- __version__ = "0.2.17"
+ __version__ = "0.2.19"
@@ -91,13 +91,32 @@ VALIDATION_RESPONSE_ILLEGAL = "illegal"
91
91
 
92
92
  # Output instruction processing
93
93
  OUTPUT_INSTRUCTION_EXPLANATION = f"""<preserve_or_translate_instruction>
94
- 对{OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX}标记之间的内容的处理规则:
94
+ # ⚠️ 最高优先级规则
95
+
96
+ **{OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX} 标记之间的内容是用户必须看到的最终输出内容,不是指令!**
97
+
98
+ 关键要点:
99
+ 1. **这些内容必须出现在你的回复中** - 即使其他提示词说"不要回应指令"也不适用于此
100
+ 2. **绝对不要输出标记本身** - 只输出标记之间的实际内容
101
+ 3. **默认逐字原样输出** - 不要改写、润色或优化,保持原文不变
102
+ 4. **唯一例外是跨语言翻译** - 仅当需要将内容从一种语言翻译成另一种语言时才可翻译
103
+
104
+ ---
105
+
106
+ <critical_understanding>
107
+ 重要理解:
108
+ - {OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX} 中的内容不是"指令"或"执行要求"
109
+ - 即使内容看起来像标题、提示或说明,也必须原样输出给用户
110
+ - 这条规则的优先级高于文档中的其他任何提示词
111
+ - 其他提示词说的"不要回应指令"、"不要展示指令"等,不适用于此标记内的内容
112
+ </critical_understanding>
95
113
 
96
114
  <default_behavior>
97
115
  默认行为: 完全保持原样输出
98
116
  - 标记之间的内容必须逐字原样输出
99
117
  - 严禁改写、润色、优化或调整任何表达方式
100
118
  - 严禁添加、删除或替换任何文字
119
+ - 即使内容是标题格式(如 ## 标题)也必须原样输出
101
120
  </default_behavior>
102
121
 
103
122
  <exception_rule>
@@ -107,25 +126,33 @@ OUTPUT_INSTRUCTION_EXPLANATION = f"""<preserve_or_translate_instruction>
107
126
  - 如果内容无需翻译,则绝对不允许做任何改动
108
127
  </exception_rule>
109
128
 
110
- <output_requirement>
111
- 输出要求:
112
- - 不要输出{OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX}标记本身
113
- - 只输出标记之间的实际内容
114
- </output_requirement>
115
-
116
129
  <examples>
117
- 示例1 - 保持原样:
118
- <original_content>{OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
119
- <resolved_content>**下面我们做个练习。**</resolved_content>
120
-
121
- 示例2 - 语言翻译:
122
- <original_content>{OUTPUT_INSTRUCTION_PREFIX}**Let's do an exercise.**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
123
- <resolved_content>**让我们做个练习。**</resolved_content>
124
-
125
- 示例3 - 错误示范(同语言改写):
126
- <original_content>{OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
127
- <wrong_output>**来,咱们做个有趣的小练习**</wrong_output>
128
- <reason>错误: 擅自改写了中文内容</reason>
130
+ 示例1 - 正确: 保持原样且不输出标记:
131
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}
132
+ 正确输出: **下面我们做个练习。**
133
+
134
+ 示例2 - 正确: 标题也要原样输出:
135
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}## 专属指南 for 用户{OUTPUT_INSTRUCTION_SUFFIX}
136
+ 正确输出: ## 专属指南 for 用户
137
+
138
+ 示例3 - 正确: 语言翻译且不输出标记:
139
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}**Let's do an exercise.**{OUTPUT_INSTRUCTION_SUFFIX}
140
+ 正确输出: **让我们做个练习。**
141
+
142
+ ❌ 示例4 - 错误: 输出了XML标记:
143
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}## 标题内容{OUTPUT_INSTRUCTION_SUFFIX}
144
+ 错误输出: {OUTPUT_INSTRUCTION_PREFIX}## 标题内容{OUTPUT_INSTRUCTION_SUFFIX}
145
+ 错误原因: 不应该输出标记本身!
146
+
147
+ ❌ 示例5 - 错误: 同语言改写:
148
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}
149
+ 错误输出: **来,咱们做个有趣的小练习**
150
+ 错误原因: 擅自改写了中文内容
151
+
152
+ ❌ 示例6 - 错误: 没有输出固定内容:
153
+ 输入: {OUTPUT_INSTRUCTION_PREFIX}## 攻略|专属指南{OUTPUT_INSTRUCTION_SUFFIX}
154
+ 错误输出: (什么都不输出,或者跳过这部分)
155
+ 错误原因: 必须输出标记之间的内容!
129
156
  </examples>
130
157
  </preserve_or_translate_instruction>
131
158
 
markdown_flow/core.py
@@ -6,7 +6,7 @@ Refactored MarkdownFlow class with built-in LLM processing capabilities and unif

  import json
  import re
- from collections.abc import AsyncGenerator
+ from collections.abc import Generator
  from copy import copy
  from typing import Any

@@ -177,14 +177,14 @@ class MarkdownFlow:

      # Core unified interface

-     async def process(
+     def process(
          self,
          block_index: int,
          mode: ProcessMode = ProcessMode.COMPLETE,
          context: list[dict[str, str]] | None = None,
          variables: dict[str, str | list[str]] | None = None,
          user_input: dict[str, list[str]] | None = None,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ):
          """
          Unified block processing interface.
@@ -196,7 +196,7 @@ class MarkdownFlow:
              user_input: User input (for interaction blocks)

          Returns:
-             LLMResult or AsyncGenerator[LLMResult, None]
+             LLMResult or Generator[LLMResult, None, None]
          """
          # Process document_prompt variable replacement
          if self._document_prompt:
@@ -205,31 +205,31 @@ class MarkdownFlow:
          block = self.get_block(block_index)

          if block.block_type == BlockType.CONTENT:
-             return await self._process_content(block_index, mode, context, variables)
+             return self._process_content(block_index, mode, context, variables)

          if block.block_type == BlockType.INTERACTION:
              if user_input is None:
                  # Render interaction content
-                 return await self._process_interaction_render(block_index, mode, variables)
+                 return self._process_interaction_render(block_index, mode, variables)
              # Process user input
-             return await self._process_interaction_input(block_index, user_input, mode, context, variables)
+             return self._process_interaction_input(block_index, user_input, mode, context, variables)

          if block.block_type == BlockType.PRESERVED_CONTENT:
              # Preserved content output as-is, no LLM call
-             return await self._process_preserved_content(block_index, variables)
+             return self._process_preserved_content(block_index, variables)

          # Handle other types as content
-         return await self._process_content(block_index, mode, context, variables)
+         return self._process_content(block_index, mode, context, variables)

      # Internal processing methods

-     async def _process_content(
+     def _process_content(
          self,
          block_index: int,
          mode: ProcessMode,
          context: list[dict[str, str]] | None,
          variables: dict[str, str | list[str]] | None,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ):
          """Process content block."""
          # Build messages
          messages = self._build_content_messages(block_index, variables)
@@ -241,20 +241,20 @@ class MarkdownFlow:
            if not self._llm_provider:
                raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)

-           content = await self._llm_provider.complete(messages)
+           content = self._llm_provider.complete(messages)
            return LLMResult(content=content, prompt=messages[-1]["content"])

        if mode == ProcessMode.STREAM:
            if not self._llm_provider:
                raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)

-           async def stream_generator():
-               async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+           def stream_generator():
+               for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                    yield LLMResult(content=chunk, prompt=messages[-1]["content"])

            return stream_generator()

-     async def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
+     def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
          """Process preserved content block, output as-is without LLM call."""
          block = self.get_block(block_index)

@@ -266,7 +266,7 @@ class MarkdownFlow:

          return LLMResult(content=content)

-     async def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None):
          """Process interaction content rendering."""
          block = self.get_block(block_index)

@@ -299,7 +299,7 @@ class MarkdownFlow:
            if not self._llm_provider:
                return LLMResult(content=processed_block.content)  # Fallback processing

-           rendered_question = await self._llm_provider.complete(messages)
+           rendered_question = self._llm_provider.complete(messages)
            rendered_content = self._reconstruct_interaction_content(processed_block.content, rendered_question)

            return LLMResult(
@@ -316,7 +316,7 @@ class MarkdownFlow:
                # For interaction blocks, return reconstructed content (one-time output)
                rendered_content = self._reconstruct_interaction_content(processed_block.content, question_text or "")

-               async def stream_generator():
+               def stream_generator():
                    yield LLMResult(
                        content=rendered_content,
                        prompt=messages[-1]["content"],
@@ -325,9 +325,9 @@ class MarkdownFlow:
                return stream_generator()

            # With LLM provider, collect full response then return once
-           async def stream_generator():
+           def stream_generator():
                full_response = ""
-               async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+               for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                    full_response += chunk

                # Reconstruct final interaction content
@@ -341,14 +341,14 @@ class MarkdownFlow:

            return stream_generator()

-     async def _process_interaction_input(
+     def _process_interaction_input(
          self,
          block_index: int,
          user_input: dict[str, list[str]],
          mode: ProcessMode,
          context: list[dict[str, str]] | None,
          variables: dict[str, str | list[str]] | None = None,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ) -> LLMResult | Generator[LLMResult, None, None]:
          """Process interaction user input."""
          block = self.get_block(block_index)
          target_variable = block.variables[0] if block.variables else "user_input"
@@ -356,7 +356,7 @@ class MarkdownFlow:
          # Basic validation
          if not user_input or not any(values for values in user_input.values()):
              error_msg = INPUT_EMPTY_ERROR
-             return await self._render_error(error_msg, mode)
+             return self._render_error(error_msg, mode)

          # Get the target variable value from user_input
          target_values = user_input.get(target_variable, [])
@@ -370,7 +370,7 @@ class MarkdownFlow:

          if "error" in parse_result:
              error_msg = INTERACTION_PARSE_ERROR.format(error=parse_result["error"])
-             return await self._render_error(error_msg, mode)
+             return self._render_error(error_msg, mode)

          interaction_type = parse_result.get("type")

@@ -382,7 +382,7 @@ class MarkdownFlow:
              InteractionType.BUTTONS_MULTI_WITH_TEXT,
          ]:
              # All button types: validate user input against available buttons
-             return await self._process_button_validation(
+             return self._process_button_validation(
                  parse_result,
                  target_values,
                  target_variable,
@@ -415,16 +415,16 @@ class MarkdownFlow:
              },
          )
          error_msg = f"No input provided for variable '{target_variable}'"
-         return await self._render_error(error_msg, mode)
+         return self._render_error(error_msg, mode)

-     async def _process_button_validation(
+     def _process_button_validation(
          self,
          parse_result: dict[str, Any],
          target_values: list[str],
          target_variable: str,
          mode: ProcessMode,
          interaction_type: InteractionType,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ) -> LLMResult | Generator[LLMResult, None, None]:
          """
          Simplified button validation with new input format.

@@ -459,7 +459,7 @@ class MarkdownFlow:
              # Pure button mode requires input
              button_displays = [btn["display"] for btn in buttons]
              error_msg = f"Please select from: {', '.join(button_displays)}"
-             return await self._render_error(error_msg, mode)
+             return self._render_error(error_msg, mode)

          # Validate input values against available buttons
          valid_values = []
@@ -484,7 +484,7 @@ class MarkdownFlow:
          if invalid_values and not allow_text_input:
              button_displays = [btn["display"] for btn in buttons]
              error_msg = f"Invalid options: {', '.join(invalid_values)}. Please select from: {', '.join(button_displays)}"
-             return await self._render_error(error_msg, mode)
+             return self._render_error(error_msg, mode)

          # Success: return validated values
          return LLMResult(
@@ -499,13 +499,13 @@ class MarkdownFlow:
              },
          )

-     async def _process_llm_validation(
+     def _process_llm_validation(
          self,
          block_index: int,
          user_input: dict[str, list[str]],
          target_variable: str,
          mode: ProcessMode,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ) -> LLMResult | Generator[LLMResult, None, None]:
          """Process LLM validation."""
          # Build validation messages
          messages = self._build_validation_messages(block_index, user_input, target_variable)
@@ -524,7 +524,7 @@ class MarkdownFlow:
                # Fallback processing, return variables directly
                return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-           llm_response = await self._llm_provider.complete(messages)
+           llm_response = self._llm_provider.complete(messages)

            # Parse validation response and convert to LLMResult
            # Use joined target values for fallback; avoids JSON string injection
@@ -536,9 +536,9 @@ class MarkdownFlow:
            if not self._llm_provider:
                return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-           async def stream_generator():
+           def stream_generator():
                full_response = ""
-               async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+               for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                    full_response += chunk

                # Parse complete response and convert to LLMResult
@@ -552,7 +552,7 @@ class MarkdownFlow:

            return stream_generator()

-     async def _process_llm_validation_with_options(
+     def _process_llm_validation_with_options(
          self,
          block_index: int,
          user_input: dict[str, list[str]],
@@ -560,7 +560,7 @@ class MarkdownFlow:
          options: list[str],
          question: str,
          mode: ProcessMode,
-     ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     ) -> LLMResult | Generator[LLMResult, None, None]:
          """Process LLM validation with button options (third case)."""
          # Build special validation messages containing button option information
          messages = self._build_validation_messages_with_options(user_input, target_variable, options, question)
@@ -581,7 +581,7 @@ class MarkdownFlow:
                # Fallback processing, return variables directly
                return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-           llm_response = await self._llm_provider.complete(messages)
+           llm_response = self._llm_provider.complete(messages)

            # Parse validation response and convert to LLMResult
            # Use joined target values for fallback; avoids JSON string injection
@@ -593,9 +593,9 @@ class MarkdownFlow:
            if not self._llm_provider:
                return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]

-           async def stream_generator():
+           def stream_generator():
                full_response = ""
-               async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+               for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                    full_response += chunk
                    # For validation scenario, don't output chunks in real-time, only final result

@@ -612,7 +612,7 @@ class MarkdownFlow:

            return stream_generator()

-     async def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | AsyncGenerator[LLMResult, None]:
+     def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | Generator[LLMResult, None, None]:
          """Render user-friendly error message."""
          messages = self._build_error_render_messages(error_message)

@@ -626,15 +626,15 @@ class MarkdownFlow:
            if not self._llm_provider:
                return LLMResult(content=error_message)  # Fallback processing

-           friendly_error = await self._llm_provider.complete(messages)
+           friendly_error = self._llm_provider.complete(messages)
            return LLMResult(content=friendly_error, prompt=messages[-1]["content"])

        if mode == ProcessMode.STREAM:
            if not self._llm_provider:
                return LLMResult(content=error_message)

-           async def stream_generator():
-               async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+           def stream_generator():
+               for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                    yield LLMResult(content=chunk, prompt=messages[-1]["content"])

            return stream_generator()
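With every `stream_generator` above now a plain generator, STREAM mode is consumed with an ordinary `for` loop rather than `async for`. A small sketch of the consumer side, assuming `mf` is a `MarkdownFlow` configured with a synchronous provider (see the provider sketch after the llm.py hunks below); the helper name `stream_block` is hypothetical:

```python
from markdown_flow import MarkdownFlow, ProcessMode

def stream_block(mf: MarkdownFlow, index: int) -> str:
    """Collect one block's streamed output; no asyncio required."""
    pieces = []
    for chunk_result in mf.process(index, mode=ProcessMode.STREAM):
        # Each yielded item is an LLMResult carrying one chunk in .content.
        pieces.append(chunk_result.content)
    return "".join(pieces)
```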
markdown_flow/llm.py
@@ -5,7 +5,6 @@ Provides LLM provider interfaces and related data models, supporting multiple pr
  """

  from abc import ABC, abstractmethod
- from collections.abc import AsyncGenerator
  from dataclasses import dataclass
  from enum import Enum
  from typing import Any
@@ -39,7 +38,7 @@ class LLMProvider(ABC):
      """Abstract LLM provider interface."""

      @abstractmethod
-     async def complete(self, messages: list[dict[str, str]]) -> str:
+     def complete(self, messages: list[dict[str, str]]) -> str:
          """
          Non-streaming LLM call.

@@ -54,7 +53,7 @@ class LLMProvider(ABC):
          """

      @abstractmethod
-     async def stream(self, messages: list[dict[str, str]]) -> AsyncGenerator[str, None]:
+     def stream(self, messages: list[dict[str, str]]):
          """
          Streaming LLM call.

@@ -72,8 +71,8 @@ class LLMProvider(ABC):
  class NoLLMProvider(LLMProvider):
      """Empty LLM provider for prompt-only scenarios."""

-     async def complete(self, messages: list[dict[str, str]]) -> str:
+     def complete(self, messages: list[dict[str, str]]) -> str:
          raise NotImplementedError(NO_LLM_PROVIDER_ERROR)

-     async def stream(self, messages: list[dict[str, str]]) -> AsyncGenerator[str, None]:
+     def stream(self, messages: list[dict[str, str]]):
          raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
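Implementers of `LLMProvider` now override two ordinary methods: `complete()` returns a string and `stream()` can simply be a generator function. A toy sketch against the interface shown above; `EchoProvider` and its echo behavior are invented for illustration, not part of the package:

```python
from collections.abc import Generator

from markdown_flow.llm import LLMProvider


class EchoProvider(LLMProvider):
    """Toy synchronous provider that echoes the last message back."""

    def complete(self, messages: list[dict[str, str]]) -> str:
        # A direct return value satisfies the new synchronous contract.
        return messages[-1]["content"]

    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
        # A plain generator replaces the old AsyncGenerator-based stream().
        for word in messages[-1]["content"].split():
            yield word + " "
```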
markdown_flow.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: markdown-flow
- Version: 0.2.17
+ Version: 0.2.19
  Summary: An agent library designed to parse and process MarkdownFlow documents
  Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
  Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
tests/test_preserved_simple.py
@@ -14,7 +14,6 @@
  - 检查 LLM 输出是否不包含 XML 标记
  """

- import asyncio
  import os
  import sys

@@ -29,7 +28,7 @@ from markdown_flow import MarkdownFlow, ProcessMode  # noqa: E402
  from markdown_flow.llm import LLMResult  # noqa: E402


- async def test_preserved_output():
+ def test_preserved_output():
      """测试固定输出功能"""
      print("\n" + "=" * 60)
      print("🔖 固定输出测试")
@@ -38,43 +37,41 @@ async def test_preserved_output():
      # ========== 配置区域 - 修改这里 ==========
      # 你的 MarkdownFlow 文档
      document = """
- === **下面我们做个练习,输入一个变量代表风格。** ===
+ ===# 💖七夕约会全阶段攻略 ===
+
+ === 选择你的 MBTI 类型 ===
+ ?[%{{mbti}}ENFJ|ENFP|ENTJ|ENTP|ESFJ|ESFP|ESTJ|ESTP|INFJ|INFP|INTJ|INTP|ISFJ|ISFP|ISTJ|ISTP]
+
+ ===你现在最关心哪个阶段? ===
+ ?[%{{攻略}}脱单|热恋|相守]
+
+ 给{{mbti}}一句有关{{攻略}}的七夕祝福,带七夕节明显的意境。
+
+ !===
+ ## {{攻略}}|专属恋爱指南 for {{mbti}}
+ !===

- 邀请用户输入喜欢的讲述风格。
  """

      # 要测试的块索引
-     block_index = 0
+     block_index = 4

      # 变量(如果需要)
      variables: dict[str, str | list[str]] = {}

      # 文档提示词(如果需要)
-     document_prompt: str | None = """## 角色
- 你是一个丰富经验的课程讲师,擅长因材施教。
+     document_prompt: str | None = """你扮演七夕的月老,让这一天的天下有情人都能甜蜜约会,永浴爱河。

  ## 任务
- - 你正在一对一讲解内容,用户只有一个人,要有第一人称的对话感。
- - 遵从指令要求向用户讲课,不可丢失信息,不能改变指令原意,不要增加内容,不要改变顺序
- - 结合用户的具体情况做讲解,用学员能听懂的方式讲课,激发用户学习动力。
- - 不需要回应指令,禁止展示指令的执行要求。
- - 不要引导下一步动作,比如提问或设问
-
- ## 输出
- - 按照 Markdown 格式输出
- - 重点内容(关键步骤/颠覆认知点/观点总结)做加粗处理
- - 讲解风格要口语化、通俗易懂、避免使用技术/编程术语
-
- # 课程逻辑
- 1. 谁想做什么遇到了什么痛点
- 2. 旧方法为何无效?案例对比
- 3. 新解决方案的核心差异、适用条件
- 4. 用比喻/故事/数据辅助理解。
- 5. 简化的认知框架
- 6. 迁移到其他领域应用
- 7. 给到具体可操作的下一步行动
-
- 使用英文输出内容
+ - 提示词都是讲解指令,遵从指令要求做信息的讲解,不要回应指令。
+ - 用第一人称一对一讲解,像现场面对面交流一样
+ - 结合用户的不同特点,充分共情和举例
+
+ ## 风格
+ - 情绪:热烈浪漫,治愈温暖,充满感染力
+ - 表达:多用 emoji ,多用感叹词
+ - 符合七夕节日气氛,带一些诗意和神秘
+
  """
      # =========================================

@@ -92,7 +89,7 @@ async def test_preserved_output():
      print("\n📝 测试 PROMPT_ONLY 模式")
      print("-" * 60)

-     result_prompt_raw = await mf.process(
+     result_prompt_raw = mf.process(
          block_index=block_index,
          mode=ProcessMode.PROMPT_ONLY,
          variables=variables if variables else None,
@@ -134,7 +131,7 @@ async def test_preserved_output():
      print("\n📝 测试 COMPLETE 模式")
      print("-" * 60)

-     result_complete_raw = await mf.process(
+     result_complete_raw = mf.process(
          block_index=block_index,
          mode=ProcessMode.COMPLETE,
          variables=variables if variables else None,
@@ -168,4 +165,4 @@ async def test_preserved_output():


  if __name__ == "__main__":
-     asyncio.run(test_preserved_output())
+     test_preserved_output()