markdown-flow 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



markdown_flow/__init__.py CHANGED
@@ -32,12 +32,12 @@ Basic Usage:
      blocks = mf.get_all_blocks()
 
      # Process blocks using unified interface
-     result = await mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
+     result = mf.process(0, variables={'name': 'John'}, mode=ProcessMode.COMPLETE)
 
      # Different processing modes
-     prompt_result = await mf.process(0, mode=ProcessMode.PROMPT_ONLY)
-     complete_result = await mf.process(0, mode=ProcessMode.COMPLETE)
-     stream_result = await mf.process(0, mode=ProcessMode.STREAM)
+     prompt_result = mf.process(0, mode=ProcessMode.PROMPT_ONLY)
+     complete_result = mf.process(0, mode=ProcessMode.COMPLETE)
+     stream_result = mf.process(0, mode=ProcessMode.STREAM)
 
  Variable System:
  - {{variable}} - Regular variables, replaced with actual values
@@ -83,4 +83,4 @@ __all__ = [
      "replace_variables_in_text",
  ]
 
- __version__ = "0.2.16"
+ __version__ = "0.2.18"
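The docstring change reflects the move to a synchronous `process()` at the library level in 0.2.18 (note that the bundled README, diffed further down, moves the same calls to `await` instead). A minimal sketch of the three modes under the synchronous contract; it assumes the provider argument is optional, as the fallback branches in core.py suggest:

```python
from markdown_flow import MarkdownFlow, ProcessMode

# No LLM provider is needed for PROMPT_ONLY; COMPLETE/STREAM require one.
mf = MarkdownFlow("Introduce Python to {{name}} in one paragraph.")

# PROMPT_ONLY builds the prompt without calling the LLM
prompt_result = mf.process(0, variables={"name": "John"}, mode=ProcessMode.PROMPT_ONLY)
print(prompt_result.prompt)

# With a provider configured (not shown), the other modes follow the
# same synchronous shape:
#   result = mf.process(0, mode=ProcessMode.COMPLETE)          # LLMResult
#   for chunk in mf.process(0, mode=ProcessMode.STREAM): ...   # LLMResult chunks
```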
markdown_flow/constants.py CHANGED
@@ -9,7 +9,7 @@ import re
 
  # Pre-compiled regex patterns
  COMPILED_PERCENT_VARIABLE_REGEX = re.compile(
-     r"%\{\{([\w]+)\}\}"  # Match %{{variable}} format for preserved variables
+     r"%\{\{([^}]+)\}\}"  # Match %{{variable}} format for preserved variables
  )
 
  # Interaction regex base patterns
@@ -20,11 +20,11 @@ INTERACTION_PATTERN_SPLIT = r"((?<!\\)\?\[[^\]]*\](?!\())" # Pattern for re.spl
 
  # InteractionParser specific regex patterns
  COMPILED_INTERACTION_REGEX = re.compile(INTERACTION_PATTERN)  # Main interaction pattern matcher
  COMPILED_LAYER1_INTERACTION_REGEX = COMPILED_INTERACTION_REGEX  # Layer 1: Basic format validation (alias)
- COMPILED_LAYER2_VARIABLE_REGEX = re.compile(r"^%\{\{([\w]+)\}\}(.*)$")  # Layer 2: Variable detection
+ COMPILED_LAYER2_VARIABLE_REGEX = re.compile(r"^%\{\{([^}]+)\}\}(.*)$")  # Layer 2: Variable detection
  COMPILED_LAYER3_ELLIPSIS_REGEX = re.compile(r"^(.*)\.\.\.(.*)")  # Layer 3: Split content around ellipsis
  COMPILED_LAYER3_BUTTON_VALUE_REGEX = re.compile(r"^(.+)//(.+)$")  # Layer 3: Parse Button//value format
  COMPILED_BRACE_VARIABLE_REGEX = re.compile(
-     r"(?<!%)\{\{([\w]+)\}\}"  # Match {{variable}} format for replaceable variables
+     r"(?<!%)\{\{([^}]+)\}\}"  # Match {{variable}} format for replaceable variables
  )
  COMPILED_INTERACTION_CONTENT_RECONSTRUCT_REGEX = re.compile(
      r"(\?\[[^]]*\.\.\.)([^]]*\])"  # Reconstruct interaction content: prefix + question + suffix
@@ -47,8 +47,8 @@ INLINE_PRESERVE_PATTERN = r"^===(.+)=== *$"
  COMPILED_INLINE_PRESERVE_REGEX = re.compile(INLINE_PRESERVE_PATTERN)
 
  # Output instruction markers
- OUTPUT_INSTRUCTION_PREFIX = "[输出]"
- OUTPUT_INSTRUCTION_SUFFIX = "[/输出]"
+ OUTPUT_INSTRUCTION_PREFIX = "<preserve_or_translate>"
+ OUTPUT_INSTRUCTION_SUFFIX = "</preserve_or_translate>"
 
  # System message templates
  DEFAULT_VALIDATION_SYSTEM_MESSAGE = "你是一个输入验证助手,需要严格按照指定的格式和规则处理用户输入。"
@@ -90,19 +90,44 @@ VALIDATION_RESPONSE_OK = "ok"
  VALIDATION_RESPONSE_ILLEGAL = "illegal"
 
  # Output instruction processing
- OUTPUT_INSTRUCTION_EXPLANATION = f"""请按照以下指令执行:
-
- 当遇到{OUTPUT_INSTRUCTION_PREFIX}content{OUTPUT_INSTRUCTION_SUFFIX}这样的标签对时:
- 1. **完全原样输出**中间的content内容,不要进行任何格式转换或修改
- 2. 不要输出{OUTPUT_INSTRUCTION_PREFIX}和{OUTPUT_INSTRUCTION_SUFFIX}标签本身
- 3. 即使content内容包含标题符号(如#)、特殊格式等,也要原样输出,不要转换成Markdown格式
- 4. 保持content中的所有原始字符、空格、换行符等
- 5. 然后继续执行后面的指令
-
- 重要提醒:
- - {OUTPUT_INSTRUCTION_PREFIX}和{OUTPUT_INSTRUCTION_SUFFIX}只是指令标记,不要将这些标记作为内容输出
- - 标签内的内容必须原样输出,不要按照文档提示词的格式要求进行转换
- - 这是绝对的输出指令,优先级高于任何格式要求
+ OUTPUT_INSTRUCTION_EXPLANATION = f"""<preserve_or_translate_instruction>
+ 对{OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX}标记之间的内容的处理规则:
+
+ <default_behavior>
+ 默认行为: 完全保持原样输出
+ - 标记之间的内容必须逐字原样输出
+ - 严禁改写、润色、优化或调整任何表达方式
+ - 严禁添加、删除或替换任何文字
+ </default_behavior>
+
+ <exception_rule>
+ 唯一例外: 语言翻译
+ - 仅当内容需要从一种语言翻译成另一种语言时,才可以翻译
+ - 翻译时必须保持原文的完整含义、语气和格式
+ - 如果内容无需翻译,则绝对不允许做任何改动
+ </exception_rule>
+
+ <output_requirement>
+ 输出要求:
+ - 不要输出{OUTPUT_INSTRUCTION_PREFIX}{OUTPUT_INSTRUCTION_SUFFIX}标记本身
+ - 只输出标记之间的实际内容
+ </output_requirement>
+
+ <examples>
+ 示例1 - 保持原样:
+ <original_content>{OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
+ <resolved_content>**下面我们做个练习。**</resolved_content>
+
+ 示例2 - 语言翻译:
+ <original_content>{OUTPUT_INSTRUCTION_PREFIX}**Let's do an exercise.**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
+ <resolved_content>**让我们做个练习。**</resolved_content>
+
+ 示例3 - 错误示范(同语言改写):
+ <original_content>{OUTPUT_INSTRUCTION_PREFIX}**下面我们做个练习。**{OUTPUT_INSTRUCTION_SUFFIX}</original_content>
+ <wrong_output>**来,咱们做个有趣的小练习**</wrong_output>
+ <reason>错误: 擅自改写了中文内容</reason>
+ </examples>
+ </preserve_or_translate_instruction>
 
  """
 
markdown_flow/core.py CHANGED
@@ -28,6 +28,7 @@ from .constants import (
      INTERACTION_PATTERN_SPLIT,
      INTERACTION_RENDER_INSTRUCTIONS,
      LLM_PROVIDER_REQUIRED_ERROR,
+     OUTPUT_INSTRUCTION_EXPLANATION,
      UNSUPPORTED_PROMPT_TYPE_ERROR,
  )
  from .enums import BlockType
@@ -183,8 +184,7 @@ class MarkdownFlow:
          context: list[dict[str, str]] | None = None,
          variables: dict[str, str | list[str]] | None = None,
          user_input: dict[str, list[str]] | None = None,
-         dynamic_interaction_format: str | None = None,
-     ) -> LLMResult | Generator[LLMResult, None, None]:
+     ):
          """
          Unified block processing interface.
 
@@ -194,7 +194,6 @@ class MarkdownFlow:
              context: Context message list
              variables: Variable mappings
              user_input: User input (for interaction blocks)
-             dynamic_interaction_format: Dynamic interaction format for validation
 
          Returns:
              LLMResult or Generator[LLMResult, None, None]
@@ -206,10 +205,6 @@ class MarkdownFlow:
          block = self.get_block(block_index)
 
          if block.block_type == BlockType.CONTENT:
-             # Check if this is dynamic interaction validation
-             if dynamic_interaction_format and user_input:
-                 return self._process_dynamic_interaction_validation(block_index, dynamic_interaction_format, user_input, mode, context, variables)
-             # Normal content processing (possibly with dynamic conversion)
              return self._process_content(block_index, mode, context, variables)
 
          if block.block_type == BlockType.INTERACTION:
@@ -234,37 +229,27 @@ class MarkdownFlow:
          mode: ProcessMode,
          context: list[dict[str, str]] | None,
          variables: dict[str, str | list[str]] | None,
-     ) -> LLMResult | Generator[LLMResult, None, None]:
+     ):
          """Process content block."""
+         # Build messages
+         messages = self._build_content_messages(block_index, variables)
 
-         # For PROMPT_ONLY mode, use standard content processing
          if mode == ProcessMode.PROMPT_ONLY:
-             messages = self._build_content_messages(block_index, variables)
              return LLMResult(prompt=messages[-1]["content"], metadata={"messages": messages})
 
-         # For COMPLETE and STREAM modes with LLM provider, use dynamic interaction check
-         # LLM will decide whether content needs to be converted to interaction block
-         if self._llm_provider:
-             block = self.get_block(block_index)
-             if block.block_type == BlockType.CONTENT:
-                 return self._process_with_dynamic_check(block_index, mode, context, variables)
-
-         # Fallback: Build messages using standard content processing
-         messages = self._build_content_messages(block_index, variables)
-
          if mode == ProcessMode.COMPLETE:
              if not self._llm_provider:
                  raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
 
-             result = self._llm_provider.complete(messages)
-             return LLMResult(content=result.content, prompt=messages[-1]["content"], metadata=result.metadata)
+             content = self._llm_provider.complete(messages)
+             return LLMResult(content=content, prompt=messages[-1]["content"])
 
          if mode == ProcessMode.STREAM:
              if not self._llm_provider:
                  raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
 
              def stream_generator():
-                 for chunk in self._llm_provider.stream(messages):
+                 for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                      yield LLMResult(content=chunk, prompt=messages[-1]["content"])
 
              return stream_generator()
@@ -281,7 +266,7 @@ class MarkdownFlow:
 
          return LLMResult(content=content)
 
-     def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None) -> LLMResult | Generator[LLMResult, None, None]:
+     def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None):
          """Process interaction content rendering."""
          block = self.get_block(block_index)
 
@@ -314,8 +299,7 @@ class MarkdownFlow:
          if not self._llm_provider:
              return LLMResult(content=processed_block.content)  # Fallback processing
 
-         result = self._llm_provider.complete(messages)
-         rendered_question = result.content
+         rendered_question = self._llm_provider.complete(messages)
          rendered_content = self._reconstruct_interaction_content(processed_block.content, rendered_question)
 
          return LLMResult(
@@ -343,7 +327,7 @@ class MarkdownFlow:
          # With LLM provider, collect full response then return once
          def stream_generator():
              full_response = ""
-             for chunk in self._llm_provider.stream(messages):
+             for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
                  full_response += chunk
 
              # Reconstruct final interaction content
@@ -366,7 +350,6 @@ class MarkdownFlow:
          variables: dict[str, str | list[str]] | None = None,
      ) -> LLMResult | Generator[LLMResult, None, None]:
          """Process interaction user input."""
-         _ = context  # Mark as intentionally unused
          block = self.get_block(block_index)
          target_variable = block.variables[0] if block.variables else "user_input"
 
@@ -478,7 +461,7 @@ class MarkdownFlow:
              error_msg = f"Please select from: {', '.join(button_displays)}"
              return self._render_error(error_msg, mode)
 
-         # First, check if user input matches available buttons
+         # Validate input values against available buttons
          valid_values = []
          invalid_values = []
 
@@ -491,30 +474,19 @@ class MarkdownFlow:
                  break
 
              if not matched:
-                 invalid_values.append(value)
-
-         # If there are invalid values and this interaction allows text input, use LLM validation
-         if invalid_values and allow_text_input:
-             # Use LLM validation for text input interactions
-             button_displays = [btn["display"] for btn in buttons]
-             question = parse_result.get("question", "")
-
-             return self._process_llm_validation_with_options(
-                 block_index=0,  # Not used in the method
-                 user_input={target_variable: target_values},
-                 target_variable=target_variable,
-                 options=button_displays,
-                 question=question,
-                 mode=mode,
-             )
-
-         # Check for validation errors in pure button mode or when text input not allowed
-         if invalid_values:
+                 if allow_text_input:
+                     # Allow custom text in buttons+text mode
+                     valid_values.append(value)
+                 else:
+                     invalid_values.append(value)
+
+         # Check for validation errors
+         if invalid_values and not allow_text_input:
              button_displays = [btn["display"] for btn in buttons]
              error_msg = f"Invalid options: {', '.join(invalid_values)}. Please select from: {', '.join(button_displays)}"
              return self._render_error(error_msg, mode)
 
-         # Success: return validated button values
+         # Success: return validated values
          return LLMResult(
              content="",
              variables={target_variable: valid_values},
@@ -524,7 +496,6 @@ class MarkdownFlow:
                  "valid_values": valid_values,
                  "invalid_values": invalid_values,
                  "total_input_count": len(target_values),
-                 "llm_validated": False,
              },
          )
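Taken together, these hunks drop the LLM round-trip for unmatched button input: in buttons+text mode an unmatched value is now accepted locally as custom text, and only pure-button interactions still reject it (the `llm_validated` metadata flag goes away accordingly). A simplified standalone sketch of the new branch, with hypothetical helper names:

```python
def classify_values(values, button_values, allow_text_input):
    """Reduced model of the 0.2.18 button validation branch in core.py."""
    valid, invalid = [], []
    for value in values:
        if value in button_values:
            valid.append(value)       # exact button match
        elif allow_text_input:
            valid.append(value)       # kept as custom text (buttons+text mode)
        else:
            invalid.append(value)     # rejected in pure-button mode
    return valid, invalid

print(classify_values(["Python", "Rust"], ["Python", "Go"], True))
# (['Python', 'Rust'], [])
print(classify_values(["Python", "Rust"], ["Python", "Go"], False))
# (['Python'], ['Rust'])
```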
 
@@ -553,8 +524,7 @@ class MarkdownFlow:
553
524
  # Fallback processing, return variables directly
554
525
  return LLMResult(content="", variables=user_input) # type: ignore[arg-type]
555
526
 
556
- result = self._llm_provider.complete(messages)
557
- llm_response = result.content
527
+ llm_response = self._llm_provider.complete(messages)
558
528
 
559
529
  # Parse validation response and convert to LLMResult
560
530
  # Use joined target values for fallback; avoids JSON string injection
@@ -568,7 +538,7 @@ class MarkdownFlow:
568
538
 
569
539
  def stream_generator():
570
540
  full_response = ""
571
- for chunk in self._llm_provider.stream(messages):
541
+ for chunk in self._llm_provider.stream(messages): # type: ignore[attr-defined]
572
542
  full_response += chunk
573
543
 
574
544
  # Parse complete response and convert to LLMResult
@@ -592,7 +562,6 @@ class MarkdownFlow:
592
562
  mode: ProcessMode,
593
563
  ) -> LLMResult | Generator[LLMResult, None, None]:
594
564
  """Process LLM validation with button options (third case)."""
595
- _ = block_index # Mark as intentionally unused
596
565
  # Build special validation messages containing button option information
597
566
  messages = self._build_validation_messages_with_options(user_input, target_variable, options, question)
598
567
 
@@ -612,8 +581,7 @@ class MarkdownFlow:
612
581
  # Fallback processing, return variables directly
613
582
  return LLMResult(content="", variables=user_input) # type: ignore[arg-type]
614
583
 
615
- result = self._llm_provider.complete(messages)
616
- llm_response = result.content
584
+ llm_response = self._llm_provider.complete(messages)
617
585
 
618
586
  # Parse validation response and convert to LLMResult
619
587
  # Use joined target values for fallback; avoids JSON string injection
@@ -627,7 +595,7 @@ class MarkdownFlow:
627
595
 
628
596
  def stream_generator():
629
597
  full_response = ""
630
- for chunk in self._llm_provider.stream(messages):
598
+ for chunk in self._llm_provider.stream(messages): # type: ignore[attr-defined]
631
599
  full_response += chunk
632
600
  # For validation scenario, don't output chunks in real-time, only final result
633
601
 
@@ -658,8 +626,7 @@ class MarkdownFlow:
658
626
  if not self._llm_provider:
659
627
  return LLMResult(content=error_message) # Fallback processing
660
628
 
661
- result = self._llm_provider.complete(messages)
662
- friendly_error = result.content
629
+ friendly_error = self._llm_provider.complete(messages)
663
630
  return LLMResult(content=friendly_error, prompt=messages[-1]["content"])
664
631
 
665
632
  if mode == ProcessMode.STREAM:
@@ -667,7 +634,7 @@ class MarkdownFlow:
667
634
  return LLMResult(content=error_message)
668
635
 
669
636
  def stream_generator():
670
- for chunk in self._llm_provider.stream(messages):
637
+ for chunk in self._llm_provider.stream(messages): # type: ignore[attr-defined]
671
638
  yield LLMResult(content=chunk, prompt=messages[-1]["content"])
672
639
 
673
640
  return stream_generator()
@@ -683,8 +650,9 @@ class MarkdownFlow:
683
650
  block = self.get_block(block_index)
684
651
  block_content = block.content
685
652
 
686
- # Process output instructions
687
- block_content = process_output_instructions(block_content)
653
+ # Process output instructions and detect if preserved content exists
654
+ # Returns: (processed_content, has_preserved_content)
655
+ block_content, has_preserved_content = process_output_instructions(block_content)
688
656
 
689
657
  # Replace variables
690
658
  block_content = replace_variables_in_text(block_content, variables or {})
@@ -692,9 +660,16 @@ class MarkdownFlow:
692
660
  # Build message array
693
661
  messages = []
694
662
 
695
- # Add document prompt
663
+ # Conditionally add system prompts
696
664
  if self._document_prompt:
697
- messages.append({"role": "system", "content": self._document_prompt})
665
+ system_msg = self._document_prompt
666
+ # Only add output instruction explanation when preserved content detected
667
+ if has_preserved_content:
668
+ system_msg += "\n\n" + OUTPUT_INSTRUCTION_EXPLANATION.strip()
669
+ messages.append({"role": "system", "content": system_msg})
670
+ elif has_preserved_content:
671
+ # No document prompt but has preserved content, add explanation alone
672
+ messages.append({"role": "system", "content": OUTPUT_INSTRUCTION_EXPLANATION.strip()})
698
673
 
699
674
  # For most content blocks, historical conversation context is not needed
700
675
  # because each document block is an independent instruction
@@ -827,411 +802,5 @@ Original Error: {error_message}
827
802
  if match:
828
803
  prefix = match.group(1)
829
804
  suffix = match.group(2)
830
- # Extract only the closing bracket from suffix, remove original question
831
- # suffix format is "original_question]", we only want "]"
832
- if suffix.endswith("]"):
833
- clean_suffix = "]"
834
- else:
835
- clean_suffix = suffix
836
-
837
- return f"{prefix}{cleaned_question}{clean_suffix}"
805
+ return f"{prefix}{cleaned_question}{suffix}"
838
806
  return original_content # type: ignore[unreachable]
-
-     # Dynamic Interaction Methods
-
-     def _process_with_dynamic_check(
-         self,
-         block_index: int,
-         mode: ProcessMode,
-         context: list[dict[str, str]] | None,
-         variables: dict[str, str | list[str]] | None,
-     ) -> LLMResult | Generator[LLMResult, None, None]:
-         """Process content with dynamic interaction detection and conversion."""
-
-         block = self.get_block(block_index)
-         messages = self._build_dynamic_check_messages(block, context, variables)
-
-         # Define Function Calling tools with structured approach
-         tools = [
-             {
-                 "type": "function",
-                 "function": {
-                     "name": "create_interaction_block",
-                     "description": "Convert content to interaction block with structured data when it needs to collect user input",
-                     "parameters": {
-                         "type": "object",
-                         "properties": {
-                             "needs_interaction": {"type": "boolean", "description": "Whether this content needs to be converted to interaction block"},
-                             "variable_name": {"type": "string", "description": "Name of the variable to collect (without {{}} brackets)"},
-                             "interaction_type": {
-                                 "type": "string",
-                                 "enum": ["single_select", "multi_select", "text_input", "mixed"],
-                                 "description": "Type of interaction: single_select (|), multi_select (||), text_input (...), mixed (options + text)",
-                             },
-                             "options": {"type": "array", "items": {"type": "string"}, "description": "List of selectable options (3-4 specific options based on context)"},
-                             "allow_text_input": {"type": "boolean", "description": "Whether to include a text input option for 'Other' cases"},
-                             "text_input_prompt": {"type": "string", "description": "Prompt text for the text input option (e.g., '其他请输入', 'Other, please specify')"},
-                         },
-                         "required": ["needs_interaction"],
-                     },
-                 },
-             }
-         ]
-
-         if not self._llm_provider:
-             raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
-
-         # Call LLM with tools
-         result = self._llm_provider.complete(messages, tools)
-
-         # If interaction was generated through Function Calling, construct the MarkdownFlow format
-         if result.transformed_to_interaction and result.metadata and "tool_args" in result.metadata:
-             tool_args = result.metadata["tool_args"]
-             if tool_args.get("needs_interaction"):
-                 # Construct MarkdownFlow format from structured data
-                 interaction_content = self._build_interaction_format(tool_args)
-                 result.content = interaction_content
-
-         # If transformed to interaction, return as is
-         if result.transformed_to_interaction:
-             return result
-
-         # If not transformed, continue with normal processing using standard content messages
-         normal_messages = self._build_content_messages(block_index, variables)
-
-         if mode == ProcessMode.STREAM:
-
-             def stream_wrapper():
-                 stream_generator = self._llm_provider.stream(normal_messages)
-                 for chunk in stream_generator:
-                     yield LLMResult(content=chunk)
-
-             return stream_wrapper()
-
-         # Complete mode - use normal content processing
-         normal_result = self._llm_provider.complete(normal_messages)
-         return LLMResult(content=normal_result.content, prompt=normal_messages[-1]["content"], metadata=normal_result.metadata)
-
-     def _build_dynamic_check_messages(
-         self,
-         block: "Block",
-         context: list[dict[str, str]] | None,
-         variables: dict[str, str | list[str]] | None,
-     ) -> list[dict[str, str]]:
-         """Build messages for dynamic interaction detection."""
-
-         import json
-
-         # System prompt for detection
-         system_prompt = """You are an intelligent document processing assistant specializing in creating interactive forms.
-
- Task: Analyze the given content block and determine if it needs to be converted to an interaction block to collect user information.
-
- **ABSOLUTE RULE**: Convert ONLY when ALL THREE mandatory elements are explicitly present:
- 1. Storage action word + target connector + variable
- 2. No exceptions, no implications, no assumptions
-
- **MANDATORY TRIPLE PATTERN (ALL REQUIRED):**
-
- **Element 1: Storage Action Words**
- - Chinese: "记录", "保存", "存储", "收集", "采集"
- - English: "save", "store", "record", "collect", "gather"
-
- **Element 2: Target Connection Words**
- - Chinese: "到", "为", "在", "至"
- - English: "to", "as", "in", "into"
-
- **Element 3: Target Variable**
- - Must contain {{variable_name}} syntax for NEW data storage
- - Variable must be for collecting NEW information, not using existing data
-
- **VALID CONVERSION FORMULA:**
- [Storage Word] + [Connector] + {{new_variable}}
-
- Examples of VALID patterns:
- - "...记录到{{姓名}}"
- - "...保存为{{偏好}}"
- - "...存储在{{选择}}"
- - "...save to {{preference}}"
- - "...collect as {{user_input}}"
-
- **STRICT EXCLUSION RULES:**
-
- ❌ NEVER convert if missing ANY element:
- - No storage action word = NO conversion
- - No target connector = NO conversion
- - No {{variable}} = NO conversion
- - Using existing {{variable}} instead of collecting new = NO conversion
-
- ❌ NEVER convert casual conversation:
- - Simple questions without storage intent
- - Introduction requests without persistence
- - General inquiries without data collection
- - Educational or exploratory content
-
- ❌ NEVER infer or assume storage intent:
- - Don't assume "询问姓名" means "保存姓名"
- - Don't assume "了解偏好" means "记录偏好"
- - Don't assume data collection without explicit storage words
-
- **PATTERN ANALYSIS METHOD:**
- 1. **Exact Pattern Match**: Search for [Storage Word] + [Connector] + {{variable}}
- 2. **No Pattern = No Conversion**: If exact pattern not found, return needs_interaction: false
- 3. **Zero Tolerance**: No partial matches, no similar meanings, no interpretations
-
- **ULTRA-CONSERVATIVE APPROACH:**
- - If there's ANY doubt about storage intent = DON'T convert
- - If storage pattern is not 100% explicit = DON'T convert
- - If you need to "interpret" or "infer" storage intent = DON'T convert
- - Prefer false negatives over false positives
-
- When exact pattern is found, generate structured interaction data. Otherwise, always return needs_interaction: false."""
-
-         # User message with content and context
-         # Build user prompt with document context
-         user_prompt_parts = []
-
-         # Add document-level prompt context if exists
-         if self._document_prompt:
-             user_prompt_parts.append(f"""Document-level instructions:
- {self._document_prompt}
-
- (Note: The above are the user's document-level instructions that provide context and requirements for processing.)
- """)
-
-         # Prepare content analysis with both original and resolved versions
-         original_content = block.content
-
-         # Create resolved content with variable substitution for better context
-         resolved_content = original_content
-         if variables:
-             from .utils import replace_variables_in_text
-
-             resolved_content = replace_variables_in_text(original_content, variables)
-
-         content_analysis = f"""Current content block to analyze:
-
- **Original content (shows variable structure):**
- {original_content}
-
- **Resolved content (with current variable values):**
- {resolved_content}
-
- **Existing variable values:**
- {json.dumps(variables, ensure_ascii=False) if variables else "None"}"""
-
-         # Add different analysis based on whether content has variables
-         if "{{" in original_content and "}}" in original_content:
-             from .utils import extract_variables_from_text
-
-             content_variables = set(extract_variables_from_text(original_content))
-
-             # Find new variables (not yet collected)
-             new_variables = content_variables - (set(variables.keys()) if variables else set())
-             existing_used_variables = content_variables & (set(variables.keys()) if variables else set())
-
-             content_analysis += f"""
-
- **Variable analysis:**
- - Variables used from previous steps: {list(existing_used_variables) if existing_used_variables else "None"}
- - New variables to collect: {list(new_variables) if new_variables else "None"}
-
- **Context guidance:**
- - Use the resolved content to understand the actual context and requirements
- - Generate options based on the real variable values shown in the resolved content
- - Collect user input for the new variables identified above"""
-
-         user_prompt_parts.append(content_analysis)
-
-         # Add analysis requirements and structured output guide
-         user_prompt_parts.append("""## Analysis Task:
- 1. Determine if this content needs to be converted to an interaction block
- 2. If conversion is needed, provide structured interaction data
-
- ## Context-based Analysis:
- - Use the "Resolved content" to understand actual context (e.g., if it shows "川菜", generate Sichuan dish options)
- - Extract the "New variables to collect" identified in the variable analysis above
- - Generate 3-4 specific options based on the resolved context and document-level instructions
- - Follow ALL document-level instruction requirements (language, domain, terminology)
-
- ## Selection Type Decision Logic:
- Ask: "Can a user realistically want/choose multiple of these options simultaneously?"
-
- **Use MULTI_SELECT when:**
- - Food dishes (can order multiple: 宫保鸡丁, 麻婆豆腐)
- - Programming skills (can know multiple: Python, JavaScript)
- - Interests/hobbies (can have multiple: 读书, 运动, 旅游)
- - Product features (can want multiple: 定制颜色, 个性化logo)
- - Exercise types (can do multiple: 跑步, 游泳, 瑜伽)
-
- **Use SINGLE_SELECT when:**
- - Job positions (usually apply for one: 软件工程师 OR 产品经理)
- - Experience levels (have one current level: Beginner OR Advanced)
- - Budget ranges (have one range: 5-10万 OR 10-20万)
- - Education levels (have one highest: Bachelor's OR Master's)
-
- ## Output Instructions:
- If this content needs interaction, use the create_interaction_block function with:
- - `needs_interaction`: true/false
- - `variable_name`: the variable to collect (from "New variables" above)
- - `interaction_type`: "single_select", "multi_select", "text_input", or "mixed"
- - `options`: array of 3-4 specific options based on context
- - `allow_text_input`: true if you want to include "other" option
- - `text_input_prompt`: text for the "other" option (in appropriate language)
-
- Analyze the content and provide the structured interaction data.""")
-
-         user_prompt = "\n\n".join(user_prompt_parts)
-
-         messages = [{"role": "system", "content": system_prompt}]
-
-         # Add context if provided
-         if context:
-             messages.extend(context)
-
-         messages.append({"role": "user", "content": user_prompt})
-
-         return messages
-
-     def _build_interaction_format(self, tool_args: dict) -> str:
-         """Build MarkdownFlow interaction format from structured Function Calling data."""
-         variable_name = tool_args.get("variable_name", "")
-         interaction_type = tool_args.get("interaction_type", "single_select")
-         options = tool_args.get("options", [])
-         allow_text_input = tool_args.get("allow_text_input", False)
-         text_input_prompt = tool_args.get("text_input_prompt", "...请输入")
-
-         if not variable_name:
-             return ""
-
-         # For text_input type, options can be empty
-         if interaction_type != "text_input" and not options:
-             return ""
-
-         # Choose separator based on interaction type
-         if interaction_type in ["multi_select", "mixed"]:
-             separator = "||"
-         else:
-             separator = "|"
-
-         # Build options string
-         if interaction_type == "text_input":
-             # Text input only
-             options_str = f"...{text_input_prompt}"
-         else:
-             # Options with potential text input
-             options_str = separator.join(options)
-
-             if allow_text_input and text_input_prompt:
-                 # Ensure text input has ... prefix
-                 text_option = text_input_prompt if text_input_prompt.startswith("...") else f"...{text_input_prompt}"
-                 options_str += f"{separator}{text_option}"
-
-         return f"?[%{{{{{variable_name}}}}} {options_str}]"
-
-     def _process_dynamic_interaction_validation(
-         self,
-         block_index: int,
-         interaction_format: str,
-         user_input: dict[str, list[str]],
-         mode: ProcessMode,
-         context: list[dict[str, str]] | None,
-         variables: dict[str, str | list[str]] | None,
-     ) -> LLMResult:
-         """Validate user input for dynamically generated interaction blocks using same logic as normal interactions."""
-         _ = block_index  # Mark as intentionally unused
-         _ = context  # Mark as intentionally unused
-
-         from .utils import InteractionParser
-
-         # Parse the interaction format using the same parser as normal interactions
-         parser = InteractionParser()
-         parse_result = parser.parse(interaction_format)
-
-         if "error" in parse_result:
-             error_msg = f"Invalid interaction format: {parse_result['error']}"
-             return self._render_error(error_msg, mode)
-
-         # Extract variable name and interaction type
-         variable_name = parse_result.get("variable")
-         interaction_type = parse_result.get("type")
-
-         if not variable_name:
-             error_msg = f"No variable found in interaction format: {interaction_format}"
-             return self._render_error(error_msg, mode)
-
-         # Get user input for the target variable
-         target_values = user_input.get(variable_name, [])
-
-         # Basic validation - check if input is provided when required
-         if not target_values:
-             # Check if this is a text input or allows empty input
-             allow_text_input = interaction_type in [
-                 InteractionType.BUTTONS_WITH_TEXT,
-                 InteractionType.BUTTONS_MULTI_WITH_TEXT,
-             ]
-
-             if allow_text_input:
-                 # Allow empty input for buttons+text mode - merge with existing variables
-                 merged_variables = dict(variables or {})
-                 merged_variables[variable_name] = []
-                 return LLMResult(
-                     content="",
-                     variables=merged_variables,
-                     metadata={
-                         "interaction_type": "dynamic_interaction",
-                         "empty_input": True,
-                     },
-                 )
-             error_msg = f"No input provided for variable '{variable_name}'"
-             return self._render_error(error_msg, mode)
-
-         # Use the same validation logic as normal interactions
-         if interaction_type in [
-             InteractionType.BUTTONS_ONLY,
-             InteractionType.BUTTONS_WITH_TEXT,
-             InteractionType.BUTTONS_MULTI_SELECT,
-             InteractionType.BUTTONS_MULTI_WITH_TEXT,
-         ]:
-             # Button validation - reuse the existing button validation logic
-             button_result = self._process_button_validation(
-                 parse_result,
-                 target_values,
-                 variable_name,
-                 mode,
-                 interaction_type,
-             )
-
-             # Merge with existing variables for dynamic interactions
-             if hasattr(button_result, "variables") and button_result.variables is not None and variables:
-                 merged_variables = dict(variables)
-                 merged_variables.update(button_result.variables)
-                 return LLMResult(
-                     content=button_result.content,
-                     variables=merged_variables,
-                     metadata=button_result.metadata,
-                 )
-             return button_result
-
-         if interaction_type == InteractionType.NON_ASSIGNMENT_BUTTON:
-             # Non-assignment buttons: don't set variables, keep existing ones
-             return LLMResult(
-                 content="",
-                 variables=dict(variables or {}),
-                 metadata={
-                     "interaction_type": "non_assignment_button",
-                     "user_input": user_input,
-                 },
-             )
-         # Text-only input type - merge with existing variables
-         merged_variables = dict(variables or {})
-         merged_variables[variable_name] = target_values
-         return LLMResult(
-             content="",
-             variables=merged_variables,
-             metadata={
-                 "interaction_type": "text_only",
-                 "target_variable": variable_name,
-                 "values": target_values,
-             },
-         )
markdown_flow/llm.py CHANGED
@@ -5,7 +5,6 @@ Provides LLM provider interfaces and related data models, supporting multiple pr
  """
 
  from abc import ABC, abstractmethod
- from collections.abc import Generator
  from dataclasses import dataclass
  from enum import Enum
  from typing import Any
@@ -29,7 +28,6 @@ class LLMResult:
      prompt: str | None = None  # Used prompt
      variables: dict[str, str | list[str]] | None = None  # Extracted variables
      metadata: dict[str, Any] | None = None  # Metadata
-     transformed_to_interaction: bool = False  # Whether content block was transformed to interaction block
 
      def __bool__(self):
          """Support boolean evaluation."""
@@ -40,23 +38,22 @@ class LLMProvider(ABC):
      """Abstract LLM provider interface."""
 
      @abstractmethod
-     def complete(self, messages: list[dict[str, str]], tools: list[dict[str, Any]] | None = None) -> LLMResult:
+     def complete(self, messages: list[dict[str, str]]) -> str:
          """
-         Non-streaming LLM call with optional function calling support.
+         Non-streaming LLM call.
 
          Args:
              messages: Message list in format [{"role": "system/user/assistant", "content": "..."}]
-             tools: Optional tools/functions for LLM to call
 
          Returns:
-             LLMResult: Structured result with content and metadata
+             str: LLM response content
 
          Raises:
              ValueError: When LLM call fails
          """
 
      @abstractmethod
-     def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
+     def stream(self, messages: list[dict[str, str]]):
          """
          Streaming LLM call.
 
@@ -74,8 +71,8 @@ class LLMProvider(ABC):
  class NoLLMProvider(LLMProvider):
      """Empty LLM provider for prompt-only scenarios."""
 
-     def complete(self, messages: list[dict[str, str]], tools: list[dict[str, Any]] | None = None) -> LLMResult:
+     def complete(self, messages: list[dict[str, str]]) -> str:
          raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
 
-     def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
+     def stream(self, messages: list[dict[str, str]]):
          raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
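Under the simplified 0.2.18 contract shown above, `complete()` returns the response text as a plain `str` and `stream()` yields `str` chunks; the `tools` parameter and the `LLMResult` return type are gone. A minimal conforming provider, with echo behavior standing in for a real LLM call:

```python
from markdown_flow.llm import LLMProvider

class EchoProvider(LLMProvider):
    """Toy provider matching the 0.2.18 interface; echoes the last message."""

    def complete(self, messages: list[dict[str, str]]) -> str:
        # Plain string response, no LLMResult wrapper
        return f"echo: {messages[-1]['content']}"

    def stream(self, messages: list[dict[str, str]]):
        # Yield plain str chunks
        for word in messages[-1]["content"].split():
            yield word + " "
```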
markdown_flow/utils.py CHANGED
@@ -23,7 +23,6 @@ from .constants import (
      CONTEXT_QUESTION_MARKER,
      CONTEXT_QUESTION_TEMPLATE,
      JSON_PARSE_ERROR,
-     OUTPUT_INSTRUCTION_EXPLANATION,
      OUTPUT_INSTRUCTION_PREFIX,
      OUTPUT_INSTRUCTION_SUFFIX,
      SMART_VALIDATION_TEMPLATE,
@@ -559,7 +558,7 @@ def parse_json_response(response_text: str) -> dict[str, Any]:
      raise ValueError(JSON_PARSE_ERROR)
 
 
- def process_output_instructions(content: str) -> str:
+ def process_output_instructions(content: str) -> tuple[str, bool]:
      """
      Process output instruction markers, converting !=== format to [output] format.
 
@@ -569,7 +568,9 @@ def process_output_instructions(content: str) -> str:
      content: Raw content containing output instructions
 
      Returns:
-         Processed content with === and !=== markers converted to [output] format
+         Tuple of (processed_content, has_preserved_content):
+         - processed_content: Content with === and !=== markers converted to XML format
+         - has_preserved_content: True if content contained preserved markers
      """
      lines = content.split("\n")
      result_lines = []
@@ -650,11 +651,8 @@ def process_output_instructions(content: str) -> str:
      # Assemble final content
      processed_content = "\n".join(result_lines)
 
-     # Add explanation prefix (if has output instructions)
-     if has_output_instruction:
-         processed_content = OUTPUT_INSTRUCTION_EXPLANATION + processed_content
-
-     return processed_content
+     # Return both processed content and whether it contains preserved content
+     return processed_content, has_output_instruction
 
 
  def extract_preserved_content(content: str) -> str:
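Combined with the core.py hunk above, the flow is now: `process_output_instructions()` converts the markers and reports whether any preserved content was found, and the caller attaches `OUTPUT_INSTRUCTION_EXPLANATION` to the system message only in that case, rather than prefixing it to the block content as 0.2.16 did. A usage sketch, assuming the multiline `!===` preserve markers described in the README:

```python
from markdown_flow.utils import process_output_instructions

content = "Intro text\n!===\n# Keep this heading exactly\n!===\nOutro text"
processed, has_preserved = process_output_instructions(content)

if has_preserved:
    # core.py appends OUTPUT_INSTRUCTION_EXPLANATION to the system
    # message here, instead of prepending it to the content.
    print("preserved content found; explanation goes into the system prompt")
print(processed)
```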
markdown_flow-0.2.16.dist-info/METADATA → markdown_flow-0.2.18.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: markdown-flow
- Version: 0.2.16
+ Version: 0.2.18
  Summary: An agent library designed to parse and process MarkdownFlow documents
  Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
  Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
  mf = MarkdownFlow(document, llm_provider=llm_provider)
 
  # Process with different modes
- result = mf.process(
+ result = await mf.process(
      block_index=0,
      mode=ProcessMode.COMPLETE,
      variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -84,7 +84,7 @@ result = mf.process(
 
  ```python
  # Stream processing for real-time responses
- for chunk in mf.process(
+ async for chunk in mf.process(
      block_index=0,
      mode=ProcessMode.STREAM,
      variables={'name': 'Bob'}
@@ -92,36 +92,6 @@ for chunk in mf.process(
      print(chunk.content, end='')
  ```
 
- ### Dynamic Interaction Generation ✨
-
- Transform natural language content into interactive elements automatically:
-
- ```python
- from markdown_flow import MarkdownFlow, ProcessMode
-
- # Dynamic interaction generation works automatically
- mf = MarkdownFlow(
-     document="询问用户的菜品偏好,并记录到变量{{菜品选择}}",
-     llm_provider=llm_provider,
-     document_prompt="你是中餐厅服务员,提供川菜、粤菜、鲁菜等选项"
- )
-
- # Process with Function Calling
- result = mf.process(0, ProcessMode.COMPLETE)
-
- if result.transformed_to_interaction:
-     print(f"Generated interaction: {result.content}")
-     # Output: ?[%{{菜品选择}} 宫保鸡丁||麻婆豆腐||水煮鱼||...其他菜品]
-
- # Continue with user input
- user_result = mf.process(
-     block_index=0,
-     mode=ProcessMode.COMPLETE,
-     user_input={"菜品选择": ["宫保鸡丁", "麻婆豆腐"]},
-     dynamic_interaction_format=result.content
- )
- ```
-
  ### Interactive Elements
 
  ```python
@@ -152,66 +122,13 @@ user_input = {
      'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
  }
 
- result = mf.process(
+ result = await mf.process(
      block_index=1,  # Process skills interaction
      user_input=user_input,
      mode=ProcessMode.COMPLETE
  )
  ```
 
- ## ✨ Key Features
-
- ### 🏗️ Three-Layer Architecture
-
- - **Document Level**: Parse `---` separators and `?[]` interaction patterns
- - **Block Level**: Categorize as CONTENT, INTERACTION, or PRESERVED_CONTENT
- - **Interaction Level**: Handle 6 different interaction types with smart validation
-
- ### 🔄 Dynamic Interaction Generation
-
- - **Natural Language Input**: Write content in plain language
- - **AI-Powered Conversion**: LLM automatically detects interaction needs using Function Calling
- - **Structured Data Generation**: LLM returns structured data, core builds MarkdownFlow format
- - **Language Agnostic**: Support for any language with proper document prompts
- - **Context Awareness**: Both original and resolved variable contexts provided to LLM
-
- ### 🤖 Unified LLM Integration
-
- - **Single Interface**: One `complete()` method for both regular and Function Calling modes
- - **Automatic Detection**: Tools parameter determines processing mode automatically
- - **Consistent Returns**: Always returns `LLMResult` with structured metadata
- - **Error Handling**: Automatic fallback from Function Calling to regular completion
- - **Provider Agnostic**: Abstract interface supports any LLM service
-
- ### 📝 Variable System
-
- - **Replaceable Variables**: `{{variable}}` for content personalization
- - **Preserved Variables**: `%{{variable}}` for LLM understanding in interactions
- - **Multi-Value Support**: Handle both single values and arrays
- - **Smart Extraction**: Automatic detection from document content
-
- ### 🎯 Interaction Types
-
- - **Text Input**: `?[%{{var}}...question]` - Free text entry
- - **Single Select**: `?[%{{var}} A|B|C]` - Choose one option
- - **Multi Select**: `?[%{{var}} A||B||C]` - Choose multiple options
- - **Mixed Mode**: `?[%{{var}} A||B||...custom]` - Predefined + custom input
- - **Display Buttons**: `?[Continue|Cancel]` - Action buttons without assignment
- - **Value Separation**: `?[%{{var}} Display//value|...]` - Different display/stored values
-
- ### 🔒 Content Preservation
-
- - **Multiline Format**: `!===content!===` blocks output exactly as written
- - **Inline Format**: `===content===` for single-line preserved content
- - **Variable Support**: Preserved content can contain variables for substitution
-
- ### ⚡ Performance Optimized
-
- - **Pre-compiled Regex**: All patterns compiled once for maximum performance
- - **Synchronous Interface**: Clean synchronous operations with optional streaming
- - **Stream Processing**: Real-time streaming responses supported
- - **Memory Efficient**: Lazy evaluation and generator patterns
-
  ## 📖 API Reference
 
  ### Core Classes
@@ -231,7 +148,7 @@ class MarkdownFlow:
      def get_all_blocks(self) -> List[Block]: ...
      def extract_variables(self) -> Set[str]: ...
 
-     def process(
+     async def process(
          self,
          block_index: int,
          mode: ProcessMode = ProcessMode.COMPLETE,
@@ -276,15 +193,15 @@ class ProcessMode(Enum):
 
  ```python
  # Generate prompt only
- prompt_result = mf.process(0, ProcessMode.PROMPT_ONLY)
+ prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
  print(prompt_result.content)  # Raw prompt text
 
  # Complete response
- complete_result = mf.process(0, ProcessMode.COMPLETE)
+ complete_result = await mf.process(0, ProcessMode.COMPLETE)
  print(complete_result.content)  # Full LLM response
 
  # Streaming response
- for chunk in mf.process(0, ProcessMode.STREAM):
+ async for chunk in mf.process(0, ProcessMode.STREAM):
      print(chunk.content, end='')
  ```
 
@@ -294,14 +211,14 @@ Abstract base class for implementing LLM providers.
 
  ```python
  from abc import ABC, abstractmethod
- from typing import Generator
+ from typing import AsyncGenerator
 
  class LLMProvider(ABC):
      @abstractmethod
-     def complete(self, prompt: str) -> LLMResult: ...
+     async def complete(self, prompt: str) -> LLMResult: ...
 
      @abstractmethod
-     def stream(self, prompt: str) -> Generator[str, None, None]: ...
+     async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
  ```
 
  **Custom Implementation:**
@@ -309,23 +226,23 @@ class LLMProvider(ABC):
  ```python
  class OpenAIProvider(LLMProvider):
      def __init__(self, api_key: str):
-         self.client = openai.OpenAI(api_key=api_key)
+         self.client = openai.AsyncOpenAI(api_key=api_key)
 
-     def complete(self, prompt: str) -> LLMResult:
-         response = self.client.completions.create(
+     async def complete(self, prompt: str) -> LLMResult:
+         response = await self.client.completions.create(
              model="gpt-3.5-turbo",
              prompt=prompt,
              max_tokens=500
          )
          return LLMResult(content=response.choices[0].text.strip())
 
-     def stream(self, prompt: str):
-         stream = self.client.completions.create(
+     async def stream(self, prompt: str):
+         stream = await self.client.completions.create(
              model="gpt-3.5-turbo",
              prompt=prompt,
              stream=True
          )
-         for chunk in stream:
+         async for chunk in stream:
              if chunk.choices[0].text:
                  yield chunk.choices[0].text
  ```
@@ -485,7 +402,7 @@ The new version introduces multi-select interaction support with improvements to
  user_input = "Python"
 
  # Process interaction
- result = mf.process(
+ result = await mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -502,7 +419,7 @@ user_input = {
  }
 
  # Process interaction
- result = mf.process(
+ result = await mf.process(
      block_index=1,
      user_input=user_input,
      mode=ProcessMode.COMPLETE
@@ -545,10 +462,10 @@ class CustomAPIProvider(LLMProvider):
      def __init__(self, base_url: str, api_key: str):
          self.base_url = base_url
          self.api_key = api_key
-         self.client = httpx.Client()
+         self.client = httpx.AsyncClient()
 
-     def complete(self, prompt: str) -> LLMResult:
-         response = self.client.post(
+     async def complete(self, prompt: str) -> LLMResult:
+         response = await self.client.post(
              f"{self.base_url}/complete",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt, "max_tokens": 1000}
@@ -556,14 +473,14 @@ class CustomAPIProvider(LLMProvider):
          data = response.json()
          return LLMResult(content=data["text"])
 
-     def stream(self, prompt: str):
-         with self.client.stream(
+     async def stream(self, prompt: str):
+         async with self.client.stream(
              "POST",
              f"{self.base_url}/stream",
              headers={"Authorization": f"Bearer {self.api_key}"},
              json={"prompt": prompt}
          ) as response:
-             for chunk in response.iter_text():
+             async for chunk in response.aiter_text():
                  if chunk.strip():
                      yield chunk
 
@@ -575,7 +492,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
  ### Multi-Block Document Processing
 
  ```python
- def process_conversation():
+ async def process_conversation():
      conversation = """
  # AI Assistant
 
@@ -612,7 +529,7 @@ Would you like to start with the basics?
      for i, block in enumerate(blocks):
          if block.block_type == BlockType.CONTENT:
              print(f"\n--- Processing Block {i} ---")
-             result = mf.process(
+             result = await mf.process(
                  block_index=i,
                  mode=ProcessMode.COMPLETE,
                  variables=variables
@@ -627,8 +544,9 @@ Would you like to start with the basics?
 
  ```python
  from markdown_flow import MarkdownFlow, ProcessMode
+ import asyncio
 
- def stream_with_progress():
+ async def stream_with_progress():
      document = """
  Generate a comprehensive Python tutorial for {{user_name}}
  focusing on {{topic}} with practical examples.
@@ -642,7 +560,7 @@ Include code samples, explanations, and practice exercises.
      content = ""
      chunk_count = 0
 
-     for chunk in mf.process(
+     async for chunk in mf.process(
          block_index=0,
          mode=ProcessMode.STREAM,
          variables={
@@ -681,13 +599,13 @@ class InteractiveDocumentBuilder:
          self.user_responses = {}
          self.current_block = 0
 
-     def start_interaction(self):
+     async def start_interaction(self):
          blocks = self.mf.get_all_blocks()
 
          for i, block in enumerate(blocks):
              if block.block_type == BlockType.CONTENT:
                  # Process content block with current variables
-                 result = self.mf.process(
+                 result = await self.mf.process(
                      block_index=i,
                      mode=ProcessMode.COMPLETE,
                      variables=self.user_responses
@@ -696,11 +614,11 @@ class InteractiveDocumentBuilder:
 
              elif block.block_type == BlockType.INTERACTION:
                  # Handle user interaction
-                 response = self.handle_interaction(block.content)
+                 response = await self.handle_interaction(block.content)
                  if response:
                      self.user_responses.update(response)
 
-     def handle_interaction(self, interaction_content: str):
+     async def handle_interaction(self, interaction_content: str):
          from markdown_flow.utils import InteractionParser
 
          interaction = InteractionParser.parse(interaction_content)
@@ -717,7 +635,7 @@ class InteractiveDocumentBuilder:
                  return {interaction.variable: selected}
              except (ValueError, IndexError):
                  print("Invalid choice")
-                 return self.handle_interaction(interaction_content)
+                 return await self.handle_interaction(interaction_content)
 
          elif interaction.name == "TEXT_ONLY":
              response = input(f"{interaction.question}: ")
@@ -739,7 +657,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
  """
 
  builder = InteractiveDocumentBuilder(template, your_llm_provider)
- builder.start_interaction()
+ await builder.start_interaction()
  ```
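The README examples are now coroutine-based (note the added `import asyncio`), so a top-level driver is needed to run them. A minimal entry point for the `stream_with_progress` example defined above:

```python
import asyncio

# Runs the async example defined earlier in the README.
if __name__ == "__main__":
    asyncio.run(stream_with_progress())
```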
 
  ### Variable System Deep Dive
markdown_flow-0.2.18.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ markdown_flow/__init__.py,sha256=5TBCmuAdWvPqKJHpP5_R2qVOGf4FFkdaL6oUazBIY7E,2851
+ markdown_flow/constants.py,sha256=HI061nHbGG9BeN-n9dMX17GlAT7fmYmsRZ6Cr8OSbXY,8809
+ markdown_flow/core.py,sha256=Z0c5SssgPhqbDhbO2HZgHAaX6RpJEccb_r9RoGHVEjI,32565
+ markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
+ markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
+ markdown_flow/llm.py,sha256=E2aq-OXwt4rS-alpf_iIJd2K38De_O3pzSZHuEaMeoE,2100
+ markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
+ markdown_flow/utils.py,sha256=rJOalKxCGuXYiAJzI3WfD-loLc-7BHQGpac934_uC4c,28504
+ markdown_flow-0.2.18.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
+ markdown_flow-0.2.18.dist-info/METADATA,sha256=-y3oljzO7iSaHHodM8c4id3SRMDxdP3zhSpihSUYW0I,21010
+ markdown_flow-0.2.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ markdown_flow-0.2.18.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
+ markdown_flow-0.2.18.dist-info/RECORD,,
markdown_flow-0.2.16.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
- markdown_flow/__init__.py,sha256=IOL89FC4C8kH6qU5bjNsmuumPk3YDEHKHEjnJFFzQRU,2875
- markdown_flow/constants.py,sha256=Wq10-gvskBT6CpI3WiNfZpDIfl-lnAMaHVvjGvYpGG0,8006
- markdown_flow/core.py,sha256=INQ5vIJOFOCYHkBszqCbgn0BGVQ0WXMONtQ6AJmRXkM,51237
- markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
- markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
- markdown_flow/llm.py,sha256=MhllCwqzrN_RtIG-whfdkNk6e0WQ2H6RJVCRv3lNM_0,2531
- markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
- markdown_flow/utils.py,sha256=cVi0zDRK_rCMAr3EDhgITmx6Po5fSvYjqrprYaitYE0,28450
- markdown_flow-0.2.16.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
- markdown_flow-0.2.16.dist-info/METADATA,sha256=0uwa1wYOmt5MjukW6x2MTlUxYO-zn9Q4hkKu_NSiKOc,24287
- markdown_flow-0.2.16.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- markdown_flow-0.2.16.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
- markdown_flow-0.2.16.dist-info/RECORD,,