markdown-flow 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of markdown-flow might be problematic.
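
The headline change in 0.2.8 is that the public API drops async: `MarkdownFlow.process()` and the `LLMProvider` interface become plain synchronous calls, streaming switches from `AsyncGenerator[LLMResult, None]` to `Generator[LLMResult, None, None]`, and the `enable_dynamic_interaction` constructor flag is removed (the dynamic-interaction check now runs whenever an LLM provider is set). A minimal migration sketch, assuming `provider` is any concrete provider built against the new synchronous interface:

```python
from markdown_flow import MarkdownFlow, ProcessMode

# `provider` is a placeholder for any LLMProvider implementing the 0.2.8
# synchronous interface (see markdown_flow/llm.py below).
mf = MarkdownFlow("Introduce {{name}} in one sentence.", llm_provider=provider)

# 0.2.6: result = await mf.process(0, ProcessMode.COMPLETE)
result = mf.process(0, ProcessMode.COMPLETE)

# 0.2.6: async for chunk in mf.process(0, ProcessMode.STREAM): ...
for chunk in mf.process(0, ProcessMode.STREAM):
    print(chunk.content, end="")
```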

markdown_flow/__init__.py CHANGED
@@ -83,4 +83,4 @@ __all__ = [
     "replace_variables_in_text",
 ]
 
-__version__ = "0.2.6"
+__version__ = "0.2.8"
markdown_flow/core.py CHANGED
@@ -6,7 +6,7 @@ Refactored MarkdownFlow class with built-in LLM processing capabilities and unif
 
 import json
 import re
-from collections.abc import AsyncGenerator
+from collections.abc import Generator
 from copy import copy
 from typing import Any
 
@@ -69,7 +69,6 @@ class MarkdownFlow:
         document_prompt: str | None = None,
         interaction_prompt: str | None = None,
         interaction_error_prompt: str | None = None,
-        enable_dynamic_interaction: bool = False,
     ):
         """
         Initialize MarkdownFlow instance.
@@ -80,14 +79,12 @@ class MarkdownFlow:
             document_prompt: Document-level system prompt
             interaction_prompt: Interaction content rendering prompt
             interaction_error_prompt: Interaction error rendering prompt
-            enable_dynamic_interaction: Enable dynamic content to interaction conversion
         """
         self._document = document
         self._llm_provider = llm_provider
         self._document_prompt = document_prompt
         self._interaction_prompt = interaction_prompt or DEFAULT_INTERACTION_PROMPT
         self._interaction_error_prompt = interaction_error_prompt or DEFAULT_INTERACTION_ERROR_PROMPT
-        self._enable_dynamic_interaction = enable_dynamic_interaction
         self._blocks = None
         self._interaction_configs: dict[int, InteractionValidationConfig] = {}
 
@@ -179,7 +176,7 @@ class MarkdownFlow:
 
     # Core unified interface
 
-    async def process(
+    def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
@@ -187,7 +184,7 @@
         variables: dict[str, str | list[str]] | None = None,
         user_input: dict[str, list[str]] | None = None,
         dynamic_interaction_format: str | None = None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """
         Unified block processing interface.
 
@@ -200,7 +197,7 @@
             dynamic_interaction_format: Dynamic interaction format for validation
 
         Returns:
-            LLMResult or AsyncGenerator[LLMResult, None]
+            LLMResult or Generator[LLMResult, None, None]
         """
         # Process document_prompt variable replacement
         if self._document_prompt:
@@ -211,65 +208,68 @@ class MarkdownFlow:
         if block.block_type == BlockType.CONTENT:
             # Check if this is dynamic interaction validation
             if dynamic_interaction_format and user_input:
-                return await self._process_dynamic_interaction_validation(
-                    block_index, dynamic_interaction_format, user_input, mode, context, variables
-                )
+                return self._process_dynamic_interaction_validation(block_index, dynamic_interaction_format, user_input, mode, context, variables)
             # Normal content processing (possibly with dynamic conversion)
-            return await self._process_content(block_index, mode, context, variables)
+            return self._process_content(block_index, mode, context, variables)
 
         if block.block_type == BlockType.INTERACTION:
             if user_input is None:
                 # Render interaction content
-                return await self._process_interaction_render(block_index, mode, variables)
+                return self._process_interaction_render(block_index, mode, variables)
             # Process user input
-            return await self._process_interaction_input(block_index, user_input, mode, context, variables)
+            return self._process_interaction_input(block_index, user_input, mode, context, variables)
 
         if block.block_type == BlockType.PRESERVED_CONTENT:
             # Preserved content output as-is, no LLM call
-            return await self._process_preserved_content(block_index, variables)
+            return self._process_preserved_content(block_index, variables)
 
         # Handle other types as content
-        return await self._process_content(block_index, mode, context, variables)
+        return self._process_content(block_index, mode, context, variables)
 
     # Internal processing methods
 
-    async def _process_content(
+    def _process_content(
         self,
         block_index: int,
         mode: ProcessMode,
        context: list[dict[str, str]] | None,
        variables: dict[str, str | list[str]] | None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process content block."""
 
-        # Check if dynamic interaction is enabled and should be attempted
-        if self._enable_dynamic_interaction and mode != ProcessMode.PROMPT_ONLY:
-            return await self._process_with_dynamic_check(block_index, mode, context, variables)
-
-        # Original logic: Build messages
-        messages = self._build_content_messages(block_index, variables)
-
+        # For PROMPT_ONLY mode, use standard content processing
         if mode == ProcessMode.PROMPT_ONLY:
+            messages = self._build_content_messages(block_index, variables)
             return LLMResult(prompt=messages[-1]["content"], metadata={"messages": messages})
 
+        # For COMPLETE and STREAM modes with LLM provider, use dynamic interaction check
+        # LLM will decide whether content needs to be converted to interaction block
+        if self._llm_provider:
+            block = self.get_block(block_index)
+            if block.block_type == BlockType.CONTENT:
+                return self._process_with_dynamic_check(block_index, mode, context, variables)
+
+        # Fallback: Build messages using standard content processing
+        messages = self._build_content_messages(block_index, variables)
+
         if mode == ProcessMode.COMPLETE:
             if not self._llm_provider:
                 raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
 
-            result = await self._llm_provider.complete(messages)
+            result = self._llm_provider.complete(messages)
             return LLMResult(content=result.content, prompt=messages[-1]["content"], metadata=result.metadata)
 
         if mode == ProcessMode.STREAM:
             if not self._llm_provider:
                 raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
 
-            async def stream_generator():
-                async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            def stream_generator():
+                for chunk in self._llm_provider.stream(messages):
                     yield LLMResult(content=chunk, prompt=messages[-1]["content"])
 
             return stream_generator()
 
-    async def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
+    def _process_preserved_content(self, block_index: int, variables: dict[str, str | list[str]] | None) -> LLMResult:
         """Process preserved content block, output as-is without LLM call."""
         block = self.get_block(block_index)
 
@@ -281,7 +281,7 @@
 
         return LLMResult(content=content)
 
-    async def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    def _process_interaction_render(self, block_index: int, mode: ProcessMode, variables: dict[str, str | list[str]] | None = None) -> LLMResult | Generator[LLMResult, None, None]:
         """Process interaction content rendering."""
         block = self.get_block(block_index)
 
@@ -314,7 +314,7 @@
         if not self._llm_provider:
             return LLMResult(content=processed_block.content)  # Fallback processing
 
-        result = await self._llm_provider.complete(messages)
+        result = self._llm_provider.complete(messages)
         rendered_question = result.content
         rendered_content = self._reconstruct_interaction_content(processed_block.content, rendered_question)
 
@@ -332,7 +332,7 @@
             # For interaction blocks, return reconstructed content (one-time output)
             rendered_content = self._reconstruct_interaction_content(processed_block.content, question_text or "")
 
-            async def stream_generator():
+            def stream_generator():
                 yield LLMResult(
                     content=rendered_content,
                     prompt=messages[-1]["content"],
@@ -341,9 +341,9 @@
             return stream_generator()
 
         # With LLM provider, collect full response then return once
-        async def stream_generator():
+        def stream_generator():
            full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):
                full_response += chunk
 
            # Reconstruct final interaction content
@@ -357,22 +357,23 @@
 
         return stream_generator()
 
-    async def _process_interaction_input(
+    def _process_interaction_input(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
         mode: ProcessMode,
         context: list[dict[str, str]] | None,
         variables: dict[str, str | list[str]] | None = None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process interaction user input."""
+        _ = context  # Mark as intentionally unused
         block = self.get_block(block_index)
         target_variable = block.variables[0] if block.variables else "user_input"
 
         # Basic validation
         if not user_input or not any(values for values in user_input.values()):
             error_msg = INPUT_EMPTY_ERROR
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)
 
         # Get the target variable value from user_input
         target_values = user_input.get(target_variable, [])
@@ -386,7 +387,7 @@
 
         if "error" in parse_result:
             error_msg = INTERACTION_PARSE_ERROR.format(error=parse_result["error"])
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)
 
         interaction_type = parse_result.get("type")
 
@@ -398,7 +399,7 @@
             InteractionType.BUTTONS_MULTI_WITH_TEXT,
         ]:
             # All button types: validate user input against available buttons
-            return await self._process_button_validation(
+            return self._process_button_validation(
                 parse_result,
                 target_values,
                 target_variable,
@@ -431,16 +432,16 @@
             },
         )
         error_msg = f"No input provided for variable '{target_variable}'"
-        return await self._render_error(error_msg, mode)
+        return self._render_error(error_msg, mode)
 
-    async def _process_button_validation(
+    def _process_button_validation(
         self,
         parse_result: dict[str, Any],
         target_values: list[str],
         target_variable: str,
         mode: ProcessMode,
         interaction_type: InteractionType,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """
         Simplified button validation with new input format.
 
@@ -475,7 +476,7 @@
             # Pure button mode requires input
             button_displays = [btn["display"] for btn in buttons]
             error_msg = f"Please select from: {', '.join(button_displays)}"
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)
 
         # Validate input values against available buttons
         valid_values = []
@@ -500,7 +501,7 @@
         if invalid_values and not allow_text_input:
             button_displays = [btn["display"] for btn in buttons]
             error_msg = f"Invalid options: {', '.join(invalid_values)}. Please select from: {', '.join(button_displays)}"
-            return await self._render_error(error_msg, mode)
+            return self._render_error(error_msg, mode)
 
         # Success: return validated values
         return LLMResult(
@@ -515,13 +516,13 @@
             },
         )
 
-    async def _process_llm_validation(
+    def _process_llm_validation(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
         target_variable: str,
         mode: ProcessMode,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process LLM validation."""
         # Build validation messages
         messages = self._build_validation_messages(block_index, user_input, target_variable)
@@ -540,7 +541,7 @@
             # Fallback processing, return variables directly
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]
 
-        result = await self._llm_provider.complete(messages)
+        result = self._llm_provider.complete(messages)
         llm_response = result.content
 
         # Parse validation response and convert to LLMResult
@@ -553,9 +554,9 @@
         if not self._llm_provider:
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]
 
-        async def stream_generator():
+        def stream_generator():
             full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):
                 full_response += chunk
 
            # Parse complete response and convert to LLMResult
@@ -569,7 +570,7 @@
 
         return stream_generator()
 
-    async def _process_llm_validation_with_options(
+    def _process_llm_validation_with_options(
         self,
         block_index: int,
         user_input: dict[str, list[str]],
@@ -577,8 +578,9 @@
         options: list[str],
         question: str,
         mode: ProcessMode,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process LLM validation with button options (third case)."""
+        _ = block_index  # Mark as intentionally unused
         # Build special validation messages containing button option information
         messages = self._build_validation_messages_with_options(user_input, target_variable, options, question)
 
@@ -598,7 +600,7 @@
             # Fallback processing, return variables directly
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]
 
-        result = await self._llm_provider.complete(messages)
+        result = self._llm_provider.complete(messages)
         llm_response = result.content
 
         # Parse validation response and convert to LLMResult
@@ -611,9 +613,9 @@
         if not self._llm_provider:
             return LLMResult(content="", variables=user_input)  # type: ignore[arg-type]
 
-        async def stream_generator():
+        def stream_generator():
             full_response = ""
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+            for chunk in self._llm_provider.stream(messages):
                 full_response += chunk
                 # For validation scenario, don't output chunks in real-time, only final result
 
@@ -630,7 +632,7 @@
 
         return stream_generator()
 
-    async def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    def _render_error(self, error_message: str, mode: ProcessMode) -> LLMResult | Generator[LLMResult, None, None]:
         """Render user-friendly error message."""
         messages = self._build_error_render_messages(error_message)
 
@@ -644,7 +646,7 @@
         if not self._llm_provider:
             return LLMResult(content=error_message)  # Fallback processing
 
-        result = await self._llm_provider.complete(messages)
+        result = self._llm_provider.complete(messages)
         friendly_error = result.content
         return LLMResult(content=friendly_error, prompt=messages[-1]["content"])
 
@@ -652,8 +654,8 @@
         if not self._llm_provider:
             return LLMResult(content=error_message)
 
-        async def stream_generator():
-            async for chunk in self._llm_provider.stream(messages):  # type: ignore[attr-defined]
+        def stream_generator():
+            for chunk in self._llm_provider.stream(messages):
                 yield LLMResult(content=chunk, prompt=messages[-1]["content"])
 
         return stream_generator()
@@ -818,64 +820,50 @@ Original Error: {error_message}
 
     # Dynamic Interaction Methods
 
-    async def _process_with_dynamic_check(
+    def _process_with_dynamic_check(
         self,
         block_index: int,
         mode: ProcessMode,
         context: list[dict[str, str]] | None,
         variables: dict[str, str | list[str]] | None,
-    ) -> LLMResult | AsyncGenerator[LLMResult, None]:
+    ) -> LLMResult | Generator[LLMResult, None, None]:
         """Process content with dynamic interaction detection and conversion."""
 
         block = self.get_block(block_index)
         messages = self._build_dynamic_check_messages(block, context, variables)
 
         # Define Function Calling tools with structured approach
-        tools = [{
-            "type": "function",
-            "function": {
-                "name": "create_interaction_block",
-                "description": "Convert content to interaction block with structured data when it needs to collect user input",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "needs_interaction": {
-                            "type": "boolean",
-                            "description": "Whether this content needs to be converted to interaction block"
-                        },
-                        "variable_name": {
-                            "type": "string",
-                            "description": "Name of the variable to collect (without {{}} brackets)"
-                        },
-                        "interaction_type": {
-                            "type": "string",
-                            "enum": ["single_select", "multi_select", "text_input", "mixed"],
-                            "description": "Type of interaction: single_select (|), multi_select (||), text_input (...), mixed (options + text)"
+        tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "create_interaction_block",
+                    "description": "Convert content to interaction block with structured data when it needs to collect user input",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "needs_interaction": {"type": "boolean", "description": "Whether this content needs to be converted to interaction block"},
+                            "variable_name": {"type": "string", "description": "Name of the variable to collect (without {{}} brackets)"},
+                            "interaction_type": {
+                                "type": "string",
+                                "enum": ["single_select", "multi_select", "text_input", "mixed"],
+                                "description": "Type of interaction: single_select (|), multi_select (||), text_input (...), mixed (options + text)",
+                            },
+                            "options": {"type": "array", "items": {"type": "string"}, "description": "List of selectable options (3-4 specific options based on context)"},
+                            "allow_text_input": {"type": "boolean", "description": "Whether to include a text input option for 'Other' cases"},
+                            "text_input_prompt": {"type": "string", "description": "Prompt text for the text input option (e.g., '其他请输入', 'Other, please specify')"},
                         },
-                        "options": {
-                            "type": "array",
-                            "items": {"type": "string"},
-                            "description": "List of selectable options (3-4 specific options based on context)"
-                        },
-                        "allow_text_input": {
-                            "type": "boolean",
-                            "description": "Whether to include a text input option for 'Other' cases"
-                        },
-                        "text_input_prompt": {
-                            "type": "string",
-                            "description": "Prompt text for the text input option (e.g., '其他请输入', 'Other, please specify')"
-                        }
+                        "required": ["needs_interaction"],
                    },
-                    "required": ["needs_interaction"]
-                }
+                },
            }
-    }]
+        ]
 
         if not self._llm_provider:
             raise ValueError(LLM_PROVIDER_REQUIRED_ERROR)
 
         # Call LLM with tools
-        result = await self._llm_provider.complete(messages, tools)
+        result = self._llm_provider.complete(messages, tools)
 
         # If interaction was generated through Function Calling, construct the MarkdownFlow format
         if result.transformed_to_interaction and result.metadata and "tool_args" in result.metadata:
@@ -889,15 +877,21 @@ Original Error: {error_message}
         if result.transformed_to_interaction:
             return result
 
-        # If not transformed, continue with normal processing
+        # If not transformed, continue with normal processing using standard content messages
+        normal_messages = self._build_content_messages(block_index, variables)
+
         if mode == ProcessMode.STREAM:
-            async def stream_wrapper():
-                async for chunk in self._llm_provider.stream(messages):
+
+            def stream_wrapper():
+                stream_generator = self._llm_provider.stream(normal_messages)
+                for chunk in stream_generator:
                     yield LLMResult(content=chunk)
+
             return stream_wrapper()
 
-        # Complete mode - already handled by complete_with_tools
-        return result
+        # Complete mode - use normal content processing
+        normal_result = self._llm_provider.complete(normal_messages)
+        return LLMResult(content=normal_result.content, prompt=normal_messages[-1]["content"], metadata=normal_result.metadata)
 
     def _build_dynamic_check_messages(
         self,
@@ -940,6 +934,7 @@ If conversion is needed, generate a STANDARD interaction block format with SPECI
         resolved_content = original_content
         if variables:
             from .utils import replace_variables_in_text
+
             resolved_content = replace_variables_in_text(original_content, variables)
 
         content_analysis = f"""Current content block to analyze:
@@ -956,6 +951,7 @@ If conversion is needed, generate a STANDARD interaction block format with SPECI
         # Add different analysis based on whether content has variables
         if "{{" in original_content and "}}" in original_content:
             from .utils import extract_variables_from_text
+
             content_variables = set(extract_variables_from_text(original_content))
 
             # Find new variables (not yet collected)
@@ -1015,9 +1011,7 @@ Analyze the content and provide the structured interaction data.""")
 
         user_prompt = "\n\n".join(user_prompt_parts)
 
-        messages = [
-            {"role": "system", "content": system_prompt}
-        ]
+        messages = [{"role": "system", "content": system_prompt}]
 
         # Add context if provided
         if context:
@@ -1063,7 +1057,7 @@ Analyze the content and provide the structured interaction data.""")
 
         return f"?[%{{{{{variable_name}}}}} {options_str}]"
 
-    async def _process_dynamic_interaction_validation(
+    def _process_dynamic_interaction_validation(
         self,
         block_index: int,
         interaction_format: str,
@@ -1072,47 +1066,102 @@ Analyze the content and provide the structured interaction data.""")
         context: list[dict[str, str]] | None,
         variables: dict[str, str | list[str]] | None,
     ) -> LLMResult:
-        """Validate user input for dynamically generated interaction blocks."""
+        """Validate user input for dynamically generated interaction blocks using same logic as normal interactions."""
+        _ = block_index  # Mark as intentionally unused
+        _ = context  # Mark as intentionally unused
 
         from .utils import InteractionParser
 
-        # Parse the interaction format
+        # Parse the interaction format using the same parser as normal interactions
         parser = InteractionParser()
-        interaction = parser.parse(interaction_format)
+        parse_result = parser.parse(interaction_format)
 
-        if interaction is None:
-            raise ValueError(f"Invalid interaction format: {interaction_format}")
+        if "error" in parse_result:
+            error_msg = f"Invalid interaction format: {parse_result['error']}"
+            return self._render_error(error_msg, mode)
 
-        # Extract variable name from the interaction format
-        # This is a simplified extraction - in real implementation you'd use the parser result
-        import re
-        var_match = re.search(r'%\{\{([^}]+)\}\}', interaction_format)
-        if not var_match:
-            raise ValueError(f"No variable found in interaction format: {interaction_format}")
+        # Extract variable name and interaction type
+        variable_name = parse_result.get("variable")
+        interaction_type = parse_result.get("type")
 
-        variable_name = var_match.group(1)
+        if not variable_name:
+            error_msg = f"No variable found in interaction format: {interaction_format}"
+            return self._render_error(error_msg, mode)
 
-        # Validate the user input
-        user_values = user_input.get(variable_name, [])
-        if not user_values:
-            raise ValueError(f"No input provided for variable: {variable_name}")
+        # Get user input for the target variable
+        target_values = user_input.get(variable_name, [])
 
-        # Process the validation result
-        updated_variables = dict(variables or {})
+        # Basic validation - check if input is provided when required
+        if not target_values:
+            # Check if this is a text input or allows empty input
+            allow_text_input = interaction_type in [
+                InteractionType.BUTTONS_WITH_TEXT,
+                InteractionType.BUTTONS_MULTI_WITH_TEXT,
+            ]
 
-        # Handle single vs multiple values
-        if len(user_values) == 1:
-            updated_variables[variable_name] = user_values[0]
-        else:
-            updated_variables[variable_name] = user_values
+            if allow_text_input:
+                # Allow empty input for buttons+text mode - merge with existing variables
+                merged_variables = dict(variables or {})
+                merged_variables[variable_name] = []
+                return LLMResult(
+                    content="",
+                    variables=merged_variables,
+                    metadata={
+                        "interaction_type": "dynamic_interaction",
+                        "empty_input": True,
+                    },
+                )
+            else:
+                error_msg = f"No input provided for variable '{variable_name}'"
+                return self._render_error(error_msg, mode)
 
-        # Return successful validation result
-        return LLMResult(
-            content=f"Successfully collected {variable_name}: {user_values}",
-            variables=updated_variables,
-            metadata={
-                "validation_success": True,
-                "variable_collected": variable_name,
-                "values_collected": user_values
-            }
-        )
+        # Use the same validation logic as normal interactions
+        if interaction_type in [
+            InteractionType.BUTTONS_ONLY,
+            InteractionType.BUTTONS_WITH_TEXT,
+            InteractionType.BUTTONS_MULTI_SELECT,
+            InteractionType.BUTTONS_MULTI_WITH_TEXT,
+        ]:
+            # Button validation - reuse the existing button validation logic
+            button_result = self._process_button_validation(
+                parse_result,
+                target_values,
+                variable_name,
+                mode,
+                interaction_type,
+            )
+
+            # Merge with existing variables for dynamic interactions
+            if hasattr(button_result, 'variables') and variables:
+                merged_variables = dict(variables)
+                merged_variables.update(button_result.variables)
+                return LLMResult(
+                    content=button_result.content,
+                    variables=merged_variables,
+                    metadata=button_result.metadata,
+                )
+            return button_result
+
+        elif interaction_type == InteractionType.NON_ASSIGNMENT_BUTTON:
+            # Non-assignment buttons: don't set variables, keep existing ones
+            return LLMResult(
+                content="",
+                variables=dict(variables or {}),
+                metadata={
+                    "interaction_type": "non_assignment_button",
+                    "user_input": user_input,
+                },
+            )
+        else:
+            # Text-only input type - merge with existing variables
+            merged_variables = dict(variables or {})
+            merged_variables[variable_name] = target_values
+            return LLMResult(
+                content="",
+                variables=merged_variables,
+                metadata={
+                    "interaction_type": "text_only",
+                    "target_variable": variable_name,
+                    "values": target_values,
+                },
+            )
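
For provider authors: when `complete(messages, tools)` is invoked with the Function Calling schema above, `core.py` only proceeds with the conversion if the returned `LLMResult` has `transformed_to_interaction` set and carries the tool arguments under `metadata["tool_args"]`. A hedged sketch of that return value (the field names come from the checks in the diff; the argument values are illustrative, and `transformed_to_interaction` is assumed to be a regular constructor field of the `LLMResult` dataclass):

```python
from markdown_flow.llm import LLMResult

# Illustrative only: the shape core.py expects back from complete(messages, tools)
# when the model calls create_interaction_block. core.py checks
# result.transformed_to_interaction and result.metadata["tool_args"] before
# building the ?[...] interaction format from these arguments.
result = LLMResult(
    content="",
    transformed_to_interaction=True,  # assumed dataclass field; read by core.py
    metadata={
        "tool_args": {
            "needs_interaction": True,
            "variable_name": "dish_choice",       # hypothetical example values
            "interaction_type": "single_select",
            "options": ["Sichuan", "Cantonese", "Shandong"],
            "allow_text_input": True,
            "text_input_prompt": "Other, please specify",
        }
    },
)
```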
markdown_flow/llm.py CHANGED
@@ -5,7 +5,7 @@ Provides LLM provider interfaces and related data models, supporting multiple pr
 """
 
 from abc import ABC, abstractmethod
-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Generator
 from dataclasses import dataclass
 from enum import Enum
 from typing import Any
@@ -40,11 +40,7 @@ class LLMProvider(ABC):
     """Abstract LLM provider interface."""
 
     @abstractmethod
-    async def complete(
-        self,
-        messages: list[dict[str, str]],
-        tools: list[dict[str, Any]] | None = None
-    ) -> LLMResult:
+    def complete(self, messages: list[dict[str, str]], tools: list[dict[str, Any]] | None = None) -> LLMResult:
         """
         Non-streaming LLM call with optional function calling support.
 
@@ -60,7 +56,7 @@ class LLMProvider(ABC):
         """
 
     @abstractmethod
-    async def stream(self, messages: list[dict[str, str]]) -> AsyncGenerator[str, None]:
+    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
         """
         Streaming LLM call.
 
@@ -75,16 +71,11 @@ class LLMProvider(ABC):
         """
 
 
-
 class NoLLMProvider(LLMProvider):
     """Empty LLM provider for prompt-only scenarios."""
 
-    async def complete(
-        self,
-        messages: list[dict[str, str]],
-        tools: list[dict[str, Any]] | None = None
-    ) -> LLMResult:
+    def complete(self, messages: list[dict[str, str]], tools: list[dict[str, Any]] | None = None) -> LLMResult:
         raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
 
-    async def stream(self, messages: list[dict[str, str]]) -> AsyncGenerator[str, None]:
+    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
         raise NotImplementedError(NO_LLM_PROVIDER_ERROR)
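
Implementing the new synchronous `LLMProvider` contract comes down to the two methods above. A minimal sketch against the 0.2.8 signatures (the `EchoProvider` name and its echo behavior are invented here for illustration; only the method signatures come from the diff):

```python
from collections.abc import Generator
from typing import Any

from markdown_flow.llm import LLMProvider, LLMResult


class EchoProvider(LLMProvider):
    """Toy provider that echoes the last user message; useful only for wiring tests."""

    def complete(self, messages: list[dict[str, str]], tools: list[dict[str, Any]] | None = None) -> LLMResult:
        # A real provider would branch into Function Calling when tools is not None.
        return LLMResult(content=messages[-1]["content"])

    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
        # Yield the reply in small chunks to exercise ProcessMode.STREAM.
        text = messages[-1]["content"]
        for i in range(0, len(text), 16):
            yield text[i : i + 16]
```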
markdown_flow-0.2.6.dist-info/METADATA → markdown_flow-0.2.8.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: markdown-flow
-Version: 0.2.6
+Version: 0.2.8
 Summary: An agent library designed to parse and process MarkdownFlow documents
 Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
 Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
 mf = MarkdownFlow(document, llm_provider=llm_provider)
 
 # Process with different modes
-result = await mf.process(
+result = mf.process(
     block_index=0,
     mode=ProcessMode.COMPLETE,
     variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -84,7 +84,7 @@ result = await mf.process(
 
 ```python
 # Stream processing for real-time responses
-async for chunk in mf.process(
+for chunk in mf.process(
     block_index=0,
     mode=ProcessMode.STREAM,
     variables={'name': 'Bob'}
@@ -92,6 +92,36 @@ async for chunk in mf.process(
     print(chunk.content, end='')
 ```
 
+### Dynamic Interaction Generation ✨
+
+Transform natural language content into interactive elements automatically:
+
+```python
+from markdown_flow import MarkdownFlow, ProcessMode
+
+# Dynamic interaction generation works automatically
+mf = MarkdownFlow(
+    document="询问用户的菜品偏好,并记录到变量{{菜品选择}}",
+    llm_provider=llm_provider,
+    document_prompt="你是中餐厅服务员,提供川菜、粤菜、鲁菜等选项"
+)
+
+# Process with Function Calling
+result = mf.process(0, ProcessMode.COMPLETE)
+
+if result.transformed_to_interaction:
+    print(f"Generated interaction: {result.content}")
+    # Output: ?[%{{菜品选择}} 宫保鸡丁||麻婆豆腐||水煮鱼||...其他菜品]
+
+# Continue with user input
+user_result = mf.process(
+    block_index=0,
+    mode=ProcessMode.COMPLETE,
+    user_input={"菜品选择": ["宫保鸡丁", "麻婆豆腐"]},
+    dynamic_interaction_format=result.content
+)
+```
+
 ### Interactive Elements
 
 ```python
@@ -122,13 +152,66 @@ user_input = {
     'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
 }
 
-result = await mf.process(
+result = mf.process(
     block_index=1,  # Process skills interaction
     user_input=user_input,
     mode=ProcessMode.COMPLETE
 )
 ```
 
+## ✨ Key Features
+
+### 🏗️ Three-Layer Architecture
+
+- **Document Level**: Parse `---` separators and `?[]` interaction patterns
+- **Block Level**: Categorize as CONTENT, INTERACTION, or PRESERVED_CONTENT
+- **Interaction Level**: Handle 6 different interaction types with smart validation
+
+### 🔄 Dynamic Interaction Generation
+
+- **Natural Language Input**: Write content in plain language
+- **AI-Powered Conversion**: LLM automatically detects interaction needs using Function Calling
+- **Structured Data Generation**: LLM returns structured data, core builds MarkdownFlow format
+- **Language Agnostic**: Support for any language with proper document prompts
+- **Context Awareness**: Both original and resolved variable contexts provided to LLM
+
+### 🤖 Unified LLM Integration
+
+- **Single Interface**: One `complete()` method for both regular and Function Calling modes
+- **Automatic Detection**: Tools parameter determines processing mode automatically
+- **Consistent Returns**: Always returns `LLMResult` with structured metadata
+- **Error Handling**: Automatic fallback from Function Calling to regular completion
+- **Provider Agnostic**: Abstract interface supports any LLM service
+
+### 📝 Variable System
+
+- **Replaceable Variables**: `{{variable}}` for content personalization
+- **Preserved Variables**: `%{{variable}}` for LLM understanding in interactions
+- **Multi-Value Support**: Handle both single values and arrays
+- **Smart Extraction**: Automatic detection from document content
+
+### 🎯 Interaction Types
+
+- **Text Input**: `?[%{{var}}...question]` - Free text entry
+- **Single Select**: `?[%{{var}} A|B|C]` - Choose one option
+- **Multi Select**: `?[%{{var}} A||B||C]` - Choose multiple options
+- **Mixed Mode**: `?[%{{var}} A||B||...custom]` - Predefined + custom input
+- **Display Buttons**: `?[Continue|Cancel]` - Action buttons without assignment
+- **Value Separation**: `?[%{{var}} Display//value|...]` - Different display/stored values
+
+### 🔒 Content Preservation
+
+- **Multiline Format**: `!===content!===` blocks output exactly as written
+- **Inline Format**: `===content===` for single-line preserved content
+- **Variable Support**: Preserved content can contain variables for substitution
+
+### ⚡ Performance Optimized
+
+- **Pre-compiled Regex**: All patterns compiled once for maximum performance
+- **Synchronous Interface**: Clean synchronous operations with optional streaming
+- **Stream Processing**: Real-time streaming responses supported
+- **Memory Efficient**: Lazy evaluation and generator patterns
+
 ## 📖 API Reference
 
 ### Core Classes
@@ -148,7 +231,7 @@ class MarkdownFlow:
     def get_all_blocks(self) -> List[Block]: ...
     def extract_variables(self) -> Set[str]: ...
 
-    async def process(
+    def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
@@ -193,15 +276,15 @@ class ProcessMode(Enum):
 
 ```python
 # Generate prompt only
-prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
+prompt_result = mf.process(0, ProcessMode.PROMPT_ONLY)
 print(prompt_result.content)  # Raw prompt text
 
 # Complete response
-complete_result = await mf.process(0, ProcessMode.COMPLETE)
+complete_result = mf.process(0, ProcessMode.COMPLETE)
 print(complete_result.content)  # Full LLM response
 
 # Streaming response
-async for chunk in mf.process(0, ProcessMode.STREAM):
+for chunk in mf.process(0, ProcessMode.STREAM):
     print(chunk.content, end='')
 ```
 
@@ -211,14 +294,14 @@ Abstract base class for implementing LLM providers.
 
 ```python
 from abc import ABC, abstractmethod
-from typing import AsyncGenerator
+from typing import Generator
 
 class LLMProvider(ABC):
     @abstractmethod
-    async def complete(self, prompt: str) -> LLMResult: ...
+    def complete(self, prompt: str) -> LLMResult: ...
 
     @abstractmethod
-    async def stream(self, prompt: str) -> AsyncGenerator[str, None]: ...
+    def stream(self, prompt: str) -> Generator[str, None, None]: ...
 ```
 
 **Custom Implementation:**
@@ -226,23 +309,23 @@ class LLMProvider(ABC):
 ```python
 class OpenAIProvider(LLMProvider):
     def __init__(self, api_key: str):
-        self.client = openai.AsyncOpenAI(api_key=api_key)
+        self.client = openai.OpenAI(api_key=api_key)
 
-    async def complete(self, prompt: str) -> LLMResult:
-        response = await self.client.completions.create(
+    def complete(self, prompt: str) -> LLMResult:
+        response = self.client.completions.create(
             model="gpt-3.5-turbo",
             prompt=prompt,
             max_tokens=500
         )
         return LLMResult(content=response.choices[0].text.strip())
 
-    async def stream(self, prompt: str):
-        stream = await self.client.completions.create(
+    def stream(self, prompt: str):
+        stream = self.client.completions.create(
             model="gpt-3.5-turbo",
             prompt=prompt,
             stream=True
        )
-        async for chunk in stream:
+        for chunk in stream:
             if chunk.choices[0].text:
                 yield chunk.choices[0].text
 ```
@@ -402,7 +485,7 @@ The new version introduces multi-select interaction support with improvements to
 user_input = "Python"
 
 # Process interaction
-result = await mf.process(
+result = mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -419,7 +502,7 @@
 }
 
 # Process interaction
-result = await mf.process(
+result = mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -462,10 +545,10 @@ class CustomAPIProvider(LLMProvider):
     def __init__(self, base_url: str, api_key: str):
         self.base_url = base_url
         self.api_key = api_key
-        self.client = httpx.AsyncClient()
+        self.client = httpx.Client()
 
-    async def complete(self, prompt: str) -> LLMResult:
-        response = await self.client.post(
+    def complete(self, prompt: str) -> LLMResult:
+        response = self.client.post(
             f"{self.base_url}/complete",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt, "max_tokens": 1000}
@@ -473,14 +556,14 @@ class CustomAPIProvider(LLMProvider):
         data = response.json()
         return LLMResult(content=data["text"])
 
-    async def stream(self, prompt: str):
-        async with self.client.stream(
+    def stream(self, prompt: str):
+        with self.client.stream(
             "POST",
             f"{self.base_url}/stream",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt}
         ) as response:
-            async for chunk in response.aiter_text():
+            for chunk in response.iter_text():
                 if chunk.strip():
                     yield chunk
 
@@ -492,7 +575,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
 ### Multi-Block Document Processing
 
 ```python
-async def process_conversation():
+def process_conversation():
     conversation = """
 # AI Assistant
 
@@ -529,7 +612,7 @@ Would you like to start with the basics?
     for i, block in enumerate(blocks):
         if block.block_type == BlockType.CONTENT:
             print(f"\n--- Processing Block {i} ---")
-            result = await mf.process(
+            result = mf.process(
                 block_index=i,
                 mode=ProcessMode.COMPLETE,
                 variables=variables
@@ -544,9 +627,8 @@ Would you like to start with the basics?
 
 ```python
 from markdown_flow import MarkdownFlow, ProcessMode
-import asyncio
 
-async def stream_with_progress():
+def stream_with_progress():
     document = """
 Generate a comprehensive Python tutorial for {{user_name}}
 focusing on {{topic}} with practical examples.
@@ -560,7 +642,7 @@ Include code samples, explanations, and practice exercises.
     content = ""
     chunk_count = 0
 
-    async for chunk in mf.process(
+    for chunk in mf.process(
         block_index=0,
         mode=ProcessMode.STREAM,
         variables={
@@ -599,13 +681,13 @@ class InteractiveDocumentBuilder:
         self.user_responses = {}
         self.current_block = 0
 
-    async def start_interaction(self):
+    def start_interaction(self):
         blocks = self.mf.get_all_blocks()
 
         for i, block in enumerate(blocks):
             if block.block_type == BlockType.CONTENT:
                 # Process content block with current variables
-                result = await self.mf.process(
+                result = self.mf.process(
                     block_index=i,
                     mode=ProcessMode.COMPLETE,
                     variables=self.user_responses
@@ -614,11 +696,11 @@ class InteractiveDocumentBuilder:
 
             elif block.block_type == BlockType.INTERACTION:
                 # Handle user interaction
-                response = await self.handle_interaction(block.content)
+                response = self.handle_interaction(block.content)
                 if response:
                     self.user_responses.update(response)
 
-    async def handle_interaction(self, interaction_content: str):
+    def handle_interaction(self, interaction_content: str):
         from markdown_flow.utils import InteractionParser
 
         interaction = InteractionParser.parse(interaction_content)
@@ -635,7 +717,7 @@
                 return {interaction.variable: selected}
             except (ValueError, IndexError):
                 print("Invalid choice")
-                return await self.handle_interaction(interaction_content)
+                return self.handle_interaction(interaction_content)
 
         elif interaction.name == "TEXT_ONLY":
             response = input(f"{interaction.question}: ")
@@ -657,7 +739,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
 """
 
 builder = InteractiveDocumentBuilder(template, your_llm_provider)
-await builder.start_interaction()
+builder.start_interaction()
 ```
 
 ### Variable System Deep Dive
markdown_flow-0.2.8.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+markdown_flow/__init__.py,sha256=rx_PP6l0JOJ3CriXTjlMOycNclrIyvxbgHakpx37SxM,2874
+markdown_flow/constants.py,sha256=pd_KCpTEVlz_IXYekrByqb9VWCQR_XHXoGsFYdLW1Eg,8006
+markdown_flow/core.py,sha256=k_6cNDp3a0PSnWF3yOWmuIpFHRp7wTsPUcGbnMpw5bg,48548
+markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
+markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
+markdown_flow/llm.py,sha256=7DjOL2h2N1g0L4NF9kn0M5mR45ZL0vPsW3TzuOGy1bw,2547
+markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
+markdown_flow/utils.py,sha256=cVi0zDRK_rCMAr3EDhgITmx6Po5fSvYjqrprYaitYE0,28450
+markdown_flow-0.2.8.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
+markdown_flow-0.2.8.dist-info/METADATA,sha256=Yv38um7zSpnztRo60I1bVCsi3U6_XMcg-9WcVFILPg4,24286
+markdown_flow-0.2.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+markdown_flow-0.2.8.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
+markdown_flow-0.2.8.dist-info/RECORD,,
markdown_flow-0.2.6.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-markdown_flow/__init__.py,sha256=BudzfDYsTYYeQnjQXA0WTe5AOHT73l8YMKqjJccL4l0,2874
-markdown_flow/constants.py,sha256=pd_KCpTEVlz_IXYekrByqb9VWCQR_XHXoGsFYdLW1Eg,8006
-markdown_flow/core.py,sha256=IctWJ8gbe90RhNOOzsWOlc7jJaYLexKtnZmbEgleqU0,46319
-markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
-markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
-markdown_flow/llm.py,sha256=7A2jy8wfVwLWieWVjV8ZTe6y0-1pYmLW0pyuenuci3s,2619
-markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
-markdown_flow/utils.py,sha256=cVi0zDRK_rCMAr3EDhgITmx6Po5fSvYjqrprYaitYE0,28450
-markdown_flow-0.2.6.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
-markdown_flow-0.2.6.dist-info/METADATA,sha256=gEvq6LxkTTtL-igF15HWMtaDgCu4nZNVW0IRJzxycd8,21009
-markdown_flow-0.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-markdown_flow-0.2.6.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
-markdown_flow-0.2.6.dist-info/RECORD,,