markdown-flow 0.2.18__py3-none-any.whl → 0.2.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of markdown-flow might be problematic.
- markdown_flow/__init__.py +3 -4
- markdown_flow/constants.py +47 -40
- markdown_flow/core.py +340 -94
- markdown_flow/llm.py +4 -3
- markdown_flow/models.py +1 -1
- markdown_flow/parser/__init__.py +34 -0
- markdown_flow/parser/interaction.py +354 -0
- markdown_flow/parser/json_parser.py +50 -0
- markdown_flow/parser/output.py +215 -0
- markdown_flow/parser/validation.py +121 -0
- markdown_flow/parser/variable.py +95 -0
- markdown_flow/providers/__init__.py +15 -0
- markdown_flow/providers/config.py +51 -0
- markdown_flow/providers/openai.py +371 -0
- markdown_flow/utils.py +43 -43
- {markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/METADATA +45 -52
- markdown_flow-0.2.26.dist-info/RECORD +22 -0
- markdown_flow-0.2.18.dist-info/RECORD +0 -13
- {markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/WHEEL +0 -0
- {markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/licenses/LICENSE +0 -0
- {markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/top_level.txt +0 -0
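The 0.2.26 wheel also splits parsing and provider code into new `markdown_flow.parser` and `markdown_flow.providers` subpackages. A minimal sketch of the new import path, based on the `InteractionParser` usage in the updated README further down; other exported names are not confirmed by this diff:

```python
# New in 0.2.26: parsing helpers live in a dedicated subpackage.
from markdown_flow.parser import InteractionParser

# The updated README below constructs the parser and then calls
# parse() on an instance with the raw content of an interaction block.
parser = InteractionParser()
```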
markdown_flow/utils.py CHANGED

````diff
@@ -19,6 +19,7 @@ from .constants import (
     COMPILED_PERCENT_VARIABLE_REGEX,
     COMPILED_PRESERVE_FENCE_REGEX,
     COMPILED_SINGLE_PIPE_SPLIT_REGEX,
+    CONTEXT_BUTTON_OPTIONS_TEMPLATE,
     CONTEXT_CONVERSATION_TEMPLATE,
     CONTEXT_QUESTION_MARKER,
     CONTEXT_QUESTION_TEMPLATE,
@@ -67,7 +68,7 @@ def is_preserved_content_block(content: str) -> bool:
     Check if content is completely preserved content block.

     Preserved blocks are entirely wrapped by markers with no external content.
-    Supports inline (===content===)
+    Supports inline (===content===), multiline (!=== ... !===) formats, and mixed formats.

     Args:
         content: Content to check
@@ -81,61 +82,50 @@ def is_preserved_content_block(content: str) -> bool:

     lines = content.split("\n")

-    all_inline_format = True
-    has_any_content = False
-
-    for line in lines:
-        stripped_line = line.strip()
-        if stripped_line:  # Non-empty line
-            has_any_content = True
-            # Check if inline format: ===content===
-            match = COMPILED_INLINE_PRESERVE_REGEX.match(stripped_line)
-            if match:
-                # Ensure inner content exists and contains no ===
-                inner_content = match.group(1).strip()
-                if not inner_content or "===" in inner_content:
-                    all_inline_format = False
-                    break
-            else:
-                all_inline_format = False  # type: ignore[unreachable]
-                break
-
-    # If all lines are inline format, return directly
-    if has_any_content and all_inline_format:
-        return True
-
-    # Check multiline format using state machine
+    # Use state machine to validate that all non-empty content is preserved
     state = "OUTSIDE"  # States: OUTSIDE, INSIDE
-    has_preserve_blocks = False  # Has preserve blocks
+    has_preserve_content = False

     for line in lines:
         stripped_line = line.strip()

+        # Check if this line is a fence marker (!===)
         if COMPILED_PRESERVE_FENCE_REGEX.match(stripped_line):
             if state == "OUTSIDE":
                 # Enter preserve block
                 state = "INSIDE"
+                has_preserve_content = True
             elif state == "INSIDE":
                 # Exit preserve block
                 state = "OUTSIDE"
+            # Fence markers themselves are valid preserved content
+            continue
+
+        # Non-fence lines
+        if stripped_line:  # Non-empty line
+            if state == "INSIDE":
+                # Inside fence block, this is valid preserved content
+                has_preserve_content = True
+            else:
+                # Outside fence block, check if it's inline format
+                match = COMPILED_INLINE_PRESERVE_REGEX.match(stripped_line)
+                if match:
+                    # Ensure inner content exists and contains no ===
+                    inner_content = match.group(1).strip()
+                    if inner_content and "===" not in inner_content:
+                        # Valid inline format
+                        has_preserve_content = True
+                    else:
+                        # Invalid inline format
+                        return False
+                else:
+                    # Not fence, not inline format -> external content
+                    return False

     # Judgment conditions:
-    # 1. Must have
-    # 2.
-    return has_preserve_blocks and not has_content_outside and state == "OUTSIDE"
+    # 1. Must have preserved content
+    # 2. Final state must be OUTSIDE (all fence blocks closed)
+    return has_preserve_content and state == "OUTSIDE"


 def extract_interaction_question(content: str) -> str | None:
````
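Read end to end, the rewritten function now accepts inline, fenced, and mixed preserved content in a single pass. A small sketch of the expected behavior, assuming `===text===` is the inline marker and `!===` the fence marker, as the regex names and docstring above suggest:

```python
from markdown_flow.utils import is_preserved_content_block

# Inline-only: every non-empty line is wrapped as ===content===
print(is_preserved_content_block("===Title===\n===Subtitle==="))    # expected: True

# Fenced: all content sits between !=== markers
print(is_preserved_content_block("!===\nraw block\n!==="))          # expected: True

# Mixed inline and fenced content is now also accepted
print(is_preserved_content_block("===Title===\n!===\nbody\n!==="))  # expected: True

# Any content outside a marker disqualifies the block
print(is_preserved_content_block("intro\n!===\nbody\n!==="))        # expected: False
```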
````diff
@@ -479,6 +469,7 @@ def generate_smart_validation_template(
     target_variable: str,
     context: list[dict[str, Any]] | None = None,
     interaction_question: str | None = None,
+    buttons: list[dict[str, str]] | None = None,
 ) -> str:
     """
     Generate smart validation template based on context and question.
@@ -487,19 +478,28 @@ def generate_smart_validation_template(
         target_variable: Target variable name
         context: Context message list with role and content fields
         interaction_question: Question text from interaction block
+        buttons: Button options list with display and value fields

     Returns:
         Generated validation template
     """
     # Build context information
     context_info = ""
-    if interaction_question or context:
+    if interaction_question or context or buttons:
         context_parts = []

         # Add question information (most important, put first)
         if interaction_question:
             context_parts.append(CONTEXT_QUESTION_TEMPLATE.format(question=interaction_question))

+        # Add button options information
+        if buttons:
+            button_displays = [btn.get("display", "") for btn in buttons if btn.get("display")]
+            if button_displays:
+                button_options_str = ", ".join(button_displays)
+                button_info = CONTEXT_BUTTON_OPTIONS_TEMPLATE.format(button_options=button_options_str)
+                context_parts.append(button_info)
+
         # Add conversation context
         if context:
             for msg in context:
````
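For reference, the expected shape of the new `buttons` argument and the display-label extraction it feeds into; the values here are illustrative, only the parameter name and dict keys come from the diff above:

```python
# Illustrative button list in the shape the new parameter expects:
# dicts carrying "display" (shown to the user) and "value" keys.
buttons = [
    {"display": "Beginner", "value": "beginner"},
    {"display": "Intermediate", "value": "intermediate"},
    {"display": "", "value": "skipped"},  # empty display labels are filtered out
]

# Mirrors the added branch: collect non-empty display labels and join them
# before formatting CONTEXT_BUTTON_OPTIONS_TEMPLATE.
button_displays = [btn.get("display", "") for btn in buttons if btn.get("display")]
print(", ".join(button_displays))  # -> Beginner, Intermediate
```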
{markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/METADATA CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: markdown-flow
-Version: 0.2.18
+Version: 0.2.26
 Summary: An agent library designed to parse and process MarkdownFlow documents
 Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
 Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
 mf = MarkdownFlow(document, llm_provider=llm_provider)

 # Process with different modes
-result = await mf.process(
+result = mf.process(
     block_index=0,
     mode=ProcessMode.COMPLETE,
     variables={'name': 'Alice', 'level': 'Intermediate'}
@@ -84,7 +84,7 @@ result = await mf.process(

 ```python
 # Stream processing for real-time responses
+for chunk in mf.process(
     block_index=0,
     mode=ProcessMode.STREAM,
     variables={'name': 'Bob'}
@@ -122,7 +122,7 @@ user_input = {
     'skills': ['Python', 'JavaScript', 'Go']  # Multi-selection
 }

-result =
+result = mf.process(
     block_index=1,  # Process skills interaction
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -148,13 +148,13 @@ class MarkdownFlow:
     def get_all_blocks(self) -> List[Block]: ...
     def extract_variables(self) -> Set[str]: ...

+    def process(
         self,
         block_index: int,
         mode: ProcessMode = ProcessMode.COMPLETE,
         variables: Optional[Dict[str, str]] = None,
         user_input: Optional[str] = None
-    ) -> LLMResult: ...
+    ) -> LLMResult | Generator[LLMResult, None, None]: ...
 ```

 **Methods:**
````
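Taken together with the dropped `await` calls elsewhere in this README diff, the new return type suggests `process()` became synchronous between 0.2.18 and 0.2.26, returning an `LLMResult` directly in COMPLETE mode and a generator of `LLMResult` chunks in STREAM mode. A hedged sketch of the 0.2.26 call pattern, inferred from these snippets rather than from release notes:

```python
from markdown_flow import MarkdownFlow, ProcessMode

def run_block(mf: MarkdownFlow, block_index: int = 0) -> None:
    """Call pattern inferred from the updated README snippets in this diff."""
    # 0.2.18 awaited this call (result = await mf.process(...));
    # 0.2.26 appears to call it synchronously.
    result = mf.process(block_index=block_index, mode=ProcessMode.COMPLETE)
    print(result.content)

    # STREAM mode now reads as a plain generator of LLMResult chunks.
    for chunk in mf.process(block_index=block_index, mode=ProcessMode.STREAM):
        print(chunk.content, end="")
```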
````diff
@@ -184,24 +184,19 @@ Processing mode enumeration for different use cases.

 ```python
 class ProcessMode(Enum):
-    STREAM = "stream"        # Streaming LLM responses
+    COMPLETE = "complete"    # Non-streaming LLM processing
+    STREAM = "stream"        # Streaming LLM responses
 ```

 **Usage:**

 ```python
-# Generate prompt only
-prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
-print(prompt_result.content)  # Raw prompt text
-
 # Complete response
-complete_result =
+complete_result = mf.process(0, ProcessMode.COMPLETE)
 print(complete_result.content)  # Full LLM response

 # Streaming response
+for chunk in mf.process(0, ProcessMode.STREAM):
     print(chunk.content, end='')
 ```
@@ -211,14 +206,14 @@ Abstract base class for implementing LLM providers.

 ```python
 from abc import ABC, abstractmethod
-from typing import
+from typing import Generator

 class LLMProvider(ABC):
     @abstractmethod
+    def complete(self, messages: list[dict[str, str]]) -> str: ...

     @abstractmethod
+    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]: ...
 ```

 **Custom Implementation:**
@@ -226,25 +221,24 @@ class LLMProvider(ABC):
 ```python
 class OpenAIProvider(LLMProvider):
     def __init__(self, api_key: str):
-        self.client = openai.
+        self.client = openai.OpenAI(api_key=api_key)

-        response =
+    def complete(self, messages: list[dict[str, str]]) -> str:
+        response = self.client.chat.completions.create(
             model="gpt-3.5-turbo",
-            max_tokens=500
+            messages=messages
         )
-        return
+        return response.choices[0].message.content

-        stream =
+    def stream(self, messages: list[dict[str, str]]):
+        stream = self.client.chat.completions.create(
             model="gpt-3.5-turbo",
+            messages=messages,
             stream=True
         )
-        if chunk.choices[0].
-            yield chunk.choices[0].
+        for chunk in stream:
+            if chunk.choices[0].delta.content:
+                yield chunk.choices[0].delta.content
 ```

 ### Block Types
````
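To tie the pieces above together, here is a hedged end-to-end sketch: a throwaway provider implementing the synchronous LLMProvider methods from the README, wired into MarkdownFlow and driven through process(). The EchoProvider name, the import location of LLMProvider, and the document text are illustrative assumptions, not part of this diff.

```python
from typing import Generator

from markdown_flow import MarkdownFlow, ProcessMode
from markdown_flow.llm import LLMProvider  # assumed import path; only the class name appears in the README

class EchoProvider(LLMProvider):
    """Hypothetical stand-in that echoes the prompt instead of calling an LLM."""

    def complete(self, messages: list[dict[str, str]]) -> str:
        # Return the last message verbatim.
        return messages[-1]["content"]

    def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]:
        # Yield the reply in small slices to mimic streaming.
        reply = self.complete(messages)
        for i in range(0, len(reply), 16):
            yield reply[i : i + 16]

document = "Generate a short Python tip for {{user_name}} about {{topic}}."
mf = MarkdownFlow(document, llm_provider=EchoProvider())

result = mf.process(
    block_index=0,
    mode=ProcessMode.COMPLETE,
    variables={"user_name": "developer", "topic": "generators"},
)
print(result.content)
```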
````diff
@@ -402,7 +396,7 @@ The new version introduces multi-select interaction support with improvements to
 user_input = "Python"

 # Process interaction
-result =
+result = mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -419,7 +413,7 @@ user_input = {
 }

 # Process interaction
-result =
+result = mf.process(
     block_index=1,
     user_input=user_input,
     mode=ProcessMode.COMPLETE
@@ -462,10 +456,10 @@ class CustomAPIProvider(LLMProvider):
     def __init__(self, base_url: str, api_key: str):
         self.base_url = base_url
         self.api_key = api_key
-        self.client = httpx.
+        self.client = httpx.Client()

-        response =
+    def complete(self, prompt: str) -> LLMResult:
+        response = self.client.post(
             f"{self.base_url}/complete",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt, "max_tokens": 1000}
@@ -473,14 +467,14 @@ class CustomAPIProvider(LLMProvider):
         data = response.json()
         return LLMResult(content=data["text"])

+    def stream(self, prompt: str):
+        with self.client.stream(
             "POST",
             f"{self.base_url}/stream",
             headers={"Authorization": f"Bearer {self.api_key}"},
             json={"prompt": prompt}
         ) as response:
+            for chunk in response.iter_text():
                 if chunk.strip():
                     yield chunk
@@ -492,7 +486,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
 ### Multi-Block Document Processing

 ```python
+def process_conversation():
     conversation = """
 # AI Assistant
@@ -529,7 +523,7 @@ Would you like to start with the basics?
 for i, block in enumerate(blocks):
     if block.block_type == BlockType.CONTENT:
         print(f"\n--- Processing Block {i} ---")
-        result =
+        result = mf.process(
             block_index=i,
             mode=ProcessMode.COMPLETE,
             variables=variables
@@ -544,9 +538,8 @@ Would you like to start with the basics?

 ```python
 from markdown_flow import MarkdownFlow, ProcessMode
-import asyncio

+def stream_with_progress():
     document = """
 Generate a comprehensive Python tutorial for {{user_name}}
 focusing on {{topic}} with practical examples.
@@ -560,12 +553,12 @@ Include code samples, explanations, and practice exercises.
     content = ""
     chunk_count = 0

+    for chunk in mf.process(
         block_index=0,
         mode=ProcessMode.STREAM,
         variables={
             'user_name': 'developer',
-            'topic': '
+            'topic': 'synchronous programming'
         }
     ):
         content += chunk.content
@@ -599,13 +592,13 @@ class InteractiveDocumentBuilder:
         self.user_responses = {}
         self.current_block = 0

+    def start_interaction(self):
         blocks = self.mf.get_all_blocks()

         for i, block in enumerate(blocks):
             if block.block_type == BlockType.CONTENT:
                 # Process content block with current variables
-                result =
+                result = self.mf.process(
                     block_index=i,
                     mode=ProcessMode.COMPLETE,
                     variables=self.user_responses
@@ -614,14 +607,14 @@ class InteractiveDocumentBuilder:

             elif block.block_type == BlockType.INTERACTION:
                 # Handle user interaction
-                response =
+                response = self.handle_interaction(block.content)
                 if response:
                     self.user_responses.update(response)

-    from markdown_flow.
+    def handle_interaction(self, interaction_content: str):
+        from markdown_flow.parser import InteractionParser

-        interaction = InteractionParser.parse(interaction_content)
+        interaction = InteractionParser().parse(interaction_content)
         print(f"\n{interaction_content}")

         if interaction.name == "BUTTONS_ONLY":
@@ -635,7 +628,7 @@ class InteractiveDocumentBuilder:
             return {interaction.variable: selected}
         except (ValueError, IndexError):
             print("Invalid choice")
-            return
+            return self.handle_interaction(interaction_content)

         elif interaction.name == "TEXT_ONLY":
             response = input(f"{interaction.question}: ")
@@ -657,7 +650,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
 """

 builder = InteractiveDocumentBuilder(template, your_llm_provider)
+builder.start_interaction()
 ```

 ### Variable System Deep Dive
````
markdown_flow-0.2.26.dist-info/RECORD ADDED

````diff
@@ -0,0 +1,22 @@
+markdown_flow/__init__.py,sha256=wBE8Q9hyL-5sA6OWPuucQDbJAhKZ2XNlPl9xu-71v4k,2775
+markdown_flow/constants.py,sha256=zHXE8E7E4Axf75xZdCdjkLRPfzXDZ8s9D9PxizncrVA,9653
+markdown_flow/core.py,sha256=g2-KbKnD1kPywgNLe1BR81dgZBn1wYJuDxk2WSuhO9Y,42476
+markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
+markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
+markdown_flow/llm.py,sha256=MJRbXKj35AjLCAhWpFhS07s-m3YU2qO1HOFff05HG2I,2239
+markdown_flow/models.py,sha256=8uG4mmyTd53UPZNIoBLy03iwcPPrikuXLY4CKuR_oCM,2894
+markdown_flow/utils.py,sha256=TlQan3rNIcbIzgOa-kpphFKpw9IXblFKhIesac_lu3Y,28769
+markdown_flow/parser/__init__.py,sha256=_mua4f2utEvoB6aGk2ZYhbKCJQIEv4_Nnf7Zkyrx8aM,1035
+markdown_flow/parser/interaction.py,sha256=T4W7iO-iyNJnpM7SmvOH_DRlLuWSDcFyIrN2fH6cv7w,12653
+markdown_flow/parser/json_parser.py,sha256=78GhyyOjlg0l4UmKKNc4zrg-4pSHzrJEt7VKqbz3uyE,1305
+markdown_flow/parser/output.py,sha256=LgxvH6-RINM50p58miQtw_fHER1JEWDGucHk5-sZ-gk,8087
+markdown_flow/parser/validation.py,sha256=dkk8MopUdj1Yj6YIcRqRzcgXxUeM679hBAY2pnOC2DU,4417
+markdown_flow/parser/variable.py,sha256=eJLbVOyZT8uYM5eJNv5kHLqdRoNz5iNlxHhhi2oDW94,2986
+markdown_flow/providers/__init__.py,sha256=3dJWYdzb2W8nwOeQIJ-RWO79JOlHaAB9B0dXn1aP4eQ,318
+markdown_flow/providers/config.py,sha256=KJu7uChBcEUZAOjo1GsXB2Cq9DDIaltbiVCOyAw4BDQ,2120
+markdown_flow/providers/openai.py,sha256=NySSSCHloYmkKXwppk3eeBOTh9dVeHu9z9Ne3VaUU0A,12425
+markdown_flow-0.2.26.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
+markdown_flow-0.2.26.dist-info/METADATA,sha256=AxVoOBt1d32QUf-DHbUkjMHDqpYkTg0LqOfec1_9Ea0,20686
+markdown_flow-0.2.26.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+markdown_flow-0.2.26.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
+markdown_flow-0.2.26.dist-info/RECORD,,
````

markdown_flow-0.2.18.dist-info/RECORD DELETED

````diff
@@ -1,13 +0,0 @@
-markdown_flow/__init__.py,sha256=5TBCmuAdWvPqKJHpP5_R2qVOGf4FFkdaL6oUazBIY7E,2851
-markdown_flow/constants.py,sha256=HI061nHbGG9BeN-n9dMX17GlAT7fmYmsRZ6Cr8OSbXY,8809
-markdown_flow/core.py,sha256=Z0c5SssgPhqbDhbO2HZgHAaX6RpJEccb_r9RoGHVEjI,32565
-markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
-markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
-markdown_flow/llm.py,sha256=E2aq-OXwt4rS-alpf_iIJd2K38De_O3pzSZHuEaMeoE,2100
-markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
-markdown_flow/utils.py,sha256=rJOalKxCGuXYiAJzI3WfD-loLc-7BHQGpac934_uC4c,28504
-markdown_flow-0.2.18.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
-markdown_flow-0.2.18.dist-info/METADATA,sha256=-y3oljzO7iSaHHodM8c4id3SRMDxdP3zhSpihSUYW0I,21010
-markdown_flow-0.2.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-markdown_flow-0.2.18.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
-markdown_flow-0.2.18.dist-info/RECORD,,
````

{markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/WHEEL: file without changes
{markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/licenses/LICENSE: file without changes
{markdown_flow-0.2.18.dist-info → markdown_flow-0.2.26.dist-info}/top_level.txt: file without changes