markdown-flow 0.2.19__py3-none-any.whl → 0.2.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- markdown_flow/__init__.py +4 -4
- markdown_flow/constants.py +210 -99
- markdown_flow/core.py +605 -209
- markdown_flow/llm.py +4 -3
- markdown_flow/models.py +1 -17
- markdown_flow/parser/__init__.py +38 -0
- markdown_flow/parser/code_fence_utils.py +190 -0
- markdown_flow/parser/interaction.py +354 -0
- markdown_flow/parser/json_parser.py +50 -0
- markdown_flow/parser/output.py +215 -0
- markdown_flow/parser/preprocessor.py +151 -0
- markdown_flow/parser/validation.py +100 -0
- markdown_flow/parser/variable.py +95 -0
- markdown_flow/providers/__init__.py +16 -0
- markdown_flow/providers/config.py +46 -0
- markdown_flow/providers/openai.py +369 -0
- markdown_flow/utils.py +43 -43
- {markdown_flow-0.2.19.dist-info → markdown_flow-0.2.30.dist-info}/METADATA +45 -52
- markdown_flow-0.2.30.dist-info/RECORD +24 -0
- markdown_flow-0.2.19.dist-info/RECORD +0 -13
- {markdown_flow-0.2.19.dist-info → markdown_flow-0.2.30.dist-info}/WHEEL +0 -0
- {markdown_flow-0.2.19.dist-info → markdown_flow-0.2.30.dist-info}/licenses/LICENSE +0 -0
- {markdown_flow-0.2.19.dist-info → markdown_flow-0.2.30.dist-info}/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: markdown-flow
|
|
3
|
-
Version: 0.2.19
|
|
3
|
+
Version: 0.2.30
|
|
4
4
|
Summary: An agent library designed to parse and process MarkdownFlow documents
|
|
5
5
|
Project-URL: Homepage, https://github.com/ai-shifu/markdown-flow-agent-py
|
|
6
6
|
Project-URL: Bug Tracker, https://github.com/ai-shifu/markdown-flow-agent-py/issues
|
|
@@ -73,7 +73,7 @@ llm_provider = YourLLMProvider(api_key="your-key")
|
|
|
73
73
|
mf = MarkdownFlow(document, llm_provider=llm_provider)
|
|
74
74
|
|
|
75
75
|
# Process with different modes
|
|
76
|
-
result = await mf.process(
|
|
76
|
+
result = mf.process(
|
|
77
77
|
block_index=0,
|
|
78
78
|
mode=ProcessMode.COMPLETE,
|
|
79
79
|
variables={'name': 'Alice', 'level': 'Intermediate'}
|
|
@@ -84,7 +84,7 @@ result = await mf.process(
|
|
|
84
84
|
|
|
85
85
|
```python
|
|
86
86
|
# Stream processing for real-time responses
|
|
87
|
-
|
|
87
|
+
for chunk in mf.process(
|
|
88
88
|
block_index=0,
|
|
89
89
|
mode=ProcessMode.STREAM,
|
|
90
90
|
variables={'name': 'Bob'}
|
|
@@ -122,7 +122,7 @@ user_input = {
|
|
|
122
122
|
'skills': ['Python', 'JavaScript', 'Go'] # Multi-selection
|
|
123
123
|
}
|
|
124
124
|
|
|
125
|
-
result =
|
|
125
|
+
result = mf.process(
|
|
126
126
|
block_index=1, # Process skills interaction
|
|
127
127
|
user_input=user_input,
|
|
128
128
|
mode=ProcessMode.COMPLETE
|
|
@@ -148,13 +148,13 @@ class MarkdownFlow:
|
|
|
148
148
|
def get_all_blocks(self) -> List[Block]: ...
|
|
149
149
|
def extract_variables(self) -> Set[str]: ...
|
|
150
150
|
|
|
151
|
-
|
|
151
|
+
def process(
|
|
152
152
|
self,
|
|
153
153
|
block_index: int,
|
|
154
154
|
mode: ProcessMode = ProcessMode.COMPLETE,
|
|
155
155
|
variables: Optional[Dict[str, str]] = None,
|
|
156
156
|
user_input: Optional[str] = None
|
|
157
|
-
) -> LLMResult: ...
|
|
157
|
+
) -> LLMResult | Generator[LLMResult, None, None]: ...
|
|
158
158
|
```
|
|
159
159
|
|
|
160
160
|
**Methods:**
|
|
@@ -184,24 +184,19 @@ Processing mode enumeration for different use cases.
|
|
|
184
184
|
|
|
185
185
|
```python
|
|
186
186
|
class ProcessMode(Enum):
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
STREAM = "stream" # Streaming LLM responses
|
|
187
|
+
COMPLETE = "complete" # Non-streaming LLM processing
|
|
188
|
+
STREAM = "stream" # Streaming LLM responses
|
|
190
189
|
```
|
|
191
190
|
|
|
192
191
|
**Usage:**
|
|
193
192
|
|
|
194
193
|
```python
|
|
195
|
-
# Generate prompt only
|
|
196
|
-
prompt_result = await mf.process(0, ProcessMode.PROMPT_ONLY)
|
|
197
|
-
print(prompt_result.content) # Raw prompt text
|
|
198
|
-
|
|
199
194
|
# Complete response
|
|
200
|
-
complete_result =
|
|
195
|
+
complete_result = mf.process(0, ProcessMode.COMPLETE)
|
|
201
196
|
print(complete_result.content) # Full LLM response
|
|
202
197
|
|
|
203
198
|
# Streaming response
|
|
204
|
-
|
|
199
|
+
for chunk in mf.process(0, ProcessMode.STREAM):
|
|
205
200
|
print(chunk.content, end='')
|
|
206
201
|
```
|
|
207
202
|
|
|
@@ -211,14 +206,14 @@ Abstract base class for implementing LLM providers.
|
|
|
211
206
|
|
|
212
207
|
```python
|
|
213
208
|
from abc import ABC, abstractmethod
|
|
214
|
-
from typing import
|
|
209
|
+
from typing import Generator
|
|
215
210
|
|
|
216
211
|
class LLMProvider(ABC):
|
|
217
212
|
@abstractmethod
|
|
218
|
-
|
|
213
|
+
def complete(self, messages: list[dict[str, str]]) -> str: ...
|
|
219
214
|
|
|
220
215
|
@abstractmethod
|
|
221
|
-
|
|
216
|
+
def stream(self, messages: list[dict[str, str]]) -> Generator[str, None, None]: ...
|
|
222
217
|
```
|
|
223
218
|
|
|
224
219
|
**Custom Implementation:**
|
|
@@ -226,25 +221,24 @@ class LLMProvider(ABC):
|
|
|
226
221
|
```python
|
|
227
222
|
class OpenAIProvider(LLMProvider):
|
|
228
223
|
def __init__(self, api_key: str):
|
|
229
|
-
self.client = openai.
|
|
224
|
+
self.client = openai.OpenAI(api_key=api_key)
|
|
230
225
|
|
|
231
|
-
|
|
232
|
-
response =
|
|
226
|
+
def complete(self, messages: list[dict[str, str]]) -> str:
|
|
227
|
+
response = self.client.chat.completions.create(
|
|
233
228
|
model="gpt-3.5-turbo",
|
|
234
|
-
|
|
235
|
-
max_tokens=500
|
|
229
|
+
messages=messages
|
|
236
230
|
)
|
|
237
|
-
return
|
|
231
|
+
return response.choices[0].message.content
|
|
238
232
|
|
|
239
|
-
|
|
240
|
-
stream =
|
|
233
|
+
def stream(self, messages: list[dict[str, str]]):
|
|
234
|
+
stream = self.client.chat.completions.create(
|
|
241
235
|
model="gpt-3.5-turbo",
|
|
242
|
-
|
|
236
|
+
messages=messages,
|
|
243
237
|
stream=True
|
|
244
238
|
)
|
|
245
|
-
|
|
246
|
-
if chunk.choices[0].
|
|
247
|
-
yield chunk.choices[0].
|
|
239
|
+
for chunk in stream:
|
|
240
|
+
if chunk.choices[0].delta.content:
|
|
241
|
+
yield chunk.choices[0].delta.content
|
|
248
242
|
```
|
|
249
243
|
|
|
250
244
|
### Block Types
|
|
@@ -402,7 +396,7 @@ The new version introduces multi-select interaction support with improvements to
|
|
|
402
396
|
user_input = "Python"
|
|
403
397
|
|
|
404
398
|
# Process interaction
|
|
405
|
-
result =
|
|
399
|
+
result = mf.process(
|
|
406
400
|
block_index=1,
|
|
407
401
|
user_input=user_input,
|
|
408
402
|
mode=ProcessMode.COMPLETE
|
|
@@ -419,7 +413,7 @@ user_input = {
|
|
|
419
413
|
}
|
|
420
414
|
|
|
421
415
|
# Process interaction
|
|
422
|
-
result =
|
|
416
|
+
result = mf.process(
|
|
423
417
|
block_index=1,
|
|
424
418
|
user_input=user_input,
|
|
425
419
|
mode=ProcessMode.COMPLETE
|
|
@@ -462,10 +456,10 @@ class CustomAPIProvider(LLMProvider):
|
|
|
462
456
|
def __init__(self, base_url: str, api_key: str):
|
|
463
457
|
self.base_url = base_url
|
|
464
458
|
self.api_key = api_key
|
|
465
|
-
self.client = httpx.
|
|
459
|
+
self.client = httpx.Client()
|
|
466
460
|
|
|
467
|
-
|
|
468
|
-
response =
|
|
461
|
+
def complete(self, prompt: str) -> LLMResult:
|
|
462
|
+
response = self.client.post(
|
|
469
463
|
f"{self.base_url}/complete",
|
|
470
464
|
headers={"Authorization": f"Bearer {self.api_key}"},
|
|
471
465
|
json={"prompt": prompt, "max_tokens": 1000}
|
|
@@ -473,14 +467,14 @@ class CustomAPIProvider(LLMProvider):
|
|
|
473
467
|
data = response.json()
|
|
474
468
|
return LLMResult(content=data["text"])
|
|
475
469
|
|
|
476
|
-
|
|
477
|
-
|
|
470
|
+
def stream(self, prompt: str):
|
|
471
|
+
with self.client.stream(
|
|
478
472
|
"POST",
|
|
479
473
|
f"{self.base_url}/stream",
|
|
480
474
|
headers={"Authorization": f"Bearer {self.api_key}"},
|
|
481
475
|
json={"prompt": prompt}
|
|
482
476
|
) as response:
|
|
483
|
-
|
|
477
|
+
for chunk in response.iter_text():
|
|
484
478
|
if chunk.strip():
|
|
485
479
|
yield chunk
|
|
486
480
|
|
|
@@ -492,7 +486,7 @@ mf = MarkdownFlow(document, llm_provider=provider)
|
|
|
492
486
|
### Multi-Block Document Processing
|
|
493
487
|
|
|
494
488
|
```python
|
|
495
|
-
|
|
489
|
+
def process_conversation():
|
|
496
490
|
conversation = """
|
|
497
491
|
# AI Assistant
|
|
498
492
|
|
|
@@ -529,7 +523,7 @@ Would you like to start with the basics?
|
|
|
529
523
|
for i, block in enumerate(blocks):
|
|
530
524
|
if block.block_type == BlockType.CONTENT:
|
|
531
525
|
print(f"\n--- Processing Block {i} ---")
|
|
532
|
-
result =
|
|
526
|
+
result = mf.process(
|
|
533
527
|
block_index=i,
|
|
534
528
|
mode=ProcessMode.COMPLETE,
|
|
535
529
|
variables=variables
|
|
@@ -544,9 +538,8 @@ Would you like to start with the basics?
|
|
|
544
538
|
|
|
545
539
|
```python
|
|
546
540
|
from markdown_flow import MarkdownFlow, ProcessMode
|
|
547
|
-
import asyncio
|
|
548
541
|
|
|
549
|
-
|
|
542
|
+
def stream_with_progress():
|
|
550
543
|
document = """
|
|
551
544
|
Generate a comprehensive Python tutorial for {{user_name}}
|
|
552
545
|
focusing on {{topic}} with practical examples.
|
|
@@ -560,12 +553,12 @@ Include code samples, explanations, and practice exercises.
|
|
|
560
553
|
content = ""
|
|
561
554
|
chunk_count = 0
|
|
562
555
|
|
|
563
|
-
|
|
556
|
+
for chunk in mf.process(
|
|
564
557
|
block_index=0,
|
|
565
558
|
mode=ProcessMode.STREAM,
|
|
566
559
|
variables={
|
|
567
560
|
'user_name': 'developer',
|
|
568
|
-
'topic': '
|
|
561
|
+
'topic': 'synchronous programming'
|
|
569
562
|
}
|
|
570
563
|
):
|
|
571
564
|
content += chunk.content
|
|
@@ -599,13 +592,13 @@ class InteractiveDocumentBuilder:
|
|
|
599
592
|
self.user_responses = {}
|
|
600
593
|
self.current_block = 0
|
|
601
594
|
|
|
602
|
-
|
|
595
|
+
def start_interaction(self):
|
|
603
596
|
blocks = self.mf.get_all_blocks()
|
|
604
597
|
|
|
605
598
|
for i, block in enumerate(blocks):
|
|
606
599
|
if block.block_type == BlockType.CONTENT:
|
|
607
600
|
# Process content block with current variables
|
|
608
|
-
result =
|
|
601
|
+
result = self.mf.process(
|
|
609
602
|
block_index=i,
|
|
610
603
|
mode=ProcessMode.COMPLETE,
|
|
611
604
|
variables=self.user_responses
|
|
@@ -614,14 +607,14 @@ class InteractiveDocumentBuilder:
|
|
|
614
607
|
|
|
615
608
|
elif block.block_type == BlockType.INTERACTION:
|
|
616
609
|
# Handle user interaction
|
|
617
|
-
response =
|
|
610
|
+
response = self.handle_interaction(block.content)
|
|
618
611
|
if response:
|
|
619
612
|
self.user_responses.update(response)
|
|
620
613
|
|
|
621
|
-
|
|
622
|
-
from markdown_flow.
|
|
614
|
+
def handle_interaction(self, interaction_content: str):
|
|
615
|
+
from markdown_flow.parser import InteractionParser
|
|
623
616
|
|
|
624
|
-
interaction = InteractionParser.parse(interaction_content)
|
|
617
|
+
interaction = InteractionParser().parse(interaction_content)
|
|
625
618
|
print(f"\n{interaction_content}")
|
|
626
619
|
|
|
627
620
|
if interaction.name == "BUTTONS_ONLY":
|
|
@@ -635,7 +628,7 @@ class InteractiveDocumentBuilder:
|
|
|
635
628
|
return {interaction.variable: selected}
|
|
636
629
|
except (ValueError, IndexError):
|
|
637
630
|
print("Invalid choice")
|
|
638
|
-
return
|
|
631
|
+
return self.handle_interaction(interaction_content)
|
|
639
632
|
|
|
640
633
|
elif interaction.name == "TEXT_ONLY":
|
|
641
634
|
response = input(f"{interaction.question}: ")
|
|
@@ -657,7 +650,7 @@ Great choice, {{name}}! {{subject}} is an excellent field to study.
|
|
|
657
650
|
"""
|
|
658
651
|
|
|
659
652
|
builder = InteractiveDocumentBuilder(template, your_llm_provider)
|
|
660
|
-
|
|
653
|
+
builder.start_interaction()
|
|
661
654
|
```
|
|
662
655
|
|
|
663
656
|
### Variable System Deep Dive
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
markdown_flow/__init__.py,sha256=E-P0SvBLJKQSwj2ZijjjXsDFl9axsrX8oTfTl7YBO7w,2808
|
|
2
|
+
markdown_flow/constants.py,sha256=aroEBhrOGY6JxofRxPFe87vesEJFY1Srm0_jRHVdtig,14274
|
|
3
|
+
markdown_flow/core.py,sha256=bwUdblJPPWqV_Utwn3ijmjpauI_FXcZUlVG30BIa7nw,47831
|
|
4
|
+
markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
|
|
5
|
+
markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
|
|
6
|
+
markdown_flow/llm.py,sha256=MJRbXKj35AjLCAhWpFhS07s-m3YU2qO1HOFff05HG2I,2239
|
|
7
|
+
markdown_flow/models.py,sha256=EJtZ-EQffKHmNbKJXVsHxGXHV0ywOgdAzopWDSjyVmM,2417
|
|
8
|
+
markdown_flow/utils.py,sha256=TlQan3rNIcbIzgOa-kpphFKpw9IXblFKhIesac_lu3Y,28769
|
|
9
|
+
markdown_flow/parser/__init__.py,sha256=zhLc8m7OvkdKk7K70Db9u3EgTGGcPG1_Cxj5EC2Fnwo,1144
|
|
10
|
+
markdown_flow/parser/code_fence_utils.py,sha256=DdkZDTXSCNMfDfODYVOopWd4-5Enci5siplt8JTFs1g,5074
|
|
11
|
+
markdown_flow/parser/interaction.py,sha256=T4W7iO-iyNJnpM7SmvOH_DRlLuWSDcFyIrN2fH6cv7w,12653
|
|
12
|
+
markdown_flow/parser/json_parser.py,sha256=78GhyyOjlg0l4UmKKNc4zrg-4pSHzrJEt7VKqbz3uyE,1305
|
|
13
|
+
markdown_flow/parser/output.py,sha256=LgxvH6-RINM50p58miQtw_fHER1JEWDGucHk5-sZ-gk,8087
|
|
14
|
+
markdown_flow/parser/preprocessor.py,sha256=YO2znQo7biYAxZZIO5oyrH4h88LZPIe3SidX7ZEOS88,4877
|
|
15
|
+
markdown_flow/parser/validation.py,sha256=fc5-zL4_vsgFQuQ0BHXlJRH5Vkx102SKJy-H72tpLK8,3647
|
|
16
|
+
markdown_flow/parser/variable.py,sha256=eJLbVOyZT8uYM5eJNv5kHLqdRoNz5iNlxHhhi2oDW94,2986
|
|
17
|
+
markdown_flow/providers/__init__.py,sha256=QMr8H9gxoLr6pWXoAb11oZX_She6KWPxnRips537nQ4,319
|
|
18
|
+
markdown_flow/providers/config.py,sha256=Y4Nihqj3KxI6_RyvVKF_mv4mBoPNXeLgYQgv0FqxQfU,2057
|
|
19
|
+
markdown_flow/providers/openai.py,sha256=KgExRJ8QsCeU_c-Yx3IhxG2hBbYN5uZ-uf0VTMvD1LE,12326
|
|
20
|
+
markdown_flow-0.2.30.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
|
|
21
|
+
markdown_flow-0.2.30.dist-info/METADATA,sha256=LEehzrEIw6Q_TyvGFuAG-m0fPEKgO-QcaLgD8LcvyYM,20686
|
|
22
|
+
markdown_flow-0.2.30.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
23
|
+
markdown_flow-0.2.30.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
|
|
24
|
+
markdown_flow-0.2.30.dist-info/RECORD,,
|
|
@@ -1,13 +0,0 @@
|
|
|
1
|
-
markdown_flow/__init__.py,sha256=CM2h28s8bHo4_Xl-9Y17QVBRZmcL-naRN2wpBcBGqU4,2851
|
|
2
|
-
markdown_flow/constants.py,sha256=TEbO67-y711KGkog6UZEm4mssFA52kIWhECcbtbkvkA,10196
|
|
3
|
-
markdown_flow/core.py,sha256=Z0c5SssgPhqbDhbO2HZgHAaX6RpJEccb_r9RoGHVEjI,32565
|
|
4
|
-
markdown_flow/enums.py,sha256=Wr41zt0Ce5b3fyLtOTE2erEVp1n92g9OVaBF_BZg_l8,820
|
|
5
|
-
markdown_flow/exceptions.py,sha256=9sUZ-Jd3CPLdSRqG8Pw7eMm7cv_S3VZM6jmjUU8OhIc,976
|
|
6
|
-
markdown_flow/llm.py,sha256=E2aq-OXwt4rS-alpf_iIJd2K38De_O3pzSZHuEaMeoE,2100
|
|
7
|
-
markdown_flow/models.py,sha256=ENcvXMVXwpFN-RzbeVHhXTjBN0bbmRpJ96K-XS2rizI,2893
|
|
8
|
-
markdown_flow/utils.py,sha256=rJOalKxCGuXYiAJzI3WfD-loLc-7BHQGpac934_uC4c,28504
|
|
9
|
-
markdown_flow-0.2.19.dist-info/licenses/LICENSE,sha256=qz3BziejhHPd1xa5eVtYEU5Qp6L2pn4otuj194uGxmc,1069
|
|
10
|
-
markdown_flow-0.2.19.dist-info/METADATA,sha256=HzxjL7F5cuTQbnUjVGYoBOG_qQTYXdHAKxLjQ6j8BNU,21010
|
|
11
|
-
markdown_flow-0.2.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
12
|
-
markdown_flow-0.2.19.dist-info/top_level.txt,sha256=DpigGvQuIt2L0TTLnDU5sylhiTGiZS7MmAMa2hi-AJs,14
|
|
13
|
-
markdown_flow-0.2.19.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|