cowork-dash 0.1.4-py3-none-any.whl → 0.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cowork_dash/agent.py +7 -4
- cowork_dash/app.py +729 -121
- cowork_dash/assets/app.js +12 -1
- cowork_dash/assets/favicon.ico +0 -0
- cowork_dash/assets/styles.css +144 -2
- cowork_dash/canvas.py +194 -80
- cowork_dash/cli.py +7 -0
- cowork_dash/components.py +194 -104
- cowork_dash/config.py +13 -0
- cowork_dash/file_utils.py +17 -6
- cowork_dash/layout.py +117 -23
- cowork_dash/tools.py +88 -11
- {cowork_dash-0.1.4.dist-info → cowork_dash-0.1.6.dist-info}/METADATA +31 -40
- cowork_dash-0.1.6.dist-info/RECORD +20 -0
- cowork_dash-0.1.4.dist-info/RECORD +0 -19
- {cowork_dash-0.1.4.dist-info → cowork_dash-0.1.6.dist-info}/WHEEL +0 -0
- {cowork_dash-0.1.4.dist-info → cowork_dash-0.1.6.dist-info}/entry_points.txt +0 -0
- {cowork_dash-0.1.4.dist-info → cowork_dash-0.1.6.dist-info}/licenses/LICENSE +0 -0
cowork_dash/assets/app.js
CHANGED

```diff
@@ -8,6 +8,11 @@
     securityLevel: 'loose',
     logLevel: 'error'
   });
+  // Clear processed flag so diagrams re-render with new theme
+  const mermaidDivs = document.querySelectorAll('.mermaid-diagram');
+  mermaidDivs.forEach(function(div) {
+    div.removeAttribute('data-processed');
+  });
   // Re-render any existing mermaid diagrams
   renderMermaid();
 }
@@ -57,7 +62,13 @@ async function renderMermaid() {
 
   for (const div of mermaidDivs) {
     if (!div.getAttribute('data-processed')) {
-
+      // Get code from stored attribute (for re-renders) or from textContent (first render)
+      let code = div.getAttribute('data-mermaid-code');
+      if (!code) {
+        code = div.textContent.trim();
+        // Store original code for future re-renders (theme changes)
+        div.setAttribute('data-mermaid-code', code);
+      }
       div.setAttribute('data-processed', 'true');
 
       try {
```
cowork_dash/assets/favicon.ico
CHANGED

Binary file (contents not shown in this diff).
cowork_dash/assets/styles.css
CHANGED

```diff
@@ -118,6 +118,41 @@ details summary:hover {
   padding: 0;
 }
 
+/* Breadcrumb navigation styles */
+.breadcrumb-bar {
+  background: var(--mantine-color-gray-0);
+}
+.breadcrumb-item {
+  transition: background 0.15s ease;
+}
+.breadcrumb-clickable:hover {
+  background: var(--mantine-color-blue-light) !important;
+}
+.breadcrumb-separator {
+  user-select: none;
+}
+[data-mantine-color-scheme="dark"] .breadcrumb-bar {
+  background: var(--mantine-color-dark-6) !important;
+}
+
+/* Folder selection styles */
+.folder-select-target {
+  transition: background 0.15s ease;
+}
+
+.folder-expand-toggle {
+  opacity: 0.6;
+  transition: opacity 0.15s ease;
+}
+.folder-expand-toggle:hover {
+  opacity: 1;
+}
+
+/* Dark mode - breadcrumb */
+[data-mantine-color-scheme="dark"] .breadcrumb-clickable:hover {
+  background: var(--mantine-color-dark-5) !important;
+}
+
 /* Dark mode - input/summary/chat */
 [data-mantine-color-scheme="dark"] .file-item:hover { background: var(--mantine-color-dark-5) !important; }
 [data-mantine-color-scheme="dark"] .folder-header:hover { background: var(--mantine-color-dark-5) !important; }
@@ -127,8 +162,17 @@ details summary:hover {
 [data-mantine-color-scheme="dark"] #chat-messages h2,
 [data-mantine-color-scheme="dark"] #chat-messages h3 { color: var(--mantine-color-dark-0); }
 [data-mantine-color-scheme="dark"] #chat-messages strong { color: var(--mantine-color-dark-0); }
-[data-mantine-color-scheme="dark"] #chat-messages code {
-
+[data-mantine-color-scheme="dark"] #chat-messages code {
+  background: var(--mantine-color-dark-6);
+  color: var(--mantine-color-dark-0);
+}
+[data-mantine-color-scheme="dark"] #chat-messages pre {
+  background: var(--mantine-color-dark-6);
+  color: var(--mantine-color-dark-0);
+}
+[data-mantine-color-scheme="dark"] #chat-messages pre code {
+  color: var(--mantine-color-dark-0);
+}
 
 /* DMC Theme Integration */
 :root {
@@ -748,6 +792,104 @@ details summary:hover {
   color: var(--mantine-color-dark-1);
 }
 
+/* Canvas Item Container with Header - New Enhanced Layout */
+.canvas-item-container {
+  background: var(--mantine-color-body);
+  border: 1px solid var(--mantine-color-gray-3);
+  border-radius: 6px;
+  margin-bottom: 12px;
+  overflow: hidden;
+  transition: box-shadow 0.15s ease;
+}
+
+.canvas-item-container:hover {
+  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.08);
+}
+
+.canvas-item-header {
+  display: flex;
+  justify-content: space-between;
+  align-items: center;
+  padding: 6px 10px;
+  border-bottom: 1px solid var(--mantine-color-gray-2);
+  background: var(--mantine-color-gray-0);
+}
+
+.canvas-item-title-text {
+  color: var(--mantine-color-text);
+}
+
+.canvas-item-time {
+  font-size: 11px;
+}
+
+.canvas-item-content {
+  padding: 12px;
+}
+
+.canvas-collapse-btn {
+  opacity: 0.6;
+  transition: opacity 0.15s ease, transform 0.15s ease;
+}
+
+.canvas-collapse-btn:hover {
+  opacity: 1;
+}
+
+.canvas-delete-btn {
+  opacity: 0.4;
+  transition: opacity 0.15s ease, color 0.15s ease;
+}
+
+.canvas-delete-btn:hover {
+  opacity: 1;
+  color: var(--mantine-color-red-6) !important;
+}
+
+.canvas-item-content-wrapper {
+  transition: max-height 0.2s ease-out;
+  overflow: hidden;
+}
+
+/* Dark mode canvas container */
+[data-mantine-color-scheme="dark"] .canvas-item-container {
+  background: var(--mantine-color-dark-6);
+  border-color: var(--mantine-color-dark-4);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-item-container:hover {
+  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-item-header {
+  background: var(--mantine-color-dark-7);
+  border-bottom-color: var(--mantine-color-dark-4);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-item-title-text {
+  color: var(--mantine-color-dark-0);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-item-time {
+  color: var(--mantine-color-dark-2);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-collapse-btn {
+  color: var(--mantine-color-dark-2);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-collapse-btn:hover {
+  color: var(--mantine-color-dark-0);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-delete-btn {
+  color: var(--mantine-color-dark-3);
+}
+
+[data-mantine-color-scheme="dark"] .canvas-delete-btn:hover {
+  color: var(--mantine-color-red-4) !important;
+}
+
 /* ============================================
    Scrollbar - Compact
    ============================================ */
```
cowork_dash/canvas.py
CHANGED

```diff
@@ -4,13 +4,31 @@ import io
 import json
 import base64
 import re
+import uuid
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 from datetime import datetime
 
-def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
+
+def generate_canvas_id() -> str:
+    """Generate a unique ID for a canvas item."""
+    return f"canvas_{uuid.uuid4().hex[:8]}"
+
+
+def parse_canvas_object(
+    obj: Any,
+    workspace_root: Path,
+    title: Optional[str] = None,
+    item_id: Optional[str] = None
+) -> Dict[str, Any]:
     """Parse Python objects into canvas-renderable format.
 
+    Args:
+        obj: The Python object to parse (DataFrame, Figure, Image, str, etc.)
+        workspace_root: Path to the workspace root directory
+        title: Optional title for the canvas item
+        item_id: Optional ID for the canvas item (auto-generated if not provided)
+
     Supports:
     - pd.DataFrame (inline in markdown)
     - matplotlib.figure.Figure (saved to .canvas/ folder)
@@ -22,18 +40,30 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
     obj_type = type(obj).__name__
     module = type(obj).__module__
 
+    # Generate ID and timestamp for this item
+    canvas_id = item_id or generate_canvas_id()
+    created_at = datetime.now().isoformat()
+
+    # Base metadata that all items will have
+    def add_metadata(result: Dict) -> Dict:
+        result["id"] = canvas_id
+        result["created_at"] = created_at
+        if title:
+            result["title"] = title
+        return result
+
     # Ensure .canvas directory exists
     canvas_dir = workspace_root / ".canvas"
     canvas_dir.mkdir(exist_ok=True)
 
     # Pandas DataFrame - keep inline
     if module.startswith('pandas') and obj_type == 'DataFrame':
-        return {
+        return add_metadata({
             "type": "dataframe",
             "data": obj.to_dict('records'),
             "columns": list(obj.columns),
             "html": obj.to_html(index=False, classes="dataframe-table")
-        }
+        })
 
     # Matplotlib Figure - save to file
     elif module.startswith('matplotlib') and 'Figure' in obj_type:
@@ -50,11 +80,11 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
         img_base64 = base64.b64encode(buf.read()).decode('utf-8')
         buf.close()
 
-        return {
+        return add_metadata({
             "type": "matplotlib",
             "file": filename,  # Relative to .canvas/ directory where canvas.md lives
             "data": img_base64  # Keep for current session rendering
-        }
+        })
 
     # Plotly Figure - save to file
     elif module.startswith('plotly') and 'Figure' in obj_type:
@@ -65,11 +95,11 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
         plotly_data = json.loads(obj.to_json())
         filepath.write_text(json.dumps(plotly_data, indent=2))
 
-        return {
+        return add_metadata({
             "type": "plotly",
             "file": filename,  # Relative to .canvas/ directory where canvas.md lives
             "data": plotly_data  # Keep for current session rendering
-        }
+        })
 
     # PIL Image - save to file
     elif module.startswith('PIL') and 'Image' in obj_type:
@@ -86,11 +116,11 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
         img_base64 = base64.b64encode(buf.read()).decode('utf-8')
         buf.close()
 
-        return {
+        return add_metadata({
             "type": "image",
             "file": filename,  # Relative to .canvas/ directory where canvas.md lives
             "data": img_base64  # Keep for current session rendering
-        }
+        })
 
     # Plotly dict format - save to file
     elif isinstance(obj, dict) and ('data' in obj or 'layout' in obj):
@@ -100,11 +130,11 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
 
         filepath.write_text(json.dumps(obj, indent=2))
 
-        return {
+        return add_metadata({
             "type": "plotly",
             "file": filename,  # Relative to .canvas/ directory where canvas.md lives
             "data": obj  # Keep for current session rendering
-        }
+        })
 
     # Markdown string - check for Mermaid diagrams - keep inline
     elif isinstance(obj, str):
@@ -114,26 +144,26 @@ def parse_canvas_object(obj: Any, workspace_root: Path) -> Dict[str, Any]:
         match = re.search(r'```mermaid\s*\n?(.*?)```', obj, re.DOTALL | re.IGNORECASE)
         if match:
             mermaid_code = match.group(1).strip()
-            return {
+            return add_metadata({
                 "type": "mermaid",
                 "data": mermaid_code
-            }
+            })
 
-        return {
+        return add_metadata({
             "type": "markdown",
             "data": obj
-        }
+        })
 
     # Unknown type - convert to string - keep inline
     else:
-        return {
+        return add_metadata({
             "type": "markdown",
             "data": f"```\n{str(obj)}\n```"
-        }
+        })
 
 
 def export_canvas_to_markdown(canvas_items: List[Dict], workspace_root: Path, output_path: str = None):
-    """Export canvas to markdown file with file references."""
+    """Export canvas to markdown file with file references and metadata."""
     # Ensure .canvas directory exists
     canvas_dir = workspace_root / ".canvas"
     canvas_dir.mkdir(exist_ok=True)
@@ -148,6 +178,16 @@ def export_canvas_to_markdown(canvas_items: List[Dict], workspace_root: Path, output_path: str = None):
 
     for i, parsed in enumerate(canvas_items):
         item_type = parsed.get("type", "unknown")
+        item_id = parsed.get("id", f"item_{i}")
+        created_at = parsed.get("created_at", "")
+
+        # Add item metadata as HTML comment (for reload)
+        metadata = {"id": item_id, "type": item_type}
+        if created_at:
+            metadata["created_at"] = created_at
+        if "title" in parsed:
+            metadata["title"] = parsed["title"]
+        lines.append(f"\n<!-- canvas-item: {json.dumps(metadata)} -->")
 
         # Add title if present
         if "title" in parsed:
@@ -188,7 +228,7 @@ def export_canvas_to_markdown(canvas_items: List[Dict], workspace_root: Path, output_path: str = None):
 
 
 def load_canvas_from_markdown(workspace_root: Path, markdown_path: str = None) -> List[Dict]:
-    """Load canvas from markdown file and referenced assets."""
+    """Load canvas from markdown file and referenced assets, preserving metadata."""
     if not markdown_path:
         markdown_path = workspace_root / ".canvas" / "canvas.md"
     else:
@@ -200,119 +240,193 @@ def load_canvas_from_markdown(workspace_root: Path, markdown_path: str = None) -> List[Dict]:
     content = markdown_path.read_text()
     canvas_items = []
 
-    # First,
+    # First, find all metadata comments to get item boundaries and metadata
+    metadata_pattern = r'<!-- canvas-item: ({.*?}) -->'
+    metadata_matches = list(re.finditer(metadata_pattern, content))
+
+    # If we have metadata comments, use them to parse items
+    if metadata_matches:
+        for i, match in enumerate(metadata_matches):
+            try:
+                metadata = json.loads(match.group(1))
+            except json.JSONDecodeError:
+                metadata = {"id": generate_canvas_id()}
+
+            # Find the content between this metadata and the next (or end of file)
+            start = match.end()
+            if i + 1 < len(metadata_matches):
+                end = metadata_matches[i + 1].start()
+            else:
+                end = len(content)
+
+            item_content = content[start:end].strip()
+            item = _parse_item_content(item_content, metadata, markdown_path)
+            if item:
+                canvas_items.append(item)
+    else:
+        # Fallback: legacy parsing without metadata (backwards compatibility)
+        canvas_items = _parse_legacy_canvas(content, markdown_path)
+
+    return canvas_items
+
+
+def _parse_item_content(content: str, metadata: Dict, markdown_path: Path) -> Optional[Dict]:
+    """Parse a single item's content given its metadata."""
+    item_type = metadata.get("type", "markdown")
+    item = {
+        "id": metadata.get("id", generate_canvas_id()),
+        "type": item_type,
+    }
+    if "title" in metadata:
+        item["title"] = metadata["title"]
+    if "created_at" in metadata:
+        item["created_at"] = metadata["created_at"]
+
+    # Remove title heading if present (we already have it in metadata)
+    if "title" in metadata:
+        title_pattern = rf'^##\s*{re.escape(metadata["title"])}\s*\n?'
+        content = re.sub(title_pattern, '', content, count=1).strip()
+
+    if item_type == "mermaid":
+        match = re.search(r'```mermaid\s*\n(.*?)```', content, re.DOTALL | re.IGNORECASE)
+        if match:
+            item["data"] = match.group(1).strip()
+            return item
+
+    elif item_type == "plotly":
+        match = re.search(r'```plotly\s*\n([^\n]+)\n```', content)
+        if match:
+            file_ref = match.group(1).strip()
+            file_path = markdown_path.parent / file_ref
+            if file_path.exists():
+                item["file"] = file_ref
+                item["data"] = json.loads(file_path.read_text())
+                return item
+
+    elif item_type in ("matplotlib", "image"):
+        match = re.search(r'!\[.*?\]\(([^)]+)\)', content)
+        if match:
+            file_ref = match.group(1)
+            if not file_ref.startswith('data:'):
+                file_path = markdown_path.parent / file_ref
+                if file_path.exists():
+                    with open(file_path, 'rb') as f:
+                        item["data"] = base64.b64encode(f.read()).decode('utf-8')
+                    item["file"] = file_ref
+                    item["type"] = "image"  # Normalize type
+                    return item
+
+    elif item_type == "dataframe":
+        match = re.search(r'<table.*?</table>', content, re.DOTALL)
+        if match:
+            item["html"] = match.group(0)
+            return item
+
+    elif item_type == "markdown":
+        # Clean up the content
+        cleaned = content.strip()
+        if cleaned:
+            item["data"] = cleaned
+            return item
+
+    return None
+
+
+def _parse_legacy_canvas(content: str, markdown_path: Path) -> List[Dict]:
+    """Parse canvas without metadata comments (legacy format)."""
+    canvas_items = []
     code_blocks = []
 
     # Find all mermaid blocks
     for match in re.finditer(r'```mermaid\s*\n(.*?)```', content, re.DOTALL | re.IGNORECASE):
-        start, end = match.span()
         code_blocks.append({
             'type': 'mermaid',
-            'start': start,
-            'end': end,
+            'start': match.start(),
+            'end': match.end(),
             'content': match.group(1).strip()
         })
 
-    # Find all plotly blocks
+    # Find all plotly blocks
     for match in re.finditer(r'```plotly\s*\n([^\n]+)\n```', content, re.DOTALL):
-        start, end = match.span()
         code_blocks.append({
             'type': 'plotly_file',
-            'start': start,
-            'end': end,
+            'start': match.start(),
+            'end': match.end(),
             'content': match.group(1).strip()
         })
 
-    # Find all image references
+    # Find all image references
     for match in re.finditer(r'!\[.*?\]\(([^)]+)\)', content):
-        start, end = match.span()
         file_ref = match.group(1)
-        # Skip data: URLs (base64 embedded images)
         if not file_ref.startswith('data:'):
             code_blocks.append({
                 'type': 'image_file',
-                'start': start,
-                'end': end,
+                'start': match.start(),
+                'end': match.end(),
                 'content': file_ref
             })
 
     # Find all HTML tables
     for match in re.finditer(r'<table.*?</table>', content, re.DOTALL):
-        start, end = match.span()
         code_blocks.append({
             'type': 'table',
-            'start': start,
-            'end': end,
+            'start': match.start(),
+            'end': match.end(),
             'content': match.group(0)
         })
 
-    # Sort blocks by position
     code_blocks.sort(key=lambda x: x['start'])
 
-    # Process content in order
     last_pos = 0
     for block in code_blocks:
-        # Add any markdown content before this block
        if block['start'] > last_pos:
             markdown_text = content[last_pos:block['start']].strip()
-
-
-
-
-
-
-                    continue
-                filtered_lines.append(line)
-
-            cleaned_text = '\n'.join(filtered_lines).strip()
-            if cleaned_text:
+            lines = [l for l in markdown_text.split('\n')
+                     if l.strip() not in ['# Canvas Export', '']
+                     and not l.strip().startswith('*Generated:')
+                     and not l.strip().startswith('<!-- canvas-item:')]
+            cleaned = '\n'.join(lines).strip()
+            if cleaned:
                 canvas_items.append({
+                    "id": generate_canvas_id(),
                     "type": "markdown",
-                    "data":
+                    "data": cleaned
                 })
 
-
+        item = {"id": generate_canvas_id()}
         if block['type'] == 'mermaid':
-
-
-
-            })
+            item["type"] = "mermaid"
+            item["data"] = block['content']
+            canvas_items.append(item)
         elif block['type'] == 'plotly_file':
-
-            file_path = markdown_path.parent / file_ref
+            file_path = markdown_path.parent / block['content']
             if file_path.exists():
-
-
-
-
-                    "data": plotly_data
-                })
+                item["type"] = "plotly"
+                item["file"] = block['content']
+                item["data"] = json.loads(file_path.read_text())
+                canvas_items.append(item)
         elif block['type'] == 'image_file':
-
-            file_path = markdown_path.parent / file_ref
+            file_path = markdown_path.parent / block['content']
             if file_path.exists():
                 with open(file_path, 'rb') as f:
-
-
-
-
-                    "data": img_base64
-                })
+                    item["data"] = base64.b64encode(f.read()).decode('utf-8')
+                item["type"] = "image"
+                item["file"] = block['content']
+                canvas_items.append(item)
         elif block['type'] == 'table':
-
-
-
-            })
+            item["type"] = "dataframe"
+            item["html"] = block['content']
+            canvas_items.append(item)
 
         last_pos = block['end']
 
-    # Add any remaining markdown after the last block
     if last_pos < len(content):
-
-        if
+        remaining = content[last_pos:].strip()
+        if remaining:
             canvas_items.append({
+                "id": generate_canvas_id(),
                 "type": "markdown",
-                "data":
+                "data": remaining
             })
 
     return canvas_items
```
cowork_dash/cli.py
CHANGED

```diff
@@ -158,6 +158,7 @@ def run_app_cli(args):
         host=args.host,
         debug=args.debug,
         title=args.title,
+        welcome_message=args.welcome_message,
         config_file=args.config
     )
 
@@ -252,6 +253,12 @@ For more help: https://github.com/dkedar7/cowork-dash
         default="./config.py",
         help="Config file path (default: ./config.py)"
     )
+    run_parser.add_argument(
+        "--welcome-message",
+        type=str,
+        dest="welcome_message",
+        help="Welcome message shown on startup (supports markdown)"
+    )
 
     # Parse arguments
     args = parser.parse_args()
```