markdown-to-confluence 0.3.5__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/METADATA +150 -17
- markdown_to_confluence-0.4.1.dist-info/RECORD +25 -0
- md2conf/__init__.py +1 -1
- md2conf/__main__.py +20 -17
- md2conf/api.py +529 -216
- md2conf/application.py +85 -96
- md2conf/collection.py +31 -0
- md2conf/converter.py +99 -78
- md2conf/emoji.py +28 -3
- md2conf/extra.py +27 -0
- md2conf/local.py +28 -41
- md2conf/matcher.py +1 -3
- md2conf/mermaid.py +2 -7
- md2conf/metadata.py +0 -2
- md2conf/processor.py +135 -57
- md2conf/properties.py +66 -14
- md2conf/scanner.py +56 -23
- markdown_to_confluence-0.3.5.dist-info/RECORD +0 -23
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/WHEEL +0 -0
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/entry_points.txt +0 -0
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/top_level.txt +0 -0
- {markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/zip-safe +0 -0
md2conf/processor.py
CHANGED
```diff
@@ -6,20 +6,68 @@ Copyright 2022-2025, Levente Hunyadi
 :see: https://github.com/hunyadi/md2conf
 """
 
+import hashlib
 import logging
 import os
 from abc import abstractmethod
 from pathlib import Path
-from typing import Optional
+from typing import Iterable, Optional
 
+from .collection import ConfluencePageCollection
 from .converter import ConfluenceDocument, ConfluenceDocumentOptions, ConfluencePageID
 from .matcher import Matcher, MatcherOptions
-from .metadata import
+from .metadata import ConfluenceSiteMetadata
 from .properties import ArgumentError
+from .scanner import Scanner
 
 LOGGER = logging.getLogger(__name__)
 
 
+class DocumentNode:
+    absolute_path: Path
+    page_id: Optional[str]
+    space_key: Optional[str]
+    title: Optional[str]
+
+    _children: list["DocumentNode"]
+
+    def __init__(
+        self,
+        absolute_path: Path,
+        page_id: Optional[str],
+        space_key: Optional[str] = None,
+        title: Optional[str] = None,
+    ):
+        self.absolute_path = absolute_path
+        self.page_id = page_id
+        self.space_key = space_key
+        self.title = title
+        self._children = []
+
+    def count(self) -> int:
+        c = len(self._children)
+        for child in self._children:
+            c += child.count()
+        return c
+
+    def add_child(self, child: "DocumentNode") -> None:
+        self._children.append(child)
+
+    def children(self) -> Iterable["DocumentNode"]:
+        for child in self._children:
+            yield child
+
+    def descendants(self) -> Iterable["DocumentNode"]:
+        for child in self._children:
+            yield child
+            yield from child.descendants()
+
+    def all(self) -> Iterable["DocumentNode"]:
+        yield self
+        for child in self._children:
+            yield from child.all()
+
+
 class Processor:
     """
     Processes a single Markdown page or a directory of Markdown pages.
@@ -29,7 +77,7 @@ class Processor:
     site: ConfluenceSiteMetadata
     root_dir: Path
 
-    page_metadata:
+    page_metadata: ConfluencePageCollection
 
     def __init__(
        self,
@@ -40,8 +88,7 @@ class Processor:
         self.options = options
         self.site = site
         self.root_dir = root_dir
-
-        self.page_metadata = {}
+        self.page_metadata = ConfluencePageCollection()
 
     def process_directory(self, local_dir: Path) -> None:
         """
@@ -51,13 +98,16 @@ class Processor:
         local_dir = local_dir.resolve(True)
         LOGGER.info("Processing directory: %s", local_dir)
 
-        # Step 1: build index of all
-        self._index_directory(local_dir,
-        LOGGER.info("Indexed %d
+        # Step 1: build index of all Markdown files in directory hierarchy
+        root = self._index_directory(local_dir, None)
+        LOGGER.info("Indexed %d document(s)", root.count())
 
-        # Step 2:
-
-
+        # Step 2: synchronize directory tree structure with page hierarchy in space
+        self._synchronize_tree(root, self.options.root_page_id)
+
+        # Step 3: synchronize files in directory hierarchy with pages in space
+        for path, metadata in self.page_metadata.items():
+            self._synchronize_page(path, ConfluencePageID(metadata.page_id))
 
     def process_page(self, path: Path) -> None:
         """
@@ -65,32 +115,44 @@ class Processor:
         """
 
         LOGGER.info("Processing page: %s", path)
-        self._index_page(path, self.options.root_page_id)
-        self._process_page(path)
 
-
-
-
-
-        self.
+        # Step 1: parse Markdown file
+        root = self._index_file(path)
+
+        # Step 2: find matching page in Confluence
+        self._synchronize_tree(root, self.options.root_page_id)
+
+        # Step 3: synchronize document with page in space
+        for path, metadata in self.page_metadata.items():
+            self._synchronize_page(path, ConfluencePageID(metadata.page_id))
+
+    def _synchronize_page(self, path: Path, page_id: ConfluencePageID) -> None:
+        """
+        Synchronizes a single Markdown document with its corresponding Confluence page.
+        """
+
+        page_id, document = ConfluenceDocument.create(path, self.options, self.root_dir, self.site, self.page_metadata)
+        self._update_page(page_id, document, path)
 
     @abstractmethod
-    def
-        self, absolute_path: Path, parent_id: Optional[ConfluencePageID]
-    ) -> ConfluencePageMetadata:
+    def _synchronize_tree(self, node: DocumentNode, page_id: Optional[ConfluencePageID]) -> None:
         """
-        Creates
+        Creates the cross-reference index and synchronizes the directory tree structure with the Confluence page hierarchy.
+
+        Creates new Confluence pages as necessary, e.g. if no page is linked in the Markdown document, or no page is found with lookup by page title.
+
+        May update the original Markdown document to add tags to associate the document with its corresponding Confluence page.
         """
         ...
 
     @abstractmethod
-    def
-
-
+    def _update_page(self, page_id: ConfluencePageID, document: ConfluenceDocument, path: Path) -> None:
+        """
+        Saves the document as Confluence Storage Format XHTML.
+        """
+        ...
 
-    def _index_directory(
-        self, local_dir: Path, parent_id: Optional[ConfluencePageID]
-    ) -> None:
+    def _index_directory(self, local_dir: Path, parent: Optional[DocumentNode]) -> DocumentNode:
         """
         Indexes Markdown files in a directory hierarchy recursively.
         """
@@ -106,21 +168,21 @@ class Processor:
                 continue
 
             if entry.is_file():
-                files.append(
+                files.append(local_dir / entry.name)
             elif entry.is_dir():
-                directories.append(
+                directories.append(local_dir / entry.name)
 
         # make page act as parent node
         parent_doc: Optional[Path] = None
-        if (
-            parent_doc =
-        elif (
-            parent_doc =
-        elif (
-            parent_doc =
+        if (local_dir / "index.md") in files:
+            parent_doc = local_dir / "index.md"
+        elif (local_dir / "README.md") in files:
+            parent_doc = local_dir / "README.md"
+        elif (local_dir / f"{local_dir.name}.md") in files:
+            parent_doc = local_dir / f"{local_dir.name}.md"
 
         if parent_doc is None and self.options.keep_hierarchy:
-            parent_doc =
+            parent_doc = local_dir / "index.md"
 
             # create a blank page for directory entry
             with open(parent_doc, "w"):
@@ -130,37 +192,55 @@ class Processor:
         if parent_doc in files:
             files.remove(parent_doc)
 
-            #
-
-
-
+            # promote Markdown document in directory as parent page in Confluence
+            node = self._index_file(parent_doc)
+            if parent is not None:
+                parent.add_child(node)
+            parent = node
+        elif parent is None:
+            raise ArgumentError(f"root page requires corresponding top-level Markdown document in {local_dir}")
 
-
-
-
-        for doc in files:
-            self._index_page(doc, parent_id)
+        for file in files:
+            node = self._index_file(file)
+            parent.add_child(node)
 
         for directory in directories:
-            self._index_directory(directory,
+            self._index_directory(directory, parent)
+
+        return parent
 
-    def
+    def _index_file(self, path: Path) -> DocumentNode:
         """
         Indexes a single Markdown file.
         """
 
-
-
-
+        LOGGER.info("Indexing file: %s", path)
+
+        # extract information from a Markdown document found in a local directory.
+        document = Scanner().read(path)
+
+        return DocumentNode(
+            absolute_path=path,
+            page_id=document.page_id,
+            space_key=document.space_key,
+            title=document.title,
+        )
+
+    def _generate_hash(self, absolute_path: Path) -> str:
+        """
+        Computes a digest to be used as a unique string.
+        """
+
+        relative_path = absolute_path.relative_to(self.root_dir)
+        hash = hashlib.md5(relative_path.as_posix().encode("utf-8"))
+        return "".join(f"{c:x}" for c in hash.digest())
 
 
 class ProcessorFactory:
     options: ConfluenceDocumentOptions
     site: ConfluenceSiteMetadata
 
-    def __init__(
-        self, options: ConfluenceDocumentOptions, site: ConfluenceSiteMetadata
-    ) -> None:
+    def __init__(self, options: ConfluenceDocumentOptions, site: ConfluenceSiteMetadata) -> None:
        self.options = options
        self.site = site
 
@@ -187,9 +267,7 @@ class Converter:
         else:
             raise ArgumentError(f"expected: valid file or directory path; got: {path}")
 
-    def process_directory(
-        self, local_dir: Path, root_dir: Optional[Path] = None
-    ) -> None:
+    def process_directory(self, local_dir: Path, root_dir: Optional[Path] = None) -> None:
         """
         Recursively scans a directory hierarchy for Markdown files, and processes each, resolving cross-references.
         """
```
md2conf/properties.py
CHANGED
```diff
@@ -7,7 +7,7 @@ Copyright 2022-2025, Levente Hunyadi
 """
 
 import os
-from typing import Optional
+from typing import Optional, overload
 
 
 class ArgumentError(ValueError):
@@ -22,6 +22,42 @@ class ConfluenceError(RuntimeError):
     "Raised when a Confluence API call fails."
 
 
+@overload
+def _validate_domain(domain: str) -> str: ...
+
+
+@overload
+def _validate_domain(domain: Optional[str]) -> Optional[str]: ...
+
+
+def _validate_domain(domain: Optional[str]) -> Optional[str]:
+    if domain is None:
+        return None
+
+    if domain.startswith(("http://", "https://")) or domain.endswith("/"):
+        raise ArgumentError("Confluence domain looks like a URL; only host name required")
+
+    return domain
+
+
+@overload
+def _validate_base_path(base_path: str) -> str: ...
+
+
+@overload
+def _validate_base_path(base_path: Optional[str]) -> Optional[str]: ...
+
+
+def _validate_base_path(base_path: Optional[str]) -> Optional[str]:
+    if base_path is None:
+        return None
+
+    if not base_path.startswith("/") or not base_path.endswith("/"):
+        raise ArgumentError("Confluence base path must start and end with a '/'")
+
+    return base_path
+
+
 class ConfluenceSiteProperties:
     domain: str
     base_path: str
@@ -42,27 +78,33 @@ class ConfluenceSiteProperties:
         if not opt_base_path:
             opt_base_path = "/wiki/"
 
-
-
-                "Confluence domain looks like a URL; only host name required"
-            )
-        if not opt_base_path.startswith("/") or not opt_base_path.endswith("/"):
-            raise ArgumentError("Confluence base path must start and end with a '/'")
-
-        self.domain = opt_domain
-        self.base_path = opt_base_path
+        self.domain = _validate_domain(opt_domain)
+        self.base_path = _validate_base_path(opt_base_path)
         self.space_key = opt_space_key
 
 
-class ConfluenceConnectionProperties
-    "
+class ConfluenceConnectionProperties:
+    """
+    Properties related to connecting to Confluence.
+
+    :param api_url: Confluence API URL. Required for scoped tokens.
+    :param user_name: Confluence user name.
+    :param api_key: Confluence API key.
+    :param headers: Additional HTTP headers to pass to Confluence REST API calls.
+    """
 
+    domain: Optional[str]
+    base_path: Optional[str]
+    space_key: Optional[str]
+    api_url: Optional[str]
     user_name: Optional[str]
     api_key: str
     headers: Optional[dict[str, str]]
 
     def __init__(
         self,
+        *,
+        api_url: Optional[str] = None,
         domain: Optional[str] = None,
         base_path: Optional[str] = None,
         user_name: Optional[str] = None,
@@ -70,14 +112,24 @@ class ConfluenceConnectionProperties(ConfluenceSiteProperties):
         space_key: Optional[str] = None,
         headers: Optional[dict[str, str]] = None,
     ) -> None:
-
-
+        opt_api_url = api_url or os.getenv("CONFLUENCE_API_URL")
+        opt_domain = domain or os.getenv("CONFLUENCE_DOMAIN")
+        opt_base_path = base_path or os.getenv("CONFLUENCE_PATH")
+        opt_space_key = space_key or os.getenv("CONFLUENCE_SPACE_KEY")
         opt_user_name = user_name or os.getenv("CONFLUENCE_USER_NAME")
         opt_api_key = api_key or os.getenv("CONFLUENCE_API_KEY")
 
         if not opt_api_key:
             raise ArgumentError("Confluence API key not specified")
+        if not opt_api_url and not opt_domain:
+            raise ArgumentError("Confluence API URL or domain required")
+        if not opt_api_url and not opt_base_path:
+            opt_base_path = "/wiki/"
 
+        self.api_url = opt_api_url
+        self.domain = _validate_domain(opt_domain)
+        self.base_path = _validate_base_path(opt_base_path)
+        self.space_key = opt_space_key
         self.user_name = opt_user_name
         self.api_key = opt_api_key
         self.headers = headers
```
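A rough sketch of how the reworked connection properties might behave, following the diff above; the credentials and host name are placeholders, the constructor is keyword-only, and any omitted value falls back to the corresponding `CONFLUENCE_*` environment variable:

```python
from md2conf.properties import ArgumentError, ConfluenceConnectionProperties

# explicit keyword arguments; anything omitted falls back to CONFLUENCE_* environment variables
props = ConfluenceConnectionProperties(
    domain="example.atlassian.net",   # bare host name, not a URL
    user_name="docs-bot@example.com",
    api_key="not-a-real-key",
    space_key="DOCS",
)
print(props.base_path)  # "/wiki/" is assumed when neither an API URL nor a base path is given

try:
    # _validate_domain rejects URL-like values
    ConfluenceConnectionProperties(domain="https://example.atlassian.net", api_key="not-a-real-key")
except ArgumentError as exc:
    print(exc)  # Confluence domain looks like a URL; only host name required
```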
md2conf/scanner.py
CHANGED
```diff
@@ -9,15 +9,26 @@ Copyright 2022-2025, Levente Hunyadi
 import re
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any, Optional, TypeVar
 
 import yaml
+from strong_typing.core import JsonType
+from strong_typing.serialization import DeserializerOptions, json_to_object
+
+T = TypeVar("T")
+
+
+def _json_to_object(
+    typ: type[T],
+    data: JsonType,
+) -> T:
+    return json_to_object(typ, data, options=DeserializerOptions(skip_unassigned=True))
 
 
 def extract_value(pattern: str, text: str) -> tuple[Optional[str], str]:
     values: list[str] = []
 
-    def _repl_func(matchobj: re.Match) -> str:
+    def _repl_func(matchobj: re.Match[str]) -> str:
         values.append(matchobj.group(1))
         return ""
 
@@ -46,16 +57,29 @@ def extract_frontmatter_properties(text: str) -> tuple[Optional[dict[str, Any]],
     return properties, text
 
 
-
-
-
-
-
-
-
-
-
-
+@dataclass
+class DocumentProperties:
+    """
+    An object that holds properties extracted from the front-matter of a Markdown document.
+
+    :param page_id: Confluence page ID.
+    :param space_key: Confluence space key.
+    :param confluence_page_id: Confluence page ID. (Alternative name for JSON de-serialization.)
+    :param confluence_space_key: Confluence space key. (Alternative name for JSON de-serialization.)
+    :param generated_by: Text identifying the tool that generated the document.
+    :param title: The title extracted from front-matter.
+    :param tags: A list of tags (content labels) extracted from front-matter.
+    :param properties: A dictionary of key-value pairs extracted from front-matter to apply as page properties.
+    """
+
+    page_id: Optional[str]
+    space_key: Optional[str]
+    confluence_page_id: Optional[str]
+    confluence_space_key: Optional[str]
+    generated_by: Optional[str]
+    title: Optional[str]
+    tags: Optional[list[str]]
+    properties: Optional[dict[str, JsonType]]
 
 
 @dataclass
@@ -67,6 +91,8 @@ class ScannedDocument:
     :param space_key: Confluence space key.
     :param generated_by: Text identifying the tool that generated the document.
     :param title: The title extracted from front-matter.
+    :param tags: A list of tags (content labels) extracted from front-matter.
+    :param properties: A dictionary of key-value pairs extracted from front-matter to apply as page properties.
     :param text: Text that remains after front-matter and inline properties have been extracted.
     """
 
@@ -74,6 +100,8 @@ class ScannedDocument:
     space_key: Optional[str]
     generated_by: Optional[str]
     title: Optional[str]
+    tags: Optional[list[str]]
+    properties: Optional[dict[str, JsonType]]
     text: str
 
 
@@ -88,30 +116,35 @@ class Scanner:
             text = f.read()
 
         # extract Confluence page ID
-        page_id, text = extract_value(r"<!--\s+confluence-page-id:\s*(\d+)\s+-->", text)
+        page_id, text = extract_value(r"<!--\s+confluence[-_]page[-_]id:\s*(\d+)\s+-->", text)
 
         # extract Confluence space key
-        space_key, text = extract_value(
-            r"<!--\s+confluence-space-key:\s*(\S+)\s+-->", text
-        )
+        space_key, text = extract_value(r"<!--\s+confluence[-_]space[-_]key:\s*(\S+)\s+-->", text)
 
         # extract 'generated-by' tag text
-        generated_by, text = extract_value(r"<!--\s+generated-by:\s*(.*)\s+-->", text)
+        generated_by, text = extract_value(r"<!--\s+generated[-_]by:\s*(.*)\s+-->", text)
 
         title: Optional[str] = None
+        tags: Optional[list[str]] = None
+        properties: Optional[dict[str, JsonType]] = None
 
         # extract front-matter
-
-        if
-
-
-
-
+        data, text = extract_frontmatter_properties(text)
+        if data is not None:
+            p = _json_to_object(DocumentProperties, data)
+            page_id = page_id or p.confluence_page_id or p.page_id
+            space_key = space_key or p.confluence_space_key or p.space_key
+            generated_by = generated_by or p.generated_by
+            title = p.title
+            tags = p.tags
+            properties = p.properties
 
         return ScannedDocument(
             page_id=page_id,
             space_key=space_key,
             generated_by=generated_by,
             title=title,
+            tags=tags,
+            properties=properties,
            text=text,
        )
```
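The page-marker regular expressions above now tolerate underscores as well as dashes, and front-matter handling gains `tags` and `properties`, which are carried through to `ScannedDocument` via the new `DocumentProperties` dataclass. A small sketch of the marker change, using the module-level `extract_value` helper with a made-up document snippet:

```python
from md2conf.scanner import extract_value

text = "<!-- confluence_page_id: 123456789 -->\n# Deployment guide\n"

# the same pattern the updated Scanner.read uses; it matches both
# 'confluence-page-id' and 'confluence_page_id' markers
page_id, remainder = extract_value(r"<!--\s+confluence[-_]page[-_]id:\s*(\d+)\s+-->", text)

print(page_id)    # 123456789
print(remainder)  # the marker comment has been stripped from the returned text
```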
markdown_to_confluence-0.3.5.dist-info/RECORD
DELETED
```diff
@@ -1,23 +0,0 @@
-markdown_to_confluence-0.3.5.dist-info/licenses/LICENSE,sha256=Pv43so2bPfmKhmsrmXFyAvS7M30-1i1tzjz6-dfhyOo,1077
-md2conf/__init__.py,sha256=Uaqb3maQScpYs3FiH8kuM6pUh5JzE4Vy52MgU9pvMTw,402
-md2conf/__main__.py,sha256=bFcfmSnTWeuhmDm7bJ3jJabZ2S8W9biuAP6_R-Cc9As,8034
-md2conf/api.py,sha256=VxrAJ4yCsdGFVAEQQWw5aONwsMz0b6KvN4EMLXCKOwE,26905
-md2conf/application.py,sha256=SIM4yLHaLnvG7wRJLbRvptrkc0q4JMuAhDnanqsuYzA,6697
-md2conf/converter.py,sha256=ASXhs7g79dOU4x1QhfvKL8mtwth508GTGcb3AUHigC4,37286
-md2conf/emoji.py,sha256=48QJtOD0F3Be1laYLvAOwe0GxrJS-vcfjtCdiBsNcAc,1960
-md2conf/entities.dtd,sha256=M6NzqL5N7dPs_eUA_6sDsiSLzDaAacrx9LdttiufvYU,30215
-md2conf/local.py,sha256=998bBRpDAOywA-L0KD4_VyuL2Xftflv0ler-uNPQZn4,3866
-md2conf/matcher.py,sha256=y5WEZNklTpUoJtMJlulTvfhl_v-UMU6wySJAKit91ig,4940
-md2conf/mermaid.py,sha256=ZETocFDKi_fSYyVR1pJ7fo207YYFSuT44MSYFQ8-cZ0,2562
-md2conf/metadata.py,sha256=Xozg2PjJnis7VQYQT_edIvTb8u0cs_ZizPOAxc1N8vg,1003
-md2conf/processor.py,sha256=jSLFy8hqZJXf3b79jp31Fn9-cm4j9xq4HDChp9pyhP0,6706
-md2conf/properties.py,sha256=TOCXLdTfYkKjRwZaMgvXw0mNCI4opEUwpBXro2Kv2B4,2467
-md2conf/puppeteer-config.json,sha256=-dMTAN_7kNTGbDlfXzApl0KJpAWna9YKZdwMKbpOb60,159
-md2conf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-md2conf/scanner.py,sha256=iF8NCQAFO6Yut5aAQr7uxfWzVMMt9j3T5ADoVVSJWKQ,3543
-markdown_to_confluence-0.3.5.dist-info/METADATA,sha256=NiXwBXtQ5WhHce_JX7TBUSefQSR5jk5fERe46BL4vwE,18462
-markdown_to_confluence-0.3.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-markdown_to_confluence-0.3.5.dist-info/entry_points.txt,sha256=F1zxa1wtEObtbHS-qp46330WVFLHdMnV2wQ-ZorRmX0,50
-markdown_to_confluence-0.3.5.dist-info/top_level.txt,sha256=_FJfl_kHrHNidyjUOuS01ngu_jDsfc-ZjSocNRJnTzU,8
-markdown_to_confluence-0.3.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-markdown_to_confluence-0.3.5.dist-info/RECORD,,
```
{markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/WHEEL
RENAMED
File without changes
{markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/entry_points.txt
RENAMED
File without changes
{markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/licenses/LICENSE
RENAMED
File without changes
{markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/top_level.txt
RENAMED
File without changes
{markdown_to_confluence-0.3.5.dist-info → markdown_to_confluence-0.4.1.dist-info}/zip-safe
RENAMED
File without changes