ssb-pubmd 0.1.0__tar.gz → 0.1.2__tar.gz

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: ssb-pubmd
- Version: 0.1.0
+ Version: 0.1.2
  Summary: SSB Pubmd
  License: MIT
  Author: Olav Landsverk
@@ -13,12 +13,15 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
- Requires-Dist: cryptography (>=45.0.5,<46.0.0)
- Requires-Dist: google-cloud-secret-manager (>=2.24.0,<3.0.0)
+ Requires-Dist: dapla-auth-client (>=1.2.5,<2.0.0)
+ Requires-Dist: ipynbname (>=2025.8.0.0,<2026.0.0.0)
+ Requires-Dist: narwhals (>=2.15.0,<3.0.0)
  Requires-Dist: nbformat (>=5.10.4,<6.0.0)
- Requires-Dist: pyjwt (>=2.10.1,<3.0.0)
+ Requires-Dist: nh3 (>=0.3.2,<0.4.0)
+ Requires-Dist: pandocfilters (>=1.5.1,<2.0.0)
+ Requires-Dist: pydantic (>=2.12.5,<3.0.0)
  Requires-Dist: requests (>=2.32.4,<3.0.0)
- Requires-Dist: types-requests (>=2.32.4.20250611,<3.0.0.0)
+ Requires-Dist: watchfiles (>=1.1.1,<2.0.0)
  Project-URL: Changelog, https://github.com/statisticsnorway/ssb-pubmd/releases
  Project-URL: Documentation, https://statisticsnorway.github.io/ssb-pubmd
  Project-URL: Homepage, https://github.com/statisticsnorway/ssb-pubmd
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "ssb-pubmd"
- version = "0.1.0"
+ version = "0.1.2"
  description = "SSB Pubmd"
  authors = ["Olav Landsverk <stud-oll@ssb.no>"]
  license = "MIT"
@@ -9,6 +9,8 @@ homepage = "https://github.com/statisticsnorway/ssb-pubmd"
  repository = "https://github.com/statisticsnorway/ssb-pubmd"
  documentation = "https://statisticsnorway.github.io/ssb-pubmd"
  classifiers = ["Development Status :: 3 - Alpha"]
+ include = ["ssb_pubmd/templates"]
+

  [tool.poetry.urls]
  Changelog = "https://github.com/statisticsnorway/ssb-pubmd/releases"
@@ -16,11 +18,14 @@ Changelog = "https://github.com/statisticsnorway/ssb-pubmd/releases"
  [tool.poetry.dependencies]
  python = "^3.10"
  requests = "^2.32.4"
- types-requests = "^2.32.4.20250611"
- google-cloud-secret-manager = "^2.24.0"
- pyjwt = "^2.10.1"
- cryptography = "^45.0.5"
  nbformat = "^5.10.4"
+ nh3 = "^0.3.2"
+ pandocfilters = "^1.5.1"
+ ipynbname = "^2025.8.0.0"
+ pydantic = "^2.12.5"
+ narwhals = "^2.15.0"
+ watchfiles = "^1.1.1"
+ dapla-auth-client = "^1.2.5"

  [tool.poetry.group.dev.dependencies]
  pygments = ">=2.10.0"
@@ -38,6 +43,12 @@ sphinx-click = ">=3.0.2"
  typeguard = ">=2.13.3"
  xdoctest = { extras = ["colors"], version = ">=0.15.10" }
  myst-parser = { version = ">=0.16.1" }
+ black = "^25.1.0"
+ darglint = "^1.8.1"
+ types-requests = "^2.32.4.20260107"
+ pandas = "^2.3.3"
+ types-pyyaml = "^6.0.12.20250915"
+ quarto = "^0.1.0"

  [tool.pytest.ini_options]
  pythonpath = ["ssb_pubmd"]
@@ -67,10 +78,10 @@ show_error_context = true
  explicit_package_bases = true

  [tool.ruff]
- force-exclude = true # Apply excludes to pre-commit
+ force-exclude = true # Apply excludes to pre-commit
  show-fixes = true
  src = ["src", "tests"]
- target-version = "py311" # Minimum Python version supported
+ target-version = "py311" # Minimum Python version supported
  include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]
  extend-exclude = [
      "__pycache__",
@@ -83,57 +94,63 @@ extend-exclude = [
  # Ruff rules may be customized as desired: https://docs.astral.sh/ruff/rules/
  [tool.ruff.lint]
  select = [
-     "A", # prevent using keywords that clobber python builtins
-     "ANN", # check type annotations
-     "B", # bugbear: security warnings
-     "E", # pycodestyle
-     "F", # pyflakes
-     "ISC", # implicit string concatenation
-     "I", # sort imports
-     "UP", # alert you when better syntax is available in your python version
-     "RUF", # the ruff developer's own rules
+     "A", # prevent using keywords that clobber python builtins
+     "ANN", # check type annotations
+     "B", # bugbear: security warnings
+     "E", # pycodestyle
+     "F", # pyflakes
+     "ISC", # implicit string concatenation
+     "I", # sort imports
+     "UP", # alert you when better syntax is available in your python version
+     "RUF", # the ruff developer's own rules
  ]
  ignore = [
-     "ANN202", # Don't requiere return type annotation for private functions.
-     "ANN401", # Allow type annotation with type Any.
-     "D100", # Supress undocumented-public-module. Only doc of public api required.
-     "FBT001", # Allow boolean positional arguments in a function.
-     "FBT002", # Allow boolean default positional arguments in a function.
-     "E402", # Supress module-import-not-at-top-of-file, needed in jupyter notebooks.
-     "E501", # Supress line-too-long warnings: trust black's judgement on this one.
+     "ANN202", # Don't requiere return type annotation for private functions.
+     "ANN401", # Allow type annotation with type Any.
+     "D100", # Supress undocumented-public-module. Only doc of public api required.
+     "FBT001", # Allow boolean positional arguments in a function.
+     "FBT002", # Allow boolean default positional arguments in a function.
+     "E402", # Supress module-import-not-at-top-of-file, needed in jupyter notebooks.
+     "E501", # Supress line-too-long warnings: trust black's judgement on this one.
      "PLR2004", # Allow to compare with unnamed numerical constants.
  ]

  [tool.ruff.lint.isort]
  force-single-line = true
+ known-first-party = ["ssb_pubmd"]

  [tool.ruff.lint.mccabe]
  max-complexity = 15

  [tool.ruff.lint.pydocstyle]
- convention = "google" # You can also use "numpy".
+ convention = "google" # You can also use "numpy".

  [tool.ruff.lint.pylint]
  max-args = 8

  [tool.ruff.lint.pep8-naming]
- classmethod-decorators = ["classmethod", "validator", "root_validator", "pydantic.validator"]
+ classmethod-decorators = [
+     "classmethod",
+     "validator",
+     "root_validator",
+     "pydantic.validator",
+ ]

  [tool.ruff.lint.per-file-ignores]
  "*/__init__.py" = ["F401"]
  "**/tests/*" = [
-     "ANN001", # type annotations don't add value for test functions
-     "ANN002", # type annotations don't add value for test functions
-     "ANN003", # type annotations don't add value for test functions
-     "ANN201", # type annotations don't add value for test functions
-     "ANN204", # type annotations don't add value for test functions
-     "ANN205", # type annotations don't add value for test functions
-     "ANN206", # type annotations don't add value for test functions
-     "D100", # docstrings are overkill for test functions
+     "ANN001", # type annotations don't add value for test functions
+     "ANN002", # type annotations don't add value for test functions
+     "ANN003", # type annotations don't add value for test functions
+     "ANN201", # type annotations don't add value for test functions
+     "ANN204", # type annotations don't add value for test functions
+     "ANN205", # type annotations don't add value for test functions
+     "ANN206", # type annotations don't add value for test functions
+     "D100", # docstrings are overkill for test functions
      "D101",
      "D102",
      "D103",
-     "S101", # asserts are encouraged in pytest
+     "S101", # asserts are encouraged in pytest
  ]

  [build-system]
@@ -0,0 +1,4 @@
+ from ssb_pubmd.notebook_client import configure_factbox as Factbox
+ from ssb_pubmd.notebook_client import create_highchart as Highchart
+
+ __all__ = ["Factbox", "Highchart"]
@@ -0,0 +1,13 @@
+ import sys
+
+ from ssb_pubmd.cli import run_cli
+ from ssb_pubmd.config import get_config
+
+
+ def main() -> None:
+     config = get_config()
+     run_cli(sys.argv, config)
+
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,185 @@
+ from collections.abc import Mapping
+ from dataclasses import asdict
+ from dataclasses import dataclass
+ from typing import Any
+ from typing import Literal
+ from typing import Protocol
+
+ import nh3
+
+
+ @dataclass
+ class Content:
+     title: str
+     content_type: str
+     publish_folder: str | None = None
+     publish_id: str | None = None
+
+     def to_dict(self) -> dict[str, Any]:
+         return asdict(self)
+
+     def serialize(self) -> dict[str, Any]:
+         raise NotImplementedError()
+
+ class ContentParser(Protocol):
+     def parse(self, metadata: Mapping[str, Any], html: str | None) -> Content: ...
+
+
+ @dataclass
+ class MimirContent(Content):
+     def is_publishable(self) -> bool:
+         if self.title == "":
+             return False
+         if self.publish_id is None and self.publish_folder is None:
+             return False
+         return True
+
+     def serialize(self) -> dict[str, Any]:
+         if not self.is_publishable():
+             raise Exception()
+         s: dict[str, Any] = {
+             "contentType": "mimir:" + self.content_type,
+             "displayName": self.title,
+             "parentPath": self.publish_folder,
+             "data": {},
+         }
+         if self.publish_id is not None:
+             s["_id"] = self.publish_id
+         return s
+
+
+ @dataclass
+ class Author:
+     name: str
+     email: str
+
+ @dataclass
+ class Article(MimirContent):
+     content_type: str = "article"
+     authors: list[Author] | None = None
+     ingress: str = ""
+     html_text: str = ""
+
+     def serialize(self) -> dict[str, Any]:
+         s = super().serialize()
+         if self.authors:
+             s["data"]["authorItemSet"] = [asdict(author) for author in self.authors]
+         s["data"]["ingress"] = self.ingress
+         s["data"]["articleText"] = self.html_text
+         return s
+
+
+ GraphType = Literal["line", "pie", "column", "bar", "area", "barNegative"]
+
+
+ @dataclass
+ class Highchart(MimirContent):
+     content_type: str = "highchart"
+     graph_type: GraphType = "line"
+     html_table: str | None = None
+     tbml: str | None = None
+     xlabel: str = "x"
+     ylabel: str = "y"
+
+     def serialize(self) -> dict[str, Any]:
+         s = super().serialize()
+
+         if self.html_table is not None:
+             s["data"]["htmlTable"] = self.html_table
+         elif self.tbml is not None:
+             s["data"]["dataSource"] = {
+                 "_selected": "tbprocessor",
+                 "tbprocessor": {"urlOrId": self.tbml},
+             }
+
+         s["data"]["xAxisTitle"] = self.xlabel
+         s["data"]["yAxisTitle"] = self.ylabel
+
+         return s
+
+
+ @dataclass
+ class FactBox(MimirContent):
+     content_type: str = "factBox"
+     display_type: Literal["default", "sneakPeek", "aiIcon"] = "default"
+     html_text: str = ""
+
+     def serialize(self) -> dict[str, Any]:
+         s = super().serialize()
+         s["data"]["expansionBoxType"] = self.display_type
+         s["data"]["text"] = self.html_text
+         return s
+
+
+ BASIC_HTML_TAGS = {
+     "p",
+     "br",
+     "strong",
+     "em",
+     "b",
+     "i",
+     "ul",
+     "ol",
+     "li",
+     "blockquote",
+     "h1",
+     "h2",
+     "h3",
+     "h4",
+     "h5",
+     "a",
+ }
+
+
+ class MimirContentParser:
+     def parse(self, metadata: Mapping[str, Any], html: str | None) -> Content:
+         match metadata.get("content_type"):
+             case "article":
+                 return self._parse_article(metadata, html)
+             case "factBox":
+                 return self._parse_factbox(metadata, html)
+             case "highchart":
+                 return self._parse_highchart(metadata, html)
+             case _:
+                 return MimirContent(**metadata)
+
+     def serialize(self, content: Content) -> dict[str, Any]:
+         if isinstance(content, MimirContent):
+             return content.serialize()
+         else:
+             raise Exception()
+
+     @classmethod
+     def _parse_article(cls, metadata: Mapping[str, Any], html: str | None) -> Article:
+         article = Article(
+             title=metadata["title"],
+             publish_folder="/ssb" + metadata["path"],
+             publish_id=metadata.get("publish_id"),
+             authors=[Author(**data) for data in metadata.get("authors", [])],
+             ingress=metadata.get("ingress", ""),
+         )
+         if html is not None:
+             allowed_html_tags = BASIC_HTML_TAGS
+             html_text = nh3.clean(html, tags=allowed_html_tags)
+             article.html_text = html_text
+         return article
+
+     @classmethod
+     def _parse_factbox(cls, metadata: Mapping[str, Any], html: str | None) -> FactBox:
+         factbox = FactBox(**metadata)
+         if html is not None:
+             allowed_html_tags = BASIC_HTML_TAGS - {"h2"}
+             html_text = nh3.clean(html, tags=allowed_html_tags)
+             factbox.html_text = html_text
+         return factbox
+
+     @classmethod
+     def _parse_highchart(
+         cls, metadata: Mapping[str, Any], html: str | None
+     ) -> Highchart:
+         highchart = Highchart(**metadata)
+         if html is not None:
+             allowed_html_tags = {"table", "tbody", "tr", "td"}
+             html_table = nh3.clean(html, tags=allowed_html_tags)
+             highchart.html_table = html_table
+         return highchart
@@ -0,0 +1,149 @@
+
+ import json
+ import subprocess
+ from collections.abc import Iterator
+ from typing import Any
+ from typing import NamedTuple
+ from typing import Protocol
+ from typing import TypedDict
+
+ import pandocfilters as pf # type: ignore
+
+
+ class Element(NamedTuple):
+     id: str
+     inner_html: str | None
+
+
+ class DocumentProcessor(Protocol):
+     def load(self, raw_content: str) -> None: ...
+     def extract_metadata(self, target_key: str) -> dict[str, Any]: ...
+     def extract_elements(self, target_class: str) -> Iterator[Element]: ...
+     def replace_element(self, id_: str, new_html: str) -> None: ...
+     def extract_html(self) -> str: ...
+
+
+
+ class PandocElement(TypedDict):
+     t: str
+     c: Any
+
+
+ PandocDocument = TypedDict(
+     "PandocDocument",
+     {
+         "pandoc-api-version": list[int],
+         "meta": dict[str, Any],
+         "blocks": list[PandocElement],
+     },
+ )
+
+
+ class PandocDocumentProcessor:
+     """
+     Processor for a pandoc document, i.e. the JSON-serialized pandoc AST of a document.
+
+     Example pandoc AST with exactly one div:
+
+     ```json
+     {
+         "pandoc-api-version": [1, 23, 1],
+         "meta": {},
+         "blocks": [
+             {
+                 "t": "Div",
+                 "c": [
+                     ["my-highchart", ["ssb"], [["title", "My highchart"]]],
+                     []
+                 ]
+             }
+         ]
+     }
+     ```
+     Html equivalent:
+     ```html
+     <div id="my-highchart" class="ssb" title="My highchart">
+     </div>
+     ```
+     References:
+     - Studying the result of command `pandoc FILE -t json`, where FILE is a minimal example document (e.g. Markdown or html).
+     - https://github.com/jgm/pandocfilters has some examples of how to work with the format.
+     - Note: no formal specification exists.
+     """
+
+     document: PandocDocument
+     _element_index: dict[str, int]
+
+     def load(self, raw_content: str) -> None:
+         self.document: PandocDocument = json.loads(raw_content)
+         self._element_index = {}
+
+     def extract_metadata(self, target_key: str) -> dict[str, Any]:
+         def meta_to_dict(meta: Any) -> Any:
+             t, c = meta.get("t"), meta.get("c")
+             if t == "MetaMap":
+                 return {k: meta_to_dict(v) for k, v in c.items()}
+             elif t == "MetaList":
+                 return [meta_to_dict(v) for v in c]
+             else:
+                 return pf.stringify(c)
+
+         return meta_to_dict(self.document["meta"][target_key]) # type: ignore
+
+     def extract_html(self) -> str:
+         return self._document_to_html(self.document)
+
+     def extract_elements(self, target_class: str) -> Iterator[Element]:
+         self._element_index = self._generate_element_index(target_class)
+
+         for id_, i in self._element_index.items():
+             element = self.document["blocks"][i]
+             inner_blocks: list[PandocElement] = element["c"][1]
+             inner_html = self._blocks_to_html(inner_blocks) if inner_blocks else None
+             yield Element(id_, inner_html)
+
+     def replace_element(self, id_: str, new_html: str) -> None:
+         i = self._element_index[id_]
+         self.document["blocks"][i] = {
+             "t": "RawBlock",
+             "c": ["html", new_html],
+         }
+
+     def _generate_element_index(self, target_class: str) -> dict[str, int]:
+         index = {}
+         for i, element in enumerate(self.document["blocks"]):
+             if element["t"] != "Div":
+                 continue
+
+             id_: str = element["c"][0][0]
+             if not id_:
+                 continue
+
+             classes: list[str] = element["c"][0][1]
+             if target_class not in classes:
+                 continue
+
+             index[id_] = i
+
+         return index
+
+     @classmethod
+     def _blocks_to_html(cls, blocks: list[PandocElement]) -> str:
+         document: PandocDocument = {
+             "pandoc-api-version": [1, 23, 1],
+             "meta": {},
+             "blocks": blocks,
+         }
+         return cls._document_to_html(document)
+
+     @classmethod
+     def _document_to_html(cls, document: PandocDocument) -> str:
+         result = subprocess.run(
+             ["pandoc", "-f", "json", "-t", "html"],
+             input=json.dumps(document),
+             text=True,
+             capture_output=True,
+             check=True,
+         )
+         html = result.stdout
+         return html
@@ -0,0 +1,117 @@
+ import os
+ from dataclasses import dataclass
+ from typing import Any
+ from typing import NamedTuple
+ from typing import Protocol
+
+ import requests
+ from dapla_auth_client import AuthClient
+
+ from ssb_pubmd.adapters.content_parser import Content
+ from ssb_pubmd.config import Config
+
+
+ class PublishClientError(Exception): ...
+
+ class HttpClient(Protocol):
+     def post(
+         self, url: str, headers: dict[str, str], payload: dict[str, Any]
+     ) -> dict[str, str]: ...
+
+ class RequestsHttpClient:
+     def post(
+         self, url: str, headers: dict[str, str], payload: dict[str, Any]
+     ) -> dict[str, str]:
+         response = requests.post(
+             url,
+             headers=headers,
+             json=payload,
+         )
+         body = response.json()
+         if not response.ok:
+             raise PublishClientError(
+                 f"Sync failed. Response message: {body.get('msg', 'no message')}"
+             )
+         return body # type: ignore
+
+ class TokenClient(Protocol):
+     def get_token(self) -> str: ...
+
+
+ class LocalTokenClient:
+     def get_token(self) -> str:
+         return os.environ.get("OIDC_TOKEN", "")
+
+
+ class DaplaTokenClient:
+     def get_token(self) -> str:
+         return AuthClient.fetch_personal_token(audiences=["ssbno"])
+
+ class Response(NamedTuple):
+     publish_path: str
+     publish_id: str
+     publish_url: str
+     publish_html: str
+
+ class PublishClient(Protocol):
+     http_client: HttpClient
+
+     def send_content(self, content: Content) -> Response: ...
+
+ DEFAULT_HTTP_CLIENT = RequestsHttpClient()
+ DEFULT_TOKEN_CLIENT = LocalTokenClient()
+ @dataclass
+ class MimirPublishClient:
+     base_url: str
+     endpoint: str
+     preview_base_path: str
+     http_client: HttpClient = DEFAULT_HTTP_CLIENT
+     token_client: TokenClient = DEFULT_TOKEN_CLIENT
+
+     def _create_headers(self) -> dict[str, str]:
+         return {
+             "Authorization": f"Bearer {self.token_client.get_token()}",
+             "Content-Type": "application/json",
+         }
+
+     def send_content(self, content: Content) -> Response:
+         headers = self._create_headers()
+         response_body = self.http_client.post(
+             url=f"{self.base_url}{self.endpoint}",
+             headers=headers,
+             payload=content.serialize(),
+         )
+
+         id_ = response_body.get("_id")
+         path = response_body.get("_path")
+
+         if path is None or id_ is None:
+             raise PublishClientError("Sync failed. Could not parse response body.")
+
+         macro_type = (
+             content.content_type
+             if content.content_type in ["highchart", "factBox"]
+             else None
+         )
+         if id_ is not None and macro_type is not None:
+             html = f"<p>[ {macro_type} {content.content_type}=&quot;{id_}&quot; /]</p>"
+         else:
+             html = ""
+
+         return Response(
+             publish_path=path,
+             publish_id=id_,
+             publish_url=self.base_url + self.preview_base_path + path,
+             publish_html=html,
+         )
+
+
+ def get_publish_client(config: Config) -> PublishClient:
+     return MimirPublishClient(
+         base_url=config.publish_base_url,
+         endpoint="/webapp/pubmd",
+         preview_base_path=config.publish_admin_path + "/site/preview/default/draft",
+         token_client=DaplaTokenClient()
+         if config.use_dapla_token_client
+         else DEFULT_TOKEN_CLIENT,
+     )
@@ -0,0 +1,42 @@
+ import json
+ from collections.abc import Mapping
+ from pathlib import Path
+ from typing import Any
+ from typing import Protocol
+
+
+ class Storage(Protocol):
+     def update(self, key: str, data: Mapping[str, Any]) -> None: ...
+     def get(self, key: str) -> dict[str, Any]: ...
+
+ class LocalFileStorage:
+     path: Path
+
+     def __init__(self, project_folder: Path) -> None:
+         self.path = project_folder / ".ssbno.json"
+         if not self.path.exists():
+             with self.path.open("w") as f:
+                 json.dump({}, f)
+
+     def _load(self) -> dict[str, dict[str, Any]]:
+         with self.path.open() as f:
+             return json.load(f) # type: ignore
+
+     def _save(self, data: dict[str, dict[str, Any]]) -> None:
+         with self.path.open("w") as f:
+             json.dump(data, f, indent=2)
+
+     def update(self, key: str, data: Mapping[str, Any]) -> None:
+         store = self._load()
+
+         current = store.get(key, {})
+         for field, value in data.items():
+             if value is not None:
+                 current[field] = value
+
+         store[key] = current
+         self._save(store)
+
+     def get(self, key: str) -> dict[str, Any]:
+         store = self._load()
+         return store.get(key, {}).copy()