ebk 0.1.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ebk might be problematic; consult the registry's advisory page for more details.

Files changed (84)
  1. ebk/__init__.py +35 -0
  2. ebk/ai/__init__.py +23 -0
  3. ebk/ai/knowledge_graph.py +443 -0
  4. ebk/ai/llm_providers/__init__.py +21 -0
  5. ebk/ai/llm_providers/base.py +230 -0
  6. ebk/ai/llm_providers/ollama.py +362 -0
  7. ebk/ai/metadata_enrichment.py +396 -0
  8. ebk/ai/question_generator.py +328 -0
  9. ebk/ai/reading_companion.py +224 -0
  10. ebk/ai/semantic_search.py +434 -0
  11. ebk/ai/text_extractor.py +394 -0
  12. ebk/cli.py +2828 -680
  13. ebk/config.py +260 -22
  14. ebk/db/__init__.py +37 -0
  15. ebk/db/migrations.py +180 -0
  16. ebk/db/models.py +526 -0
  17. ebk/db/session.py +144 -0
  18. ebk/decorators.py +132 -0
  19. ebk/exports/base_exporter.py +218 -0
  20. ebk/exports/html_library.py +1390 -0
  21. ebk/exports/html_utils.py +117 -0
  22. ebk/exports/hugo.py +7 -3
  23. ebk/exports/jinja_export.py +287 -0
  24. ebk/exports/multi_facet_export.py +164 -0
  25. ebk/exports/symlink_dag.py +479 -0
  26. ebk/extract_metadata.py +76 -7
  27. ebk/library_db.py +899 -0
  28. ebk/plugins/__init__.py +42 -0
  29. ebk/plugins/base.py +502 -0
  30. ebk/plugins/hooks.py +444 -0
  31. ebk/plugins/registry.py +500 -0
  32. ebk/repl/__init__.py +9 -0
  33. ebk/repl/find.py +126 -0
  34. ebk/repl/grep.py +174 -0
  35. ebk/repl/shell.py +1677 -0
  36. ebk/repl/text_utils.py +320 -0
  37. ebk/search_parser.py +413 -0
  38. ebk/server.py +1633 -0
  39. ebk/services/__init__.py +11 -0
  40. ebk/services/import_service.py +442 -0
  41. ebk/services/tag_service.py +282 -0
  42. ebk/services/text_extraction.py +317 -0
  43. ebk/similarity/__init__.py +77 -0
  44. ebk/similarity/base.py +154 -0
  45. ebk/similarity/core.py +445 -0
  46. ebk/similarity/extractors.py +168 -0
  47. ebk/similarity/metrics.py +376 -0
  48. ebk/vfs/__init__.py +101 -0
  49. ebk/vfs/base.py +301 -0
  50. ebk/vfs/library_vfs.py +124 -0
  51. ebk/vfs/nodes/__init__.py +54 -0
  52. ebk/vfs/nodes/authors.py +196 -0
  53. ebk/vfs/nodes/books.py +480 -0
  54. ebk/vfs/nodes/files.py +155 -0
  55. ebk/vfs/nodes/metadata.py +385 -0
  56. ebk/vfs/nodes/root.py +100 -0
  57. ebk/vfs/nodes/similar.py +165 -0
  58. ebk/vfs/nodes/subjects.py +184 -0
  59. ebk/vfs/nodes/tags.py +371 -0
  60. ebk/vfs/resolver.py +228 -0
  61. ebk-0.3.2.dist-info/METADATA +755 -0
  62. ebk-0.3.2.dist-info/RECORD +69 -0
  63. {ebk-0.1.0.dist-info → ebk-0.3.2.dist-info}/WHEEL +1 -1
  64. ebk-0.3.2.dist-info/licenses/LICENSE +21 -0
  65. ebk/imports/__init__.py +0 -0
  66. ebk/imports/calibre.py +0 -144
  67. ebk/imports/ebooks.py +0 -116
  68. ebk/llm.py +0 -58
  69. ebk/manager.py +0 -44
  70. ebk/merge.py +0 -308
  71. ebk/streamlit/__init__.py +0 -0
  72. ebk/streamlit/__pycache__/__init__.cpython-310.pyc +0 -0
  73. ebk/streamlit/__pycache__/display.cpython-310.pyc +0 -0
  74. ebk/streamlit/__pycache__/filters.cpython-310.pyc +0 -0
  75. ebk/streamlit/__pycache__/utils.cpython-310.pyc +0 -0
  76. ebk/streamlit/app.py +0 -185
  77. ebk/streamlit/display.py +0 -168
  78. ebk/streamlit/filters.py +0 -151
  79. ebk/streamlit/utils.py +0 -58
  80. ebk/utils.py +0 -311
  81. ebk-0.1.0.dist-info/METADATA +0 -457
  82. ebk-0.1.0.dist-info/RECORD +0 -29
  83. {ebk-0.1.0.dist-info → ebk-0.3.2.dist-info}/entry_points.txt +0 -0
  84. {ebk-0.1.0.dist-info → ebk-0.3.2.dist-info}/top_level.txt +0 -0
ebk/streamlit/filters.py DELETED
@@ -1,151 +0,0 @@
1
- import pandas as pd
2
- import streamlit as st
3
- import logging
4
-
5
- logger = logging.getLogger(__name__)
6
-
7
- def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame:
8
- """
9
- Sanitizes the DataFrame by ensuring correct data types and handling missing values.
10
- """
11
- # List of columns that should contain lists
12
- list_columns = ['creators', 'subjects', 'file_paths']
13
-
14
- def ensure_list(column):
15
- """
16
- Ensures that each entry in the column is a list. If not, replaces it with an empty list.
17
- """
18
- return column.apply(lambda x: x if isinstance(x, list) else [])
19
-
20
- for col in list_columns:
21
- if col in df.columns:
22
- df[col] = ensure_list(df[col])
23
- logger.debug(f"Processed list column: {col}")
24
- else:
25
- df[col] = [[] for _ in range(len(df))]
26
- logger.debug(f"Created empty list column: {col}")
27
-
28
- # Handle 'identifiers' column
29
- if 'identifiers' in df.columns:
30
- df['identifiers'] = df['identifiers'].apply(lambda x: x if isinstance(x, dict) else {})
31
- logger.debug("Sanitized 'identifiers' column.")
32
- else:
33
- df['identifiers'] = [{} for _ in range(len(df))]
34
- logger.debug("Created empty 'identifiers' column.")
35
-
36
- # Sanitize 'language' column
37
- if 'language' in df.columns:
38
- df['language'] = df['language'].apply(lambda x: x if isinstance(x, str) else '').fillna('').astype(str)
39
- logger.debug("Sanitized 'language' column.")
40
- else:
41
- df['language'] = ['' for _ in range(len(df))]
42
- logger.debug("Created empty 'language' column.")
43
-
44
- # Sanitize 'cover_path' column
45
- if 'cover_path' in df.columns:
46
- df['cover_path'] = df['cover_path'].apply(lambda x: x if isinstance(x, str) else '').fillna('').astype(str)
47
- logger.debug("Sanitized 'cover_path' column.")
48
- else:
49
- df['cover_path'] = ['' for _ in range(len(df))]
50
- logger.debug("Created empty 'cover_path' column.")
51
-
52
- # Sanitize string fields: 'title', 'description'
53
- string_fields = ['title', 'description']
54
- for field in string_fields:
55
- if field in df.columns:
56
- df[field] = df[field].apply(lambda x: x if isinstance(x, str) else '').fillna('').astype(str)
57
- logger.debug(f"Sanitized '{field}' column.")
58
- else:
59
- df[field] = ['' for _ in range(len(df))]
60
- logger.debug(f"Created empty '{field}' column.")
61
-
62
- # Sanitize 'date' column
63
- if 'date' in df.columns:
64
- df['date'] = pd.to_numeric(df['date'], errors='coerce')
65
- logger.debug("Sanitized 'date' column to ensure numeric types.")
66
- else:
67
- df['date'] = [None for _ in range(len(df))]
68
- logger.debug("Created empty 'date' column.")
69
-
70
- return df
71
-
72
def create_filters(df: pd.DataFrame) -> pd.DataFrame:
    """
    Render sidebar filter widgets and apply the user's selections to *df*.

    Filters: title substring (case-insensitive), authors, subjects, virtual
    libraries, languages, publication-year range, and identifier text.

    Args:
        df (pd.DataFrame): Sanitized library DataFrame (see sanitize_dataframe).

    Returns:
        pd.DataFrame: A filtered copy of *df*.
    """
    # Sidebar for Filters
    st.sidebar.header("🔍 Filters")

    # Title Search
    title_search = st.sidebar.text_input("🔎 Search by Title")

    # Author Filter (Multi-select)
    all_creators = sorted(set(creator for creators in df['creators'] for creator in creators))
    selected_authors = st.sidebar.multiselect("👤 Filter by Author(s)", all_creators, default=[])

    # Subjects Filter (Multi-select)
    all_subjects = sorted(set(subject for subjects in df['subjects'] for subject in subjects))
    selected_subjects = st.sidebar.multiselect("📚 Filter by Subject(s)", all_subjects, default=[])

    # Virtual-library filter. Guard against a missing column: the sanitizer
    # does not create 'virtual_libs', so the previous unconditional access
    # raised KeyError on libraries without that field.
    if 'virtual_libs' in df.columns:
        all_libraries = sorted(set(lib for libs in df['virtual_libs'] for lib in libs))
    else:
        all_libraries = []
    selected_libraries = st.sidebar.multiselect("📚 Filter by Virtual Library(s)", all_libraries, default=[])

    # Language Filter (Multi-select)
    all_languages = sorted(set(lang for lang in df['language'] if lang))
    selected_languages = st.sidebar.multiselect("🌐 Filter by Language(s)", all_languages, default=[])

    # Publication Date Filter (Range Slider)
    selected_years = None
    if 'date' in df.columns and pd.api.types.is_numeric_dtype(df['date']):
        min_year = int(df['date'].min()) if pd.notna(df['date'].min()) else 0
        max_year = int(df['date'].max()) if pd.notna(df['date'].max()) else 0
        if min_year and max_year:
            selected_years = st.sidebar.slider("📅 Publication Year Range", min_year, max_year, (min_year, max_year))
            logger.debug(f"Publication year range selected: {selected_years}")
        else:
            st.sidebar.info("📅 No valid publication year data available.")
            logger.warning("Publication year data is not available or entirely NaN.")
    else:
        st.sidebar.info("📅 Publication date data is not available or not in a numeric format.")
        logger.warning("Publication date data is not available or not numeric.")

    # Identifier Search
    identifier_search = st.sidebar.text_input("🔑 Search by Identifier (e.g., ISBN)")

    # Apply Filters
    filtered_df = df.copy()

    if title_search:
        filtered_df = filtered_df[filtered_df['title'].str.contains(title_search, case=False, na=False)]
        logger.debug(f"Applied title search filter: '{title_search}'")

    if selected_authors:
        filtered_df = filtered_df[filtered_df['creators'].apply(lambda x: any(creator in selected_authors for creator in x))]
        logger.debug(f"Applied author filter: {selected_authors}")

    if selected_subjects:
        filtered_df = filtered_df[filtered_df['subjects'].apply(lambda x: any(subject in selected_subjects for subject in x))]
        logger.debug(f"Applied subject filter: {selected_subjects}")

    # Only reachable when 'virtual_libs' exists (options are empty otherwise).
    if selected_libraries:
        filtered_df = filtered_df[filtered_df['virtual_libs'].apply(lambda x: any(lib in selected_libraries for lib in x))]
        logger.debug(f"Applied library filter: {selected_libraries}")

    if selected_languages:
        filtered_df = filtered_df[filtered_df['language'].isin(selected_languages)]
        logger.debug(f"Applied language filter: {selected_languages}")

    if selected_years:
        filtered_df = filtered_df[(filtered_df['date'] >= selected_years[0]) & (filtered_df['date'] <= selected_years[1])]
        logger.debug(f"Applied publication year range filter: {selected_years}")

    if identifier_search:
        # Flatten each identifiers dict to "key:value" text, then substring-match.
        # NOTE(review): the pattern is treated as a regex by str.contains — an
        # invalid user pattern will raise; confirm whether literal matching is intended.
        idents = filtered_df['identifiers']
        idents_stringified = idents.apply(
            lambda x: ' '.join(f"{k}:{v}" for k, v in x.items()) if isinstance(x, dict) else str(x)
        )
        filtered_df = filtered_df[idents_stringified.str.contains(identifier_search)]

    return filtered_df
ebk/streamlit/utils.py DELETED
@@ -1,58 +0,0 @@
1
- import json
2
- import os
3
- import zipfile
4
- from io import BytesIO
5
- import streamlit as st
6
- import logging
7
- import streamlit as st
8
- from typing import List, Dict
9
- from collections import Counter
10
- from pathlib import Path
11
-
12
- logger = logging.getLogger(__name__)
13
-
14
def load_metadata(metadata_content: BytesIO) -> list:
    """
    Parse the uploaded metadata.json content.

    Args:
        metadata_content (BytesIO): File-like object holding JSON bytes.

    Returns:
        list: The parsed entries, or an empty list when parsing fails
        (the error is shown in the Streamlit UI and logged).
    """
    try:
        entries = json.load(metadata_content)
        logger.debug("Metadata loaded successfully.")
        return entries
    except json.JSONDecodeError as e:
        # Malformed JSON: surface the parse error to the user.
        st.error(f"JSON decoding error: {e}")
        logger.error(f"JSONDecodeError: {e}")
        return []
    except Exception as e:
        # Anything else (I/O, encoding, ...): report and degrade gracefully.
        st.error(f"Unexpected error loading metadata.json: {e}")
        logger.error(f"Unexpected error: {e}")
        return []
31
-
32
def extract_zip(zip_bytes: BytesIO) -> dict:
    """
    Extract a ZIP archive in-memory.

    Args:
        zip_bytes (BytesIO): File-like object holding the ZIP data.

    Returns:
        dict: Mapping of normalized member path -> BytesIO with the file data.
        Returns {} on a bad archive or extraction error (reported via the UI).
        Members that would escape the extraction root (absolute paths or a
        leading '..' component) are skipped.
    """
    logger = logging.getLogger(__name__)
    extracted_files = {}
    try:
        with zipfile.ZipFile(zip_bytes) as z:
            for file_info in z.infolist():
                if file_info.is_dir():
                    continue
                normalized_path = os.path.normpath(file_info.filename)
                # Path-traversal guard. The previous check,
                # commonprefix([path, basename(path)]) != "", was broken: it
                # silently dropped every legitimate nested file (prefix is ""
                # whenever the path has a directory part) and did not reliably
                # block traversal. Reject absolute paths and '..' escapes.
                if os.path.isabs(normalized_path) or normalized_path.split(os.sep)[0] == os.pardir:
                    logger.warning(f"Skipped unsafe archive member: {file_info.filename}")
                    continue
                with z.open(file_info) as f:
                    extracted_files[normalized_path] = BytesIO(f.read())
                logger.debug(f"Extracted: {normalized_path}")
        logger.debug("ZIP archive extracted successfully.")
        return extracted_files
    except zipfile.BadZipFile:
        st.error("The uploaded file is not a valid ZIP archive.")
        logger.error("BadZipFile encountered.")
        return {}
    except Exception as e:
        st.error(f"Error extracting ZIP file: {e}")
        logger.error(f"Exception during ZIP extraction: {e}")
        return {}
58
-
ebk/utils.py DELETED
@@ -1,311 +0,0 @@
1
- import json
2
- import os
3
- from collections import Counter
4
- from pathlib import Path
5
- from typing import List, Dict, Optional
6
- import logging
7
- from jmespath import search as jmes_search
8
- import sys
9
- from rich.console import Console
10
- from rich.table import Table
11
- from rich.markdown import Markdown
12
- from rich import print
13
- import re
14
-
15
- RICH_AVAILABLE = True
16
-
17
- logger = logging.getLogger(__name__)
18
-
19
def search_jmes(lib_dir: str, expression: str):
    """
    Run a JMESPath query against an ebk library.

    JMESPath is a very flexible way to query the library's JSON metadata,
    though the expression language has a steep learning curve.

    Args:
        lib_dir (str): Path to the ebk library directory
        expression (str): Search expression (JMESPath)

    Returns:
        Any: Result of the JMESPath search ([] when the library fails to load)
    """
    entries = load_library(lib_dir)
    if not entries:
        logger.error(f"Failed to load the library at {lib_dir}")
        return []
    return jmes_search(expression, entries)
40
-
41
def search_regex(lib_dir: str, expression: str, fields: Optional[List[str]] = None):
    """
    Return library entries whose chosen string fields match a regex.

    Args:
        lib_dir (str): Path to the ebk library directory.
        expression (str): Regular expression to search for.
        fields (Optional[List[str]]): Field names to search (default: ["title"]).

    Returns:
        List[Dict]: Entries with at least one matching field.
    """
    # Fix: the previous signature used a mutable default (fields=["title"]),
    # a shared-object footgun; None is now the sentinel for the same default.
    if fields is None:
        fields = ["title"]

    # Compile once instead of re-matching the raw pattern per field.
    pattern = re.compile(expression)

    library = load_library(lib_dir)
    results = []
    for entry in library:
        for key, value in entry.items():
            if key in fields and value and isinstance(value, str) and pattern.search(value):
                results.append(entry)
                break  # one matching field is enough for this entry

    return results
53
-
54
-
55
def load_library(lib_dir: str) -> List[Dict]:
    """
    Load an ebk library from the specified directory.

    Args:
        lib_dir (str): Path to the ebk library directory

    Returns:
        List[Dict]: Entries parsed from <lib_dir>/metadata.json, or [] when
        the file is missing or contains invalid JSON (errors are logged).
    """
    metadata_path = Path(lib_dir) / "metadata.json"
    if not metadata_path.exists():
        logger.error(f"Metadata file not found at {metadata_path}")
        return []

    # Fix: open with an explicit encoding — metadata.json is UTF-8 JSON, and
    # the platform default encoding is not guaranteed to decode it correctly.
    with open(metadata_path, "r", encoding="utf-8") as f:
        try:
            return json.load(f)
        except json.JSONDecodeError as e:
            logger.error(f"Error decoding JSON from {metadata_path}: {e}")
            return []
78
-
79
def get_library_statistics(lib_dir: str,
                           keywords: Optional[List[str]] = None) -> Dict:
    """
    Compute statistics for an ebk library.

    Args:
        lib_dir (str): Path to the ebk library directory.
        keywords (Optional[List[str]]): Keywords to count in titles
            (default: None, meaning no keyword counting).

    Returns:
        dict: Statistics about the library, or {} when it cannot be loaded.
    """
    # Fix: the default keywords=None previously crashed with a TypeError at
    # `for keyword in keywords`; treat None as "no keywords".
    if keywords is None:
        keywords = []

    # Load the library
    library = load_library(lib_dir)
    if not library:
        logger.error(f"Failed to load the library at {lib_dir}")
        return {}

    # Initialize counters and statistics
    stats = {
        "total_entries": 0,
        "languages": Counter(),
        "creators_count": 0,
        "average_creators_per_entry": 0,
        "most_creators_in_entry": 0,
        "least_creators_in_entry": 0,
        "top_creators": Counter(),
        "subjects": Counter(),
        "most_common_subjects": [],
        "average_title_length": 0,
        "longest_title": "",
        "shortest_title": "",
        "virtual_libs": Counter(),
        "titles_with_keywords": Counter(),
    }

    title_lengths = []

    for entry in library:
        # Total entries
        stats["total_entries"] += 1

        # Languages
        language = entry.get("language", "unknown")
        stats["languages"][language] += 1

        # Creators
        creators = entry.get("creators", [])
        stats["creators_count"] += len(creators)
        stats["top_creators"].update(creators)
        stats["most_creators_in_entry"] = max(stats["most_creators_in_entry"], len(creators))
        if stats["least_creators_in_entry"] == 0 or len(creators) < stats["least_creators_in_entry"]:
            stats["least_creators_in_entry"] = len(creators)

        # Subjects
        subjects = entry.get("subjects", [])
        stats["subjects"].update(subjects)

        # Titles
        title = entry.get("title", "")
        if title:
            title_lengths.append(len(title))
            if len(title) > len(stats["longest_title"]):
                stats["longest_title"] = title
            if not stats["shortest_title"] or len(title) < len(stats["shortest_title"]):
                stats["shortest_title"] = title

            # Keywords (case-insensitive substring match in titles)
            for keyword in keywords:
                if keyword.lower() in title.lower():
                    stats["titles_with_keywords"][keyword] += 1

        # Virtual Libraries
        virtual_libs = entry.get("virtual_libs", [])
        stats["virtual_libs"].update(virtual_libs)

    # Post-process statistics (library is non-empty here, so the division is safe)
    stats["average_creators_per_entry"] = round(stats["creators_count"] / stats["total_entries"], 2)
    stats["average_title_length"] = round(sum(title_lengths) / len(title_lengths), 2) if title_lengths else 0
    stats["most_common_subjects"] = stats["subjects"].most_common(5)
    stats["languages"] = dict(stats["languages"])
    stats["top_creators"] = dict(stats["top_creators"].most_common(5))
    stats["titles_with_keywords"] = dict(stats["titles_with_keywords"])
    stats["virtual_libs"] = dict(stats["virtual_libs"])

    return stats
166
-
167
def get_unique_filename(target_path: str) -> str:
    """
    Return *target_path* itself if it does not exist; otherwise return the
    first " (N)"-suffixed variant that is free.

    Example:
        'myfile.pdf' -> if it exists -> 'myfile (1).pdf' -> if that exists -> 'myfile (2).pdf'
    """
    if not os.path.exists(target_path):
        return target_path

    stem, suffix = os.path.splitext(target_path)
    n = 0
    while True:
        n += 1
        candidate = f"{stem} ({n}){suffix}"
        if not os.path.exists(candidate):
            return candidate
186
-
187
def enumerate_ebooks(metadata_list: List[Dict],
                     lib_path: Path,
                     indices: Optional[List[int]] = None,
                     detailed: Optional[bool] = False) -> None:
    """
    Enumerate and display the ebooks in the specified library directory.

    For each ebook, displays its index, title, creators, and a clickable link
    to the first file. With detailed=True, additional metadata columns
    (subjects, language, date, identifiers, publisher, file size, virtual
    libraries, UID) are shown.

    Args:
        metadata_list (List[Dict]): List of metadata dictionaries for each ebook.
        lib_path (Path): Library root used to resolve relative file paths.
        indices (Optional[List[int]]): Indices to display (default: all).
        detailed (Optional[bool]): Show the extra metadata columns.
    """
    console = Console()

    total_books = len(metadata_list)
    if total_books == 0:
        console.print("[yellow]No ebooks found in the library.[/yellow]")
        return

    if indices is None:
        indices = range(total_books)

    console.print(f"📚 [bold]Found {total_books} ebook(s) in the library:[/bold]\n")

    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("#", style="dim")
    table.add_column("Title")
    table.add_column("Creators")
    table.add_column("Link")

    if detailed:
        table.add_column("Subjects")
        table.add_column("Language")
        table.add_column("Date")
        table.add_column("Identifiers")
        table.add_column("Publisher")
        table.add_column("File Size")
        table.add_column("Virtual Libraries")
        table.add_column("UID")

    for i, book in enumerate(metadata_list, start=0):

        if i not in indices:
            continue

        title = book.get('title', '-')
        creators = book.get('creators', ['-'])
        if not isinstance(creators, list):
            creators = [str(creators)]
        creators_str = ', '.join(creators)

        ebook_paths = book.get('file_paths', [])
        ebook_path = ebook_paths[0] if ebook_paths else None

        file_size = "-"
        if ebook_path:
            ebook_full_path = lib_path / ebook_path
            if ebook_full_path.exists():
                # Resolve the path to an absolute path
                resolved_path = ebook_full_path.resolve()
                # Convert Windows paths to URL format if necessary
                if sys.platform.startswith('win'):
                    ebook_link = resolved_path.as_uri()
                else:
                    ebook_link = f"file://{resolved_path}"
                link_display = f"[link={ebook_link}]🔗 Open[/link]"
                file_size = str(resolved_path.stat().st_size)
            else:
                ebook_link = "File not found"
                link_display = "[red]🔗 Not Found[/red]"
        else:
            ebook_link = "Unknown"
            link_display = "[red]🔗 Unknown[/red]"

        row = [str(i), title, creators_str, link_display]

        if detailed:
            # Fix: the detailed columns were declared but never populated —
            # add_row() only ever received the four base cells.
            identifiers = book.get('identifiers', {})
            if isinstance(identifiers, dict):
                identifiers_str = ', '.join(f"{k}:{v}" for k, v in identifiers.items())
            else:
                identifiers_str = str(identifiers)
            row.extend([
                ', '.join(book.get('subjects', [])),
                str(book.get('language', '-')),
                str(book.get('date', '-')),
                identifiers_str,
                str(book.get('publisher', '-')),
                file_size,
                ', '.join(book.get('virtual_libs', [])),
                str(book.get('unique_id', '-')),
            ])

        table.add_row(*row)

    console.print(table)
    console.print("\n")  # Add some spacing
264
-
265
def get_index_by_unique_id(lib_dir: str, id: str) -> int:
    """
    Get the index of an entry in the library by its unique ID.

    Args:
        lib_dir (str): Path to the ebk library directory.
        id (str): Unique ID to search for.

    Returns:
        int: Index of the entry with the specified unique ID. -1 if not found.

    Raises:
        ValueError: If the library cannot be loaded.
    """
    library = load_library(lib_dir)
    if not library:
        raise ValueError("Failed to load the library.")

    # First matching index, or -1 when no entry carries this unique_id.
    return next(
        (idx for idx, entry in enumerate(library) if entry.get('unique_id') == id),
        -1,
    )
289
-
290
def print_json_as_table(data):
    """
    Pretty print JSON data as a table using Rich.

    Args:
        data: JSON data to print
    """
    if not RICH_AVAILABLE:
        # Plain JSON dump when Rich is unavailable.
        print(json.dumps(data, indent=2))
        return

    if not isinstance(data, dict):
        # Only dicts get the key/value table treatment.
        print(data)
        return

    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("Key", style="dim", width=20)
    table.add_column("Value", width=80)
    for key, value in data.items():
        table.add_row(str(key), str(value))
    Console().print(table)
311
-