aspose-cells-foss 25.12.1 (aspose_cells_foss-25.12.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. aspose/__init__.py +14 -0
  2. aspose/cells/__init__.py +31 -0
  3. aspose/cells/cell.py +350 -0
  4. aspose/cells/constants.py +44 -0
  5. aspose/cells/converters/__init__.py +13 -0
  6. aspose/cells/converters/csv_converter.py +55 -0
  7. aspose/cells/converters/json_converter.py +46 -0
  8. aspose/cells/converters/markdown_converter.py +453 -0
  9. aspose/cells/drawing/__init__.py +17 -0
  10. aspose/cells/drawing/anchor.py +172 -0
  11. aspose/cells/drawing/collection.py +233 -0
  12. aspose/cells/drawing/image.py +338 -0
  13. aspose/cells/formats.py +80 -0
  14. aspose/cells/formula/__init__.py +10 -0
  15. aspose/cells/formula/evaluator.py +360 -0
  16. aspose/cells/formula/functions.py +433 -0
  17. aspose/cells/formula/tokenizer.py +340 -0
  18. aspose/cells/io/__init__.py +27 -0
  19. aspose/cells/io/csv/__init__.py +8 -0
  20. aspose/cells/io/csv/reader.py +88 -0
  21. aspose/cells/io/csv/writer.py +98 -0
  22. aspose/cells/io/factory.py +138 -0
  23. aspose/cells/io/interfaces.py +48 -0
  24. aspose/cells/io/json/__init__.py +8 -0
  25. aspose/cells/io/json/reader.py +126 -0
  26. aspose/cells/io/json/writer.py +119 -0
  27. aspose/cells/io/md/__init__.py +8 -0
  28. aspose/cells/io/md/reader.py +161 -0
  29. aspose/cells/io/md/writer.py +334 -0
  30. aspose/cells/io/models.py +64 -0
  31. aspose/cells/io/xlsx/__init__.py +9 -0
  32. aspose/cells/io/xlsx/constants.py +312 -0
  33. aspose/cells/io/xlsx/image_writer.py +311 -0
  34. aspose/cells/io/xlsx/reader.py +284 -0
  35. aspose/cells/io/xlsx/writer.py +931 -0
  36. aspose/cells/plugins/__init__.py +6 -0
  37. aspose/cells/plugins/docling_backend/__init__.py +7 -0
  38. aspose/cells/plugins/docling_backend/backend.py +535 -0
  39. aspose/cells/plugins/markitdown_plugin/__init__.py +15 -0
  40. aspose/cells/plugins/markitdown_plugin/plugin.py +128 -0
  41. aspose/cells/range.py +210 -0
  42. aspose/cells/style.py +287 -0
  43. aspose/cells/utils/__init__.py +54 -0
  44. aspose/cells/utils/coordinates.py +68 -0
  45. aspose/cells/utils/exceptions.py +43 -0
  46. aspose/cells/utils/validation.py +102 -0
  47. aspose/cells/workbook.py +352 -0
  48. aspose/cells/worksheet.py +670 -0
  49. aspose_cells_foss-25.12.1.dist-info/METADATA +189 -0
  50. aspose_cells_foss-25.12.1.dist-info/RECORD +53 -0
  51. aspose_cells_foss-25.12.1.dist-info/WHEEL +5 -0
  52. aspose_cells_foss-25.12.1.dist-info/entry_points.txt +2 -0
  53. aspose_cells_foss-25.12.1.dist-info/top_level.txt +1 -0
aspose/cells/io/interfaces.py
@@ -0,0 +1,48 @@
+ """
+ Unified interfaces for format handlers.
+ """
+
+ from abc import ABC, abstractmethod
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from ..workbook import Workbook
+     from .models import WorkbookData
+
+
+ class IFormatHandler(ABC):
+     """Unified interface for format handlers."""
+
+     @abstractmethod
+     def load_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
+         """
+         Load file into workbook object.
+         Maintains compatibility with existing interface.
+         """
+         pass
+
+     @abstractmethod
+     def save_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
+         """
+         Save workbook object to file.
+         Maintains compatibility with existing interface.
+         """
+         pass
+
+     def read_to_data(self, file_path: str, **options) -> 'WorkbookData':
+         """Read file and return unified data model."""
+         from ..workbook import Workbook
+         from .models import WorkbookData
+
+         temp_workbook = Workbook()
+         # Clear default sheet since we're loading from file
+         temp_workbook._worksheets.clear()
+         temp_workbook._active_sheet = None
+
+         self.load_workbook(temp_workbook, file_path, **options)
+         return WorkbookData.from_workbook(temp_workbook)
+
+     def write_from_data(self, data: 'WorkbookData', file_path: str, **options) -> None:
+         """Write unified data model to file."""
+         workbook = data.to_workbook()
+         self.save_workbook(workbook, file_path, **options)
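
The two abstract methods above are all a concrete handler has to supply; read_to_data and write_from_data are inherited. As a rough illustration only, here is a sketch of a hypothetical tab-separated handler built on this interface. The import paths are inferred from the file list in this diff, and the Workbook/Worksheet calls (create_sheet, cell, active, max_row, max_column, _cells) are assumptions based on how the bundled readers and writers below use them.

# Hypothetical example, not part of the package. Import paths inferred from the
# file list above; the Workbook/Worksheet API is assumed from the readers and
# writers shown later in this diff.
from aspose.cells.io.interfaces import IFormatHandler
from aspose.cells.workbook import Workbook


class TsvHandler(IFormatHandler):
    """Toy tab-separated-values handler: only the two abstract methods are implemented."""

    def load_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
        # Replace the default sheet with the file contents, mirroring the bundled readers.
        workbook._worksheets.clear()
        workbook._active_sheet = None
        sheet = workbook.create_sheet("Sheet1")
        with open(file_path, 'r', encoding=options.get('encoding', 'utf-8')) as fh:
            for row_idx, line in enumerate(fh, 1):
                for col_idx, value in enumerate(line.rstrip('\n').split('\t'), 1):
                    if value:
                        sheet.cell(row_idx, col_idx, value)

    def save_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
        # Walk the active sheet's populated bounds, emitting one tab-joined line per row.
        sheet = workbook.active
        with open(file_path, 'w', encoding=options.get('encoding', 'utf-8')) as fh:
            for row in range(1, sheet.max_row + 1):
                cells = (sheet._cells.get((row, col)) for col in range(1, sheet.max_column + 1))
                fh.write('\t'.join('' if c is None or c.value is None else str(c.value)
                                   for c in cells) + '\n')

# With only those two methods defined, the inherited helpers work unchanged, e.g.:
#     data = TsvHandler().read_to_data("table.tsv")
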
aspose/cells/io/json/__init__.py
@@ -0,0 +1,8 @@
+ """
+ JSON I/O operations.
+ """
+
+ from .reader import JsonReader
+ from .writer import JsonWriter
+
+ __all__ = ["JsonReader", "JsonWriter"]
aspose/cells/io/json/reader.py
@@ -0,0 +1,126 @@
+ """
+ JSON file reader for loading JSON data into workbook format.
+ """
+
+ import json
+ from typing import Dict, List, Optional, Union, Any, TYPE_CHECKING
+ from pathlib import Path
+ from ...formats import CellValue
+
+ if TYPE_CHECKING:
+     from ...workbook import Workbook
+
+
+ class JsonReader:
+     """Reader for JSON files."""
+
+     def __init__(self):
+         pass
+
+     def read(self, file_path: str, **kwargs) -> Union[List[List[CellValue]], Dict[str, List[List[CellValue]]]]:
+         """Read JSON file and return data in tabular format."""
+         encoding = kwargs.get('encoding', 'utf-8')
+
+         try:
+             with open(file_path, 'r', encoding=encoding) as file:
+                 data = json.load(file)
+
+             return self._convert_json_to_tabular(data)
+
+         except FileNotFoundError:
+             raise FileNotFoundError(f"JSON file not found: {file_path}")
+         except json.JSONDecodeError as e:
+             raise ValueError(f"Invalid JSON format: {e}")
+         except Exception as e:
+             raise ValueError(f"Error reading JSON file: {e}")
+
+     def _convert_json_to_tabular(self, data: Any) -> Union[List[List[CellValue]], Dict[str, List[List[CellValue]]]]:
+         """Convert JSON data to tabular format."""
+         if isinstance(data, dict):
+             # Check if it's a multi-sheet format (keys are sheet names)
+             if all(isinstance(v, list) for v in data.values()):
+                 result = {}
+                 for sheet_name, sheet_data in data.items():
+                     result[sheet_name] = self._convert_list_to_rows(sheet_data)
+                 return result
+             else:
+                 # Single object, convert to single row
+                 return self._convert_dict_to_rows(data)
+         elif isinstance(data, list):
+             return self._convert_list_to_rows(data)
+         else:
+             # Single value, create single cell
+             return [[self._convert_value(data)]]
+
+     def _convert_list_to_rows(self, data_list: List[Any]) -> List[List[CellValue]]:
+         """Convert list of objects/values to rows."""
+         if not data_list:
+             return []
+
+         if isinstance(data_list[0], dict):
+             # List of objects - create header from keys
+             headers = list(data_list[0].keys()) if data_list else []
+             rows = [headers] # Add header row
+
+             for item in data_list:
+                 row = []
+                 for header in headers:
+                     value = item.get(header) if isinstance(item, dict) else None
+                     row.append(self._convert_value(value))
+                 rows.append(row)
+
+             return rows
+         else:
+             # List of simple values - convert to single column
+             return [[self._convert_value(item)] for item in data_list]
+
+     def _convert_dict_to_rows(self, data_dict: Dict[str, Any]) -> List[List[CellValue]]:
+         """Convert dictionary to rows (key-value pairs)."""
+         rows = []
+         for key, value in data_dict.items():
+             if isinstance(value, (list, dict)):
+                 # Complex value, convert to JSON string
+                 value_str = json.dumps(value, ensure_ascii=False)
+                 rows.append([key, value_str])
+             else:
+                 rows.append([key, self._convert_value(value)])
+         return rows
+
+     def _convert_value(self, value: Any) -> CellValue:
+         """Convert JSON value to appropriate cell value."""
+         if value is None:
+             return None
+         elif isinstance(value, bool):
+             return value
+         elif isinstance(value, (int, float)):
+             return value
+         elif isinstance(value, str):
+             return value
+         else:
+             # Complex types, convert to JSON string
+             return json.dumps(value, ensure_ascii=False)
+
+     def load_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
+         """Load JSON file into workbook object."""
+         data = self.read(file_path, **options)
+
+         # Clear existing worksheets
+         workbook._worksheets.clear()
+         workbook._active_sheet = None
+
+         if isinstance(data, dict):
+             # Multi-sheet format
+             for sheet_name, sheet_rows in data.items():
+                 worksheet = workbook.create_sheet(sheet_name)
+                 self._populate_worksheet(worksheet, sheet_rows)
+         else:
+             # Single sheet format
+             worksheet = workbook.create_sheet("Sheet1")
+             self._populate_worksheet(worksheet, data)
+
+     def _populate_worksheet(self, worksheet, rows: List[List[CellValue]]) -> None:
+         """Populate worksheet with row data."""
+         for row_idx, row_data in enumerate(rows, 1):
+             for col_idx, cell_value in enumerate(row_data, 1):
+                 if cell_value is not None:
+                     worksheet.cell(row_idx, col_idx, cell_value)
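
In short, the reader accepts three JSON shapes: a dict whose values are all lists becomes one sheet per key, a list of objects becomes a header row plus one row per object (a list of scalars becomes a single column), and anything else collapses to key/value rows or a single cell. A usage sketch follows; the JsonReader import path mirrors aspose/cells/io/json/reader.py in this diff, while the Workbook import path and file names are assumptions.

# Usage sketch; import paths inferred from this diff, Workbook location assumed.
import json

from aspose.cells.io.json.reader import JsonReader
from aspose.cells.workbook import Workbook

# A dict whose values are all lists is treated as {sheet name: rows}.
sample = {
    "People": [
        {"name": "Ada", "age": 36},
        {"name": "Grace", "age": 45},
    ],
    "Scores": [1, 2, 3],
}
with open("sample.json", "w", encoding="utf-8") as fh:
    json.dump(sample, fh)

wb = Workbook()
JsonReader().load_workbook(wb, "sample.json")
# "People" gets a header row ("name", "age") plus one row per object;
# "Scores" becomes a single column of values.
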
aspose/cells/io/json/writer.py
@@ -0,0 +1,119 @@
+ """
+ JSON file writer for saving workbook data to JSON format.
+ """
+
+ import json
+ from typing import Dict, List, Optional, Union, TYPE_CHECKING
+ from ...formats import CellValue
+
+ if TYPE_CHECKING:
+     from ...workbook import Workbook
+     from ...worksheet import Worksheet
+
+
+ class JsonWriter:
+     """Writer for JSON files."""
+
+     def __init__(self):
+         pass
+
+     def write(self, file_path: str, data: Union[List[Dict], Dict], **kwargs) -> None:
+         """Write data to JSON file."""
+         pretty_print = kwargs.get('pretty_print', False)
+         encoding = kwargs.get('encoding', 'utf-8')
+
+         try:
+             with open(file_path, 'w', encoding=encoding) as file:
+                 if pretty_print:
+                     json.dump(data, file, indent=2, ensure_ascii=False)
+                 else:
+                     json.dump(data, file, ensure_ascii=False)
+
+         except Exception as e:
+             raise ValueError(f"Error writing JSON file: {e}")
+
+     def write_workbook(self, file_path: str, workbook: 'Workbook', **kwargs) -> None:
+         """Write workbook data to JSON file."""
+         include_empty_cells = kwargs.get('include_empty_cells', False)
+         all_sheets = kwargs.get('all_sheets', False)
+         sheet_name = kwargs.get('sheet_name')
+
+         if sheet_name:
+             # Export specific sheet
+             if sheet_name in workbook._worksheets:
+                 worksheet = workbook._worksheets[sheet_name]
+                 result = self._convert_worksheet(worksheet, include_empty_cells)
+             else:
+                 result = []
+         elif all_sheets:
+             # Export all sheets with sheet names as keys
+             result = {}
+             for name, worksheet in workbook._worksheets.items():
+                 sheet_data = self._convert_worksheet(worksheet, include_empty_cells)
+                 result[name] = sheet_data
+         else:
+             # Export only active sheet as simple list
+             result = self._convert_worksheet(workbook.active, include_empty_cells)
+
+         self.write(file_path, result, **kwargs)
+
+     def _convert_worksheet(self, worksheet: 'Worksheet', include_empty_cells: bool = False) -> List[Dict[str, Union[str, int, float, bool, None]]]:
+         """Convert worksheet to list of row dictionaries."""
+         if not worksheet._cells and not include_empty_cells:
+             return []
+
+         # Find actual data bounds
+         if not worksheet._cells:
+             return []
+
+         max_row = worksheet.max_row
+         max_col = worksheet.max_column
+
+         if max_row == 0 or max_col == 0:
+             return []
+
+         # Convert to list of dictionaries
+         result = []
+
+         # Generate column headers (A, B, C, etc.)
+         headers = []
+         for col in range(1, max_col + 1):
+             col_name = ""
+             temp_col = col - 1
+             while temp_col >= 0:
+                 col_name = chr(ord('A') + (temp_col % 26)) + col_name
+                 temp_col = temp_col // 26 - 1
+             headers.append(col_name)
+
+         # Process all data rows
+         for row in range(1, max_row + 1):
+             row_data = {}
+             has_data = False
+
+             for col in range(1, max_col + 1):
+                 cell = worksheet._cells.get((row, col))
+                 header = headers[col - 1] if col <= len(headers) else f"Column{col}"
+
+                 if cell and cell.value is not None:
+                     row_data[header] = self._convert_cell_value(cell.value)
+                     has_data = True
+                 elif include_empty_cells:
+                     row_data[header] = None
+
+             if has_data or include_empty_cells:
+                 result.append(row_data)
+
+         return result
+
+     def _convert_cell_value(self, value: CellValue) -> Union[str, int, float, bool, None]:
+         """Convert cell value to JSON-serializable format."""
+         if value is None:
+             return None
+         elif isinstance(value, (str, int, float, bool)):
+             return value
+         else:
+             return str(value)
+
+     def save_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
+         """Save workbook to JSON file - unified interface method."""
+         self.write_workbook(file_path, workbook, **options)
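
The writer mirrors the reader: by default it serialises the active sheet as a list of row objects keyed by spreadsheet-style column letters, all_sheets=True nests those lists under sheet names, sheet_name selects a single sheet, and pretty_print/include_empty_cells control indentation and empty values. A usage sketch under the same import assumptions as above; it also assumes a freshly created Workbook exposes a default active sheet, as the interfaces module earlier in this diff implies.

# Usage sketch; import paths inferred from this diff, Workbook location assumed.
from aspose.cells.io.json.writer import JsonWriter
from aspose.cells.workbook import Workbook

wb = Workbook()
ws = wb.active  # assumed default sheet on a new workbook
ws.cell(1, 1, "Ada")
ws.cell(1, 2, 36)
ws.cell(2, 1, "Grace")
ws.cell(2, 2, 45)

writer = JsonWriter()
# Active sheet only, keyed by column letters:
#   [{"A": "Ada", "B": 36}, {"A": "Grace", "B": 45}]
writer.write_workbook("out.json", wb, pretty_print=True)
# All sheets as {sheet name: rows}:
writer.write_workbook("all.json", wb, all_sheets=True)
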
aspose/cells/io/md/__init__.py
@@ -0,0 +1,8 @@
+ """
+ Markdown I/O operations.
+ """
+
+ from .reader import MarkdownReader
+ from .writer import MarkdownWriter
+
+ __all__ = ["MarkdownReader", "MarkdownWriter"]
aspose/cells/io/md/reader.py
@@ -0,0 +1,161 @@
+ """
+ Markdown table reader for loading Markdown table data into workbook format.
+ """
+
+ import re
+ from typing import Dict, List, Optional, Union, TYPE_CHECKING
+ from pathlib import Path
+ from ...formats import CellValue
+
+ if TYPE_CHECKING:
+     from ...workbook import Workbook
+
+
+ class MarkdownReader:
+     """Reader for Markdown table files."""
+
+     def __init__(self):
+         pass
+
+     def read(self, file_path: str, **kwargs) -> Union[List[List[CellValue]], Dict[str, List[List[CellValue]]]]:
+         """Read Markdown file and extract table data."""
+         encoding = kwargs.get('encoding', 'utf-8')
+
+         try:
+             with open(file_path, 'r', encoding=encoding) as file:
+                 content = file.read()
+
+             return self._parse_markdown_tables(content)
+
+         except FileNotFoundError:
+             raise FileNotFoundError(f"Markdown file not found: {file_path}")
+         except Exception as e:
+             raise ValueError(f"Error reading Markdown file: {e}")
+
+     def _parse_markdown_tables(self, content: str) -> Union[List[List[CellValue]], Dict[str, List[List[CellValue]]]]:
+         """Parse markdown content and extract tables."""
+         sections = self._split_by_headers(content)
+
+         if len(sections) == 1 and sections[0]['name'] == 'default':
+             # Single table, return as list
+             return self._extract_tables_from_text(sections[0]['content'])
+         else:
+             # Multiple sections, return as dict
+             result = {}
+             for section in sections:
+                 tables = self._extract_tables_from_text(section['content'])
+                 if tables: # Only add sections with tables
+                     result[section['name']] = tables
+             return result if result else [[]]
+
+     def _split_by_headers(self, content: str) -> List[Dict[str, str]]:
+         """Split content by markdown headers."""
+         lines = content.split('\n')
+         sections = []
+         current_section = {'name': 'default', 'content': ''}
+
+         for line in lines:
+             header_match = re.match(r'^#+\s+(.+)$', line.strip())
+             if header_match:
+                 # Save current section if it has content
+                 if current_section['content'].strip():
+                     sections.append(current_section)
+                 # Start new section
+                 current_section = {
+                     'name': header_match.group(1).strip(),
+                     'content': ''
+                 }
+             else:
+                 current_section['content'] += line + '\n'
+
+         # Add final section
+         if current_section['content'].strip():
+             sections.append(current_section)
+
+         return sections if sections else [{'name': 'default', 'content': content}]
+
+     def _extract_tables_from_text(self, text: str) -> List[List[CellValue]]:
+         """Extract table data from text content."""
+         lines = text.split('\n')
+         table_rows = []
+         in_table = False
+
+         for line in lines:
+             line = line.strip()
+             if not line:
+                 if in_table:
+                     break # End of table
+                 continue
+
+             # Check if this is a table row (starts and ends with |)
+             if line.startswith('|') and line.endswith('|'):
+                 # Skip separator lines (contain only |, -, :, and spaces)
+                 if re.match(r'^[\|\-:\s]+$', line):
+                     continue
+
+                 in_table = True
+                 # Parse table row
+                 cells = [cell.strip() for cell in line[1:-1].split('|')]
+                 converted_row = []
+                 for cell in cells:
+                     # Unescape markdown special characters
+                     cell = cell.replace('\\|', '|')
+                     converted_row.append(self._convert_cell_value(cell))
+                 table_rows.append(converted_row)
+             elif in_table:
+                 # End of table
+                 break
+
+         return table_rows
+
+     def _convert_cell_value(self, value: str) -> CellValue:
+         """Convert string value to appropriate Python type."""
+         if not value or value.strip() == "":
+             return None
+
+         value = value.strip()
+
+         # Try boolean first
+         if value.upper() in ('TRUE', 'FALSE'):
+             return value.upper() == 'TRUE'
+
+         # Try integer
+         try:
+             if '.' not in value and 'e' not in value.lower():
+                 return int(value)
+         except ValueError:
+             pass
+
+         # Try float
+         try:
+             return float(value)
+         except ValueError:
+             pass
+
+         # Return as string
+         return value
+
+     def load_workbook(self, workbook: 'Workbook', file_path: str, **options) -> None:
+         """Load Markdown file into workbook object."""
+         data = self.read(file_path, **options)
+
+         # Clear existing worksheets
+         workbook._worksheets.clear()
+         workbook._active_sheet = None
+
+         if isinstance(data, dict):
+             # Multi-sheet format (multiple headers/sections)
+             for sheet_name, table_data in data.items():
+                 worksheet = workbook.create_sheet(sheet_name)
+                 self._populate_worksheet(worksheet, table_data)
+         else:
+             # Single table format
+             worksheet = workbook.create_sheet("Sheet1")
+             self._populate_worksheet(worksheet, data)
+
+     def _populate_worksheet(self, worksheet, rows: List[List[CellValue]]) -> None:
+         """Populate worksheet with table data."""
+         for row_idx, row_data in enumerate(rows, 1):
+             for col_idx, cell_value in enumerate(row_data, 1):
+                 if cell_value is not None:
+                     worksheet.cell(row_idx, col_idx, cell_value)
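
Each Markdown heading starts a new section, and the pipe table found in a section becomes a sheet named after that heading; a file with no headings yields a single anonymous table, and cell text is coerced to bool, int, or float where it parses. A usage sketch, again assuming the import paths implied by the file list and a top-level Workbook module; the file name is illustrative.

# Usage sketch; import paths inferred from this diff, Workbook location assumed.
from aspose.cells.io.md.reader import MarkdownReader
from aspose.cells.workbook import Workbook

md = """# Inventory
| item | qty | in_stock |
| --- | --- | --- |
| bolt | 42 | TRUE |

# Prices
| item | price |
| --- | --- |
| bolt | 0.15 |
"""
with open("tables.md", "w", encoding="utf-8") as fh:
    fh.write(md)

wb = Workbook()
MarkdownReader().load_workbook(wb, "tables.md")
# Each "#" section with a table becomes its own sheet ("Inventory", "Prices");
# "42" is loaded as int, "TRUE" as bool, and "0.15" as float.
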