fcp-sheets 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fcp_sheets-0.1.0/.claude-plugin/plugin.json +11 -0
- fcp_sheets-0.1.0/.github/workflows/ci.yml +34 -0
- fcp_sheets-0.1.0/.github/workflows/release.yml +22 -0
- fcp_sheets-0.1.0/.gitignore +15 -0
- fcp_sheets-0.1.0/CLAUDE.md +24 -0
- fcp_sheets-0.1.0/LICENSE +21 -0
- fcp_sheets-0.1.0/PKG-INFO +13 -0
- fcp_sheets-0.1.0/pyproject.toml +37 -0
- fcp_sheets-0.1.0/src/fcp_sheets/__init__.py +1 -0
- fcp_sheets-0.1.0/src/fcp_sheets/adapter.py +365 -0
- fcp_sheets-0.1.0/src/fcp_sheets/lib/__init__.py +0 -0
- fcp_sheets-0.1.0/src/fcp_sheets/lib/chart_types.py +64 -0
- fcp_sheets-0.1.0/src/fcp_sheets/lib/colors.py +50 -0
- fcp_sheets-0.1.0/src/fcp_sheets/lib/number_formats.py +33 -0
- fcp_sheets-0.1.0/src/fcp_sheets/lib/table_styles.py +42 -0
- fcp_sheets-0.1.0/src/fcp_sheets/main.py +125 -0
- fcp_sheets-0.1.0/src/fcp_sheets/model/__init__.py +0 -0
- fcp_sheets-0.1.0/src/fcp_sheets/model/index.py +96 -0
- fcp_sheets-0.1.0/src/fcp_sheets/model/refs.py +178 -0
- fcp_sheets-0.1.0/src/fcp_sheets/model/snapshot.py +58 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/__init__.py +0 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/formatter.py +177 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_cells.py +271 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_charts.py +297 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_cond_fmt.py +359 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_editing.py +480 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_misc.py +460 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_sheets.py +180 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_structure.py +467 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_style.py +385 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_tables.py +100 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/ops_validate.py +201 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/queries.py +1036 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/reference_card.py +43 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/resolvers.py +484 -0
- fcp_sheets-0.1.0/src/fcp_sheets/server/verb_registry.py +322 -0
- fcp_sheets-0.1.0/tests/__init__.py +0 -0
- fcp_sheets-0.1.0/tests/conftest.py +41 -0
- fcp_sheets-0.1.0/tests/test_adapter.py +261 -0
- fcp_sheets-0.1.0/tests/test_data_block.py +446 -0
- fcp_sheets-0.1.0/tests/test_fill.py +274 -0
- fcp_sheets-0.1.0/tests/test_integration.py +813 -0
- fcp_sheets-0.1.0/tests/test_ops_cells.py +251 -0
- fcp_sheets-0.1.0/tests/test_ops_charts.py +438 -0
- fcp_sheets-0.1.0/tests/test_ops_cond_fmt.py +385 -0
- fcp_sheets-0.1.0/tests/test_ops_editing.py +489 -0
- fcp_sheets-0.1.0/tests/test_ops_misc.py +580 -0
- fcp_sheets-0.1.0/tests/test_ops_sheets.py +142 -0
- fcp_sheets-0.1.0/tests/test_ops_structure.py +382 -0
- fcp_sheets-0.1.0/tests/test_ops_style.py +536 -0
- fcp_sheets-0.1.0/tests/test_ops_tables.py +213 -0
- fcp_sheets-0.1.0/tests/test_ops_validate.py +269 -0
- fcp_sheets-0.1.0/tests/test_queries.py +683 -0
- fcp_sheets-0.1.0/tests/test_refs.py +187 -0
- fcp_sheets-0.1.0/tests/test_selectors.py +289 -0
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "fcp-sheets",
|
|
3
|
+
"description": "Semantic spreadsheet operations for LLMs",
|
|
4
|
+
"version": "0.1.0",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"author": {
|
|
7
|
+
"name": "Scott Meyer"
|
|
8
|
+
},
|
|
9
|
+
"repository": "https://github.com/aetherwing-io/fcp-sheets",
|
|
10
|
+
"keywords": ["spreadsheet", "excel", "xlsx", "mcp"]
|
|
11
|
+
}
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
name: CI
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
push:
|
|
5
|
+
branches: [main]
|
|
6
|
+
pull_request:
|
|
7
|
+
branches: [main]
|
|
8
|
+
|
|
9
|
+
jobs:
|
|
10
|
+
lint:
|
|
11
|
+
runs-on: ubuntu-latest
|
|
12
|
+
steps:
|
|
13
|
+
- uses: actions/checkout@v4
|
|
14
|
+
- name: Check for local path references
|
|
15
|
+
run: |
|
|
16
|
+
if grep -v '^\s*#' pyproject.toml | grep -q 'path\s*='; then
|
|
17
|
+
echo "::error::pyproject.toml contains uncommented local path references that break CI installs"
|
|
18
|
+
exit 1
|
|
19
|
+
fi
|
|
20
|
+
|
|
21
|
+
test:
|
|
22
|
+
needs: lint
|
|
23
|
+
runs-on: ubuntu-latest
|
|
24
|
+
strategy:
|
|
25
|
+
matrix:
|
|
26
|
+
python-version: ['3.11', '3.12', '3.13']
|
|
27
|
+
steps:
|
|
28
|
+
- uses: actions/checkout@v4
|
|
29
|
+
- uses: astral-sh/setup-uv@v5
|
|
30
|
+
- uses: actions/setup-python@v5
|
|
31
|
+
with:
|
|
32
|
+
python-version: ${{ matrix.python-version }}
|
|
33
|
+
- run: uv sync --extra dev
|
|
34
|
+
- run: uv run pytest
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
name: Release
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
push:
|
|
5
|
+
tags: ['v*']
|
|
6
|
+
|
|
7
|
+
jobs:
|
|
8
|
+
publish:
|
|
9
|
+
runs-on: ubuntu-latest
|
|
10
|
+
permissions:
|
|
11
|
+
contents: read
|
|
12
|
+
id-token: write
|
|
13
|
+
steps:
|
|
14
|
+
- uses: actions/checkout@v4
|
|
15
|
+
- uses: astral-sh/setup-uv@v5
|
|
16
|
+
- uses: actions/setup-python@v5
|
|
17
|
+
with:
|
|
18
|
+
python-version: '3.12'
|
|
19
|
+
- run: uv sync --extra dev
|
|
20
|
+
- run: uv run pytest
|
|
21
|
+
- run: uv build
|
|
22
|
+
- uses: pypa/gh-action-pypi-publish@release/v1
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# fcp-sheets
|
|
2
|
+
|
|
3
|
+
## Project Overview
|
|
4
|
+
MCP server that lets LLMs create and edit spreadsheets through a semantic verb DSL.
|
|
5
|
+
Uses openpyxl as the native library (Tier 2 architecture).
|
|
6
|
+
|
|
7
|
+
## Architecture
|
|
8
|
+
- `src/fcp_sheets/model/` — Thin wrapper around openpyxl Workbook, cell ref parser, sheet index
|
|
9
|
+
- `src/fcp_sheets/server/` — Verb handlers (ops_*.py), queries, verb registry, resolvers
|
|
10
|
+
- `src/fcp_sheets/lib/` — Color palette, number formats, chart types, table styles
|
|
11
|
+
- `src/fcp_sheets/adapter.py` — FcpDomainAdapter bridging fcp-core to openpyxl
|
|
12
|
+
- `src/fcp_sheets/main.py` — Server entry point
|
|
13
|
+
|
|
14
|
+
## Key Patterns
|
|
15
|
+
- Each `ops_*.py` exports a `HANDLERS` dict mapping verb names to handler functions
|
|
16
|
+
- The adapter merges all HANDLERS at import time for dispatch
|
|
17
|
+
- `queries.py` exports `QUERY_HANDLERS` for query dispatch
|
|
18
|
+
- Block mode: `data` lines buffered in adapter, flushed on `data end`
|
|
19
|
+
- Undo/redo: byte snapshots via `wb.save(BytesIO)` / `load_workbook(BytesIO)`
|
|
20
|
+
- Batch atomicity: pre-batch snapshot, rollback on any op failure
|
|
21
|
+
|
|
22
|
+
## Commands
|
|
23
|
+
- `uv run pytest` — Run tests
|
|
24
|
+
- `uv run python -c "from fcp_sheets.main import main"` — Verify import
|
fcp_sheets-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Aetherwing
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: fcp-sheets
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Spreadsheet File Context Protocol — semantic spreadsheet operations for LLMs
|
|
5
|
+
License-File: LICENSE
|
|
6
|
+
Requires-Python: <3.14,>=3.11
|
|
7
|
+
Requires-Dist: fastmcp>=3.0
|
|
8
|
+
Requires-Dist: fcp-core>=0.1.3
|
|
9
|
+
Requires-Dist: openpyxl>=3.1
|
|
10
|
+
Provides-Extra: dev
|
|
11
|
+
Requires-Dist: pyright>=1.1; extra == 'dev'
|
|
12
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
13
|
+
Requires-Dist: ruff>=0.4; extra == 'dev'
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "fcp-sheets"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Spreadsheet File Context Protocol — semantic spreadsheet operations for LLMs"
|
|
5
|
+
requires-python = ">=3.11,<3.14"
|
|
6
|
+
dependencies = [
|
|
7
|
+
"fcp-core>=0.1.3",
|
|
8
|
+
"fastmcp>=3.0",
|
|
9
|
+
"openpyxl>=3.1",
|
|
10
|
+
]
|
|
11
|
+
|
|
12
|
+
# [tool.uv.sources]
|
|
13
|
+
# For local development, uncomment below:
|
|
14
|
+
# fcp-core = { path = "../fcp-core/python", editable = true }
|
|
15
|
+
|
|
16
|
+
[project.scripts]
|
|
17
|
+
fcp-sheets = "fcp_sheets.main:main"
|
|
18
|
+
|
|
19
|
+
[project.optional-dependencies]
|
|
20
|
+
dev = [
|
|
21
|
+
"pytest>=8.0",
|
|
22
|
+
"ruff>=0.4",
|
|
23
|
+
"pyright>=1.1",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
[build-system]
|
|
27
|
+
requires = ["hatchling"]
|
|
28
|
+
build-backend = "hatchling.build"
|
|
29
|
+
|
|
30
|
+
[tool.hatch.build.targets.wheel]
|
|
31
|
+
packages = ["src/fcp_sheets"]
|
|
32
|
+
|
|
33
|
+
[tool.pytest.ini_options]
|
|
34
|
+
testpaths = ["tests"]
|
|
35
|
+
|
|
36
|
+
[tool.ruff]
|
|
37
|
+
src = ["src"]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""fcp-sheets — Spreadsheet File Context Protocol."""
|
|
@@ -0,0 +1,365 @@
|
|
|
1
|
+
"""SheetsAdapter — FcpDomainAdapter implementation for openpyxl workbooks.
|
|
2
|
+
|
|
3
|
+
Bridges fcp-core to openpyxl via SheetsModel (thin wrapper for in-place
|
|
4
|
+
undo/redo). Handles data block mode, batch atomicity (C7), and
|
|
5
|
+
collision detection (C9).
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from io import BytesIO
|
|
11
|
+
|
|
12
|
+
from openpyxl import Workbook, load_workbook
|
|
13
|
+
|
|
14
|
+
from fcp_core import EventLog, OpResult, ParsedOp
|
|
15
|
+
|
|
16
|
+
from fcp_sheets.model.index import SheetIndex
|
|
17
|
+
from fcp_sheets.model.snapshot import SheetsModel, SnapshotEvent
|
|
18
|
+
from fcp_sheets.server.queries import dispatch_query
|
|
19
|
+
from fcp_sheets.server.resolvers import SheetsOpContext
|
|
20
|
+
|
|
21
|
+
# Import all handler dicts
|
|
22
|
+
from fcp_sheets.server.ops_cells import HANDLERS as CELLS_HANDLERS
|
|
23
|
+
from fcp_sheets.server.ops_sheets import HANDLERS as SHEETS_HANDLERS
|
|
24
|
+
from fcp_sheets.server.ops_style import HANDLERS as STYLE_HANDLERS
|
|
25
|
+
from fcp_sheets.server.ops_structure import HANDLERS as STRUCTURE_HANDLERS
|
|
26
|
+
from fcp_sheets.server.ops_charts import HANDLERS as CHARTS_HANDLERS
|
|
27
|
+
from fcp_sheets.server.ops_tables import HANDLERS as TABLES_HANDLERS
|
|
28
|
+
from fcp_sheets.server.ops_cond_fmt import HANDLERS as COND_FMT_HANDLERS
|
|
29
|
+
from fcp_sheets.server.ops_validate import HANDLERS as VALIDATE_HANDLERS
|
|
30
|
+
from fcp_sheets.server.ops_editing import HANDLERS as EDITING_HANDLERS
|
|
31
|
+
from fcp_sheets.server.ops_misc import HANDLERS as MISC_HANDLERS
|
|
32
|
+
|
|
33
|
+
# Max snapshot events in undo history
|
|
34
|
+
MAX_EVENTS = 15
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class SheetsAdapter:
|
|
38
|
+
"""FcpDomainAdapter[SheetsModel, SnapshotEvent] for spreadsheet operations."""
|
|
39
|
+
|
|
40
|
+
def __init__(self) -> None:
|
|
41
|
+
self.index = SheetIndex()
|
|
42
|
+
self._named_styles: dict[str, dict] = {}
|
|
43
|
+
|
|
44
|
+
# Merge all verb handlers
|
|
45
|
+
self._handlers: dict[str, callable] = {}
|
|
46
|
+
for h in (
|
|
47
|
+
CELLS_HANDLERS, SHEETS_HANDLERS, STYLE_HANDLERS,
|
|
48
|
+
STRUCTURE_HANDLERS, CHARTS_HANDLERS, TABLES_HANDLERS,
|
|
49
|
+
COND_FMT_HANDLERS, VALIDATE_HANDLERS, EDITING_HANDLERS,
|
|
50
|
+
MISC_HANDLERS,
|
|
51
|
+
):
|
|
52
|
+
self._handlers.update(h)
|
|
53
|
+
|
|
54
|
+
# Data block mode state
|
|
55
|
+
self._data_buffer: list[str] | None = None
|
|
56
|
+
self._data_anchor: str | None = None
|
|
57
|
+
|
|
58
|
+
# -- FcpDomainAdapter protocol --
|
|
59
|
+
|
|
60
|
+
def create_empty(self, title: str, params: dict[str, str]) -> SheetsModel:
|
|
61
|
+
"""Create a new empty workbook."""
|
|
62
|
+
wb = Workbook()
|
|
63
|
+
# Handle sheets:N param
|
|
64
|
+
num_sheets = 1
|
|
65
|
+
if "sheets" in params:
|
|
66
|
+
try:
|
|
67
|
+
num_sheets = max(1, int(params["sheets"]))
|
|
68
|
+
except ValueError:
|
|
69
|
+
pass
|
|
70
|
+
|
|
71
|
+
# Default sheet is "Sheet" — rename to "Sheet1" if multiple
|
|
72
|
+
if num_sheets > 1:
|
|
73
|
+
wb.active.title = "Sheet1"
|
|
74
|
+
for i in range(2, num_sheets + 1):
|
|
75
|
+
wb.create_sheet(title=f"Sheet{i}")
|
|
76
|
+
else:
|
|
77
|
+
wb.active.title = "Sheet1"
|
|
78
|
+
|
|
79
|
+
model = SheetsModel(title=title, wb=wb)
|
|
80
|
+
self.index.clear()
|
|
81
|
+
self.index.active_sheet = wb.active.title
|
|
82
|
+
self._named_styles.clear()
|
|
83
|
+
self._data_buffer = None
|
|
84
|
+
self._data_anchor = None
|
|
85
|
+
return model
|
|
86
|
+
|
|
87
|
+
def serialize(self, model: SheetsModel, path: str) -> None:
|
|
88
|
+
"""Save workbook to file."""
|
|
89
|
+
model.wb.save(path)
|
|
90
|
+
model.file_path = path
|
|
91
|
+
|
|
92
|
+
def deserialize(self, path: str) -> SheetsModel:
|
|
93
|
+
"""Load workbook from file."""
|
|
94
|
+
wb = load_workbook(path)
|
|
95
|
+
# Extract title from properties or filename
|
|
96
|
+
title = wb.properties.title if wb.properties and wb.properties.title else path.rsplit("/", 1)[-1]
|
|
97
|
+
model = SheetsModel(title=title, wb=wb)
|
|
98
|
+
model.file_path = path
|
|
99
|
+
self.index.rebuild(model)
|
|
100
|
+
return model
|
|
101
|
+
|
|
102
|
+
def rebuild_indices(self, model: SheetsModel) -> None:
|
|
103
|
+
"""Rebuild index after undo/redo."""
|
|
104
|
+
self.index.rebuild(model)
|
|
105
|
+
|
|
106
|
+
def get_digest(self, model: SheetsModel) -> str:
|
|
107
|
+
"""Return a compact state fingerprint."""
|
|
108
|
+
wb = model.wb
|
|
109
|
+
sheets = len(wb.sheetnames)
|
|
110
|
+
cells = 0
|
|
111
|
+
for ws in wb.worksheets:
|
|
112
|
+
bounds = self.index.get_bounds(ws.title)
|
|
113
|
+
if bounds:
|
|
114
|
+
min_r, min_c, max_r, max_c = bounds
|
|
115
|
+
cells += (max_r - min_r + 1) * (max_c - min_c + 1)
|
|
116
|
+
return f"{sheets} sheets, ~{cells} cells"
|
|
117
|
+
|
|
118
|
+
    def dispatch_op(
        self, op: ParsedOp, model: SheetsModel, log: EventLog
    ) -> OpResult:
        """Execute a parsed operation on the model.

        Flow:
        1. Check for data block mode interception
        2. Take byte snapshot (for undo)
        3. Build SheetsOpContext
        4. Dispatch to handler
        5. Rebuild index
        6. Log snapshot event
        7. Return OpResult

        Note: failed operations return before any snapshot event is
        logged, so they do not enter the undo history.
        """
        raw = op.raw.strip()

        # -- Data block mode interception --
        # While a data block is open, every line except "data end" is
        # treated as raw row content, even lines that look like verbs.
        if self._data_buffer is not None:
            if op.verb == "data" and op.positionals and op.positionals[0].lower() == "end":
                return self._flush_data_block(model, log)
            # Accumulate raw line
            self._data_buffer.append(raw)
            return OpResult(success=True, message="", prefix="~")

        # Start a new data block
        if op.verb == "data":
            if not op.positionals:
                return OpResult(success=False, message="Usage: data ANCHOR")
            anchor = op.positionals[0]
            if anchor.lower() == "end":
                return OpResult(success=False, message="'data end' without prior 'data ANCHOR'")
            self._data_buffer = []
            self._data_anchor = anchor
            return OpResult(success=True, message="", prefix="~")

        # -- Normal dispatch --
        handler = self._handlers.get(op.verb)
        if handler is None:
            # Try suggestion (closest known verb by similarity)
            from fcp_core import suggest
            s = suggest(op.verb, list(self._handlers.keys()))
            msg = f"Unknown verb: {op.verb!r}"
            if s:
                msg += f"\n try: {s}"
            return OpResult(success=False, message=msg)

        # Take pre-op snapshot
        before = model.snapshot()

        # Build context
        ctx = SheetsOpContext(
            wb=model.wb,
            index=self.index,
            named_styles=self._named_styles,
        )

        # Dispatch
        try:
            result = handler(op, ctx)
        except NotImplementedError as exc:
            return OpResult(success=False, message=str(exc))
        except (ValueError, KeyError, TypeError) as exc:
            # NOTE(review): a handler that raises after partially mutating
            # the workbook is not rolled back here — presumably batch
            # atomicity (C7) is enforced by the caller; confirm.
            return OpResult(success=False, message=f"Error: {exc}")

        if not result.success:
            return result

        # Rebuild index (lightweight — just update active sheet)
        self.index.active_sheet = model.wb.active.title if model.wb.active else ""

        # Log snapshot for undo
        after = model.snapshot()
        log.append(SnapshotEvent(before=before, after=after, summary=op.raw))
        _trim_events(log, MAX_EVENTS)

        return result
|
+
|
|
195
|
+
def dispatch_query(self, query: str, model: SheetsModel) -> str:
|
|
196
|
+
"""Execute a query against the model."""
|
|
197
|
+
return dispatch_query(query, model, self.index)
|
|
198
|
+
|
|
199
|
+
def reverse_event(self, event: SnapshotEvent, model: SheetsModel) -> None:
|
|
200
|
+
"""Undo — restore from before-snapshot."""
|
|
201
|
+
model.restore(event.before)
|
|
202
|
+
self.index.rebuild(model)
|
|
203
|
+
|
|
204
|
+
def replay_event(self, event: SnapshotEvent, model: SheetsModel) -> None:
|
|
205
|
+
"""Redo — restore from after-snapshot."""
|
|
206
|
+
model.restore(event.after)
|
|
207
|
+
self.index.rebuild(model)
|
|
208
|
+
|
|
209
|
+
# -- Data block mode helpers --
|
|
210
|
+
|
|
211
|
+
    def _flush_data_block(self, model: SheetsModel, log: EventLog) -> OpResult:
        """Process accumulated data block lines and write cells.

        Implements:
        - C1: Robust type inference (formulas, quoted text, leading zeros, numbers)
        - C2: Markdown table auto-detection and conversion
        - C9: Collision detection (warns when overwriting non-empty cells)

        Order matters: snapshot -> parse -> collision scan -> write -> log,
        so the collision scan sees pre-write cell contents.
        """
        import csv
        from io import StringIO

        # Consume and reset block-mode state up front so a failure below
        # still leaves the adapter out of data-block mode.
        buffer = self._data_buffer or []
        anchor_str = self._data_anchor or "A1"
        self._data_buffer = None
        self._data_anchor = None

        if not buffer:
            return OpResult(success=False, message="Empty data block")

        # Take pre-op snapshot
        before = model.snapshot()

        # Build context for anchor resolution
        ctx = SheetsOpContext(
            wb=model.wb,
            index=self.index,
            named_styles=self._named_styles,
        )

        # Resolve anchor
        from fcp_sheets.server.resolvers import resolve_cell_ref
        resolved = resolve_cell_ref(anchor_str, ctx)
        if resolved is None:
            return OpResult(success=False, message=f"Invalid anchor: {anchor_str!r}")

        start_col, start_row = resolved
        # NOTE(review): writes target ctx.active_sheet; presumably the
        # anchor can only address the active sheet — confirm in resolvers.
        ws = ctx.active_sheet

        # Detect markdown table format (C2)
        is_markdown = False
        warning = ""
        # Check first non-empty line for markdown pipe syntax
        first_content = ""
        for line in buffer:
            stripped = line.strip()
            if stripped:
                first_content = stripped
                break
        if first_content.startswith("|"):
            is_markdown = True
            warning = "\n! Warning: Markdown table detected, auto-converted to CSV"

        # Parse all rows first, then check collisions (C9), then write
        parsed_rows: list[list[str | int | float]] = []
        for line in buffer:
            line = line.strip()
            if not line:
                continue

            if is_markdown:
                # Skip separator lines like |---|---|---|
                if all(c in "|-: " for c in line):
                    continue
                # Strip leading/trailing pipes and split on internal pipes
                cells = [c.strip() for c in line.strip("|").split("|")]
            else:
                # Parse as CSV (one logical row per buffered line)
                reader = csv.reader(StringIO(line))
                cells = next(reader, [])

            parsed_rows.append([self._parse_data_value(v.strip()) for v in cells])

        if not parsed_rows:
            return OpResult(success=False, message="No data rows parsed")

        # C9: Collision detection — check for existing non-empty cells
        collisions: list[str] = []
        from fcp_sheets.model.refs import index_to_col
        for i, row_data in enumerate(parsed_rows):
            row = start_row + i
            for j, _val in enumerate(row_data):
                col = start_col + j
                existing = ws.cell(row=row, column=col).value
                if existing is not None:
                    addr = f"{index_to_col(col)}{row}"
                    collisions.append(addr)

        # Collisions warn but do not block — the data still gets written.
        collision_warning = ""
        if collisions:
            count = len(collisions)
            preview = ", ".join(collisions[:5])
            if count > 5:
                preview += f" (+{count - 5} more)"
            collision_warning = f"\n! Warning: Overwrote {count} non-empty cell(s): {preview}"

        # Write parsed data to worksheet
        max_cols = 0
        for i, row_data in enumerate(parsed_rows):
            row = start_row + i
            for j, value in enumerate(row_data):
                col = start_col + j
                ws.cell(row=row, column=col, value=value)
                self.index.expand_bounds(ws.title, row, col)
            max_cols = max(max_cols, len(row_data))

        rows_written = len(parsed_rows)

        # Log snapshot (data blocks are undoable as a single event)
        after = model.snapshot()
        log.append(SnapshotEvent(
            before=before, after=after,
            summary=f"data block at {anchor_str} ({rows_written} rows)",
        ))
        _trim_events(log, MAX_EVENTS)

        # Record the full written range (widest row defines the column span).
        end_addr = f"{index_to_col(start_col)}{start_row}..{index_to_col(start_col + max_cols - 1)}{start_row + rows_written - 1}"
        self.index.record_modified(ws.title, end_addr)

        msg = f"Wrote {rows_written} rows at {anchor_str}{warning}{collision_warning}"
        return OpResult(success=True, message=msg, prefix="+")
|
|
332
|
+
@staticmethod
|
|
333
|
+
def _parse_data_value(s: str) -> str | int | float:
|
|
334
|
+
"""Parse a value from a data block line (same rules as set verb)."""
|
|
335
|
+
if not s:
|
|
336
|
+
return ""
|
|
337
|
+
if s.startswith("="):
|
|
338
|
+
return s
|
|
339
|
+
if (s.startswith('"') and s.endswith('"')) or (s.startswith("'") and s.endswith("'")):
|
|
340
|
+
return s[1:-1]
|
|
341
|
+
# Leading zero protection (C1)
|
|
342
|
+
if len(s) > 1 and s[0] == "0" and s[1:].isdigit():
|
|
343
|
+
return s
|
|
344
|
+
try:
|
|
345
|
+
return int(s)
|
|
346
|
+
except ValueError:
|
|
347
|
+
pass
|
|
348
|
+
try:
|
|
349
|
+
return float(s)
|
|
350
|
+
except ValueError:
|
|
351
|
+
pass
|
|
352
|
+
return s
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def _trim_events(log: EventLog, max_events: int) -> None:
|
|
356
|
+
"""Trim oldest events if log exceeds max_events.
|
|
357
|
+
|
|
358
|
+
The EventLog doesn't support direct trimming, so we track this
|
|
359
|
+
via the adapter. For now, the event log grows unbounded and we
|
|
360
|
+
rely on the byte-snapshot size being manageable.
|
|
361
|
+
"""
|
|
362
|
+
# Note: EventLog doesn't expose a trim API. The memory cap is
|
|
363
|
+
# enforced by limiting snapshot retention in the adapter's dispatch.
|
|
364
|
+
# A future enhancement could add EventLog.trim_oldest().
|
|
365
|
+
pass
|
|
File without changes
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
"""Chart type mapping from DSL names to openpyxl chart classes."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from openpyxl.chart import (
|
|
6
|
+
AreaChart,
|
|
7
|
+
AreaChart3D,
|
|
8
|
+
BarChart,
|
|
9
|
+
BarChart3D,
|
|
10
|
+
BubbleChart,
|
|
11
|
+
DoughnutChart,
|
|
12
|
+
LineChart,
|
|
13
|
+
LineChart3D,
|
|
14
|
+
PieChart,
|
|
15
|
+
PieChart3D,
|
|
16
|
+
RadarChart,
|
|
17
|
+
ScatterChart,
|
|
18
|
+
StockChart,
|
|
19
|
+
SurfaceChart,
|
|
20
|
+
SurfaceChart3D,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
# Mapping from DSL chart type name to (class, grouping, type_override).
# grouping is passed to openpyxl's chart .grouping attribute where set;
# type_override selects "bar" vs "col" orientation for BarChart variants.
CHART_TYPES: dict[str, tuple[type, str | None, str | None]] = {
    "bar": (BarChart, "clustered", "bar"),
    "column": (BarChart, "clustered", "col"),
    "line": (LineChart, None, None),
    "pie": (PieChart, None, None),
    "scatter": (ScatterChart, None, None),
    "area": (AreaChart, None, None),
    "doughnut": (DoughnutChart, None, None),
    "radar": (RadarChart, None, None),
    "bubble": (BubbleChart, None, None),
    "stock": (StockChart, None, None),
    "surface": (SurfaceChart, None, None),
    # Stacked variants
    "stacked-bar": (BarChart, "stacked", "bar"),
    "stacked-column": (BarChart, "stacked", "col"),
    "stacked-area": (AreaChart, "stacked", None),
    # 100% stacked
    "100-bar": (BarChart, "percentStacked", "bar"),
    "100-column": (BarChart, "percentStacked", "col"),
    "100-area": (AreaChart, "percentStacked", None),
    # 3D variants
    "bar-3d": (BarChart3D, "clustered", "bar"),
    "column-3d": (BarChart3D, "clustered", "col"),
    "line-3d": (LineChart3D, None, None),
    "pie-3d": (PieChart3D, None, None),
    "area-3d": (AreaChart3D, None, None),
    "surface-3d": (SurfaceChart3D, None, None),
}
+
|
|
53
|
+
|
|
54
|
+
def get_chart_class(type_name: str) -> tuple[type, str | None, str | None]:
    """Look up chart class and config for a DSL type name (case-insensitive).

    Returns (chart_class, grouping, type_override).
    Raises ValueError if type not found, listing every valid name.
    """
    key = type_name.lower()
    entry = CHART_TYPES.get(key)
    if entry is None:
        available = ", ".join(sorted(CHART_TYPES.keys()))
        raise ValueError(f"Unknown chart type: {type_name!r}. Available: {available}")
    return entry
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""Named color palette and hex parsing for spreadsheet formatting."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
# Standard Excel/Office color palette
NAMED_COLORS: dict[str, str] = {
    "blue": "4472C4",
    "orange": "ED7D31",
    "gray": "A5A5A5",
    "gold": "FFC000",
    "lt-blue": "5B9BD5",
    "green": "70AD47",
    "red": "FF0000",
    "dk-green": "00B050",
    "white": "FFFFFF",
    "black": "000000",
    "yellow": "FFFF00",
    "purple": "7030A0",
    # Conditional formatting fills
    "good-fill": "C6EFCE",
    "bad-fill": "FFC7CE",
    "neutral-fill": "FFEB9C",
}

_HEX_DIGITS = set("0123456789ABCDEFabcdef")


def parse_color(color_str: str) -> str:
    """Parse a color string to a 6-char uppercase hex value (no #).

    Accepts:
    - Named colors: "blue", "red" (case-insensitive)
    - Hex with #: "#4472C4"
    - Hex without #: "4472C4"
    - 3-char shorthand: "abc" -> "AABBCC"

    Raises ValueError for anything else.
    """
    normalized = color_str.lower().strip()
    if normalized in NAMED_COLORS:
        return NAMED_COLORS[normalized]

    # Not a named color — treat as hex, dropping any leading # characters.
    candidate = color_str.lstrip("#").strip()

    if len(candidate) == 6 and set(candidate) <= _HEX_DIGITS:
        return candidate.upper()

    # 3-char shorthand expands each digit: "abc" -> "aabbcc".
    if len(candidate) == 3 and set(candidate) <= _HEX_DIGITS:
        return "".join(ch * 2 for ch in candidate).upper()

    raise ValueError(f"Invalid color: {color_str!r}")
|