@josephyan/qingflow-app-user-mcp 0.2.0-beta.45 → 0.2.0-beta.47

package/README.md CHANGED
@@ -3,13 +3,13 @@
  Install:

  ```bash
- npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.45
+ npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.47
  ```

  Run:

  ```bash
- npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.45 qingflow-app-user-mcp
+ npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.47 qingflow-app-user-mcp
  ```

  Environment:
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@josephyan/qingflow-app-user-mcp",
- "version": "0.2.0-beta.45",
+ "version": "0.2.0-beta.47",
  "description": "Operational end-user MCP for Qingflow records, tasks, comments, and directory workflows.",
  "license": "MIT",
  "type": "module",
package/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "qingflow-mcp"
- version = "0.2.0b45"
+ version = "0.2.0b47"
  description = "User-authenticated MCP server for Qingflow"
  readme = "README.md"
  license = "MIT"
@@ -32,7 +32,7 @@ Route to exactly one of these specialized paths:

  - If the user does not know the target `app_key`, discover apps first with `app_list` or `app_search`, then route to the specialized skill
  - If the app is known but the available data range is unclear, call `app_get` first and inspect `accessible_views`
- - If the task is about browsing, reading, creating, updating, deleting, attachments, relations, subtable writes, member/department-field candidate lookup, import templates, import-file verification, authorized local file repair, import execution, or import status, switch to `$qingflow-record-crud`
+ - If the task is about browsing, reading, creating, updating, deleting, attachments, relations, subtable writes, member/department-field candidate lookup, import templates, import capability discovery, import-file verification, authorized local file repair, import execution, or import status, switch to `$qingflow-record-crud`
  - If the task is about todo discovery, task context, approval actions, rollback or transfer, associated report review, or workflow log review, switch to `$qingflow-task-ops`
  - If the task is about grouped distributions, ratios, rankings, trends, insights, or any final statistical conclusion, switch to `$qingflow-record-analysis`
  - If the MCP is not connected, authenticated, or bound to the right workspace, switch to `$qingflow-mcp-setup`
@@ -43,6 +43,7 @@ Route to exactly one of these specialized paths:
  - if a field or target is still ambiguous after schema/task lookup, ask the user to confirm from a short candidate list instead of guessing
  - if the task can stay read-only, do not write or act
  - if the task involves a user-uploaded import file, do not modify the file unless the user explicitly authorizes repair or normalization
+ - if the task involves record import, call `app_get` first and inspect `data.import_capability` before template download, file repair, or import start
  - if the current MCP capability is unsupported, the workflow is awkward, or the user's need still cannot be satisfied after reasonable use, summarize the gap, ask whether to submit feedback, and call `feedback_submit` only after explicit user confirmation

  ## Shared Helper
@@ -137,19 +137,22 @@ Use the import tools for file-based bulk data loading, not `record_write`.

  ### Import workflow

- 1. Get the official template with `record_import_template_get`
- 2. Verify the uploaded file with `record_import_verify`
- 3. If verification fails, explain the issues first
- 4. Only modify the uploaded file if the user explicitly authorizes repair or normalization
- 5. If authorized, preserve the original file and write a repaired copy instead of overwriting the source file by default
- 6. Use `record_import_repair_local` for authorized `.xlsx` repair
- 7. Re-run `record_import_verify` on the repaired copy
- 8. Start import only from a successful verification result
- 9. Track the job with `record_import_status_get`
+ 1. Call `app_get` first and inspect `data.import_capability`
+ 2. If `import_capability.can_import=false`, stop before template download, file repair, or import start
+ 3. Get the official template with `record_import_template_get`
+ 4. Verify the uploaded file with `record_import_verify`
+ 5. If verification fails, explain the issues first
+ 6. Only modify the uploaded file if the user explicitly authorizes repair or normalization
+ 7. If authorized, preserve the original file and write a repaired copy instead of overwriting the source file by default
+ 8. Use `record_import_repair_local` for authorized `.xlsx` repair
+ 9. Re-run `record_import_verify` on the repaired copy
+ 10. Start import only from a successful verification result
+ 11. Track the job with `record_import_status_get`

  ### Import discipline

  - Do not modify a user-uploaded Excel or CSV file unless the user explicitly authorizes file repair
+ - Do not treat file verification as proof of import permission; permission is prechecked from `app_get.import_capability` and still rechecked at `record_import_start`
  - Do not silently normalize, rename, reorder, or delete columns
  - Do not fabricate business values to satisfy validation
  - Only fix format-level issues that keep the user’s intended data semantics intact, such as header alignment, date/number formatting, enum spelling aligned to the template, blank trailing rows, workbook sheet shape, or attachment/url cell normalization
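
For context, the reordered list above maps onto a straightforward call sequence. A minimal sketch, assuming a hypothetical `call_tool` helper that invokes MCP tools by name (the helper and the `job_id` key are illustrative, not part of this package):

```python
# Minimal sketch of the new import order. `call_tool` is a hypothetical MCP
# client helper; tool names match the skill text above.
def run_import(call_tool, app_key: str, file_path: str) -> dict:
    app = call_tool("app_get", {"app_key": app_key})
    capability = app["data"]["import_capability"]
    if capability["can_import"] is False:
        # Step 2: stop before template download, file repair, or import start.
        raise PermissionError("current user cannot import into this app")

    call_tool("record_import_template_get", {"app_key": app_key})
    verify = call_tool("record_import_verify", {"app_key": app_key, "file_path": file_path})
    if not verify["can_import"]:
        # Explain issues; repair only with explicit user authorization, then
        # re-run record_import_verify on the repaired copy before starting.
        return verify

    # record_import_start requires an explicit being_enter_auditing choice.
    job = call_tool(
        "record_import_start",
        {"app_key": app_key, "file_path": file_path, "being_enter_auditing": False},
    )
    return call_tool("record_import_status_get", {"job_id": job["job_id"]})
```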
@@ -2,4 +2,4 @@ from __future__ import annotations

  __all__ = ["__version__"]

- __version__ = "0.2.0b45"
+ __version__ = "0.2.0b47"
@@ -113,8 +113,10 @@ Analysis answers must include concrete numbers. When applicable, include percent

  ## Import Path

- `record_import_template_get -> record_import_verify -> (optional authorized record_import_repair_local) -> record_import_start -> record_import_status_get`
+ `app_get -> record_import_template_get -> record_import_verify -> (optional authorized record_import_repair_local) -> record_import_start -> record_import_status_get`

+ - Check `app_get.data.import_capability` before doing import work.
+ - If `import_capability.can_import=false`, stop before template download, file repair, or import start.
  - Import must go through `verify -> start`; do not start directly from a raw file path.
  - `record_import_start` requires an explicit `being_enter_auditing` choice. Do not assume a default.
  - Do not modify user-uploaded files unless the user explicitly authorizes repair.
@@ -101,8 +101,10 @@ Analysis answers must include concrete numbers. When applicable, include percent

  ## Import Path

- `record_import_template_get -> record_import_verify -> (optional authorized record_import_repair_local) -> record_import_start -> record_import_status_get`
+ `app_get -> record_import_template_get -> record_import_verify -> (optional authorized record_import_repair_local) -> record_import_start -> record_import_status_get`

+ - Check `app_get.data.import_capability` before doing import work.
+ - If `import_capability.can_import=false`, stop before template download, file repair, or import start.
  - Import must go through `verify -> start`; do not start directly from a raw file path.
  - `record_import_start` requires an explicit `being_enter_auditing` choice. Do not assume a default.
  - Do not modify user-uploaded files unless the user explicitly authorizes repair.
@@ -142,6 +142,7 @@ class AppTools(ToolBase):
  def runner(session_profile, context):
  warnings: list[JSONObject] = []
  app_name = app_key
+ base_info: JSONObject | None = None

  try:
  base_info = self.backend.request("GET", context, f"/app/{app_key}/baseInfo")
@@ -163,6 +164,8 @@ class AppTools(ToolBase):
  can_create = self._probe_create_access(context, app_key)
  accessible_views = self._resolve_accessible_system_views(context, app_key)
  accessible_views.extend(self._resolve_accessible_custom_views(context, app_key))
+ import_capability, import_warnings = _derive_import_capability(base_info)
+ warnings.extend(import_warnings)

  return {
  "profile": profile,
@@ -174,6 +177,7 @@ class AppTools(ToolBase):
  "app_key": app_key,
  "app_name": app_name,
  "can_create": can_create,
+ "import_capability": import_capability,
  "accessible_views": accessible_views,
  },
  }
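
Illustratively, the `data` block returned by `app_get` now carries the capability summary alongside `can_create` and `accessible_views`. A sample shape with invented values:

```python
# Invented sample of app_get's data payload after this change; field names
# follow the diff, concrete values are illustrative only.
data = {
    "app_key": "abc123",
    "app_name": "Expense Claims",
    "can_create": True,
    "import_capability": {
        "can_import": True,
        "auth_source": "apply_auth",  # or "data_manage_auth" / "none" / "unknown"
        "applicant_import_enabled": True,
        "data_manage_status": None,
        "runtime_checks_required": ["user_disabled", "function_demoted"],
        "confidence": "preflight",  # precheck only; record_import_start still rechecks
    },
    "accessible_views": [],
}
```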
@@ -694,9 +698,88 @@ def _analysis_supported_for_view_type(view_type: str | None) -> bool:
  return normalized not in {"boardview", "ganttview"}


+ def _derive_import_capability(base_info: Any) -> tuple[JSONObject, list[JSONObject]]:
+ warnings: list[JSONObject] = []
+ if not isinstance(base_info, dict):
+ warnings.append(
+ {
+ "code": "IMPORT_CAPABILITY_UNAVAILABLE",
+ "message": "app_get could not determine import capability because baseInfo was unavailable.",
+ }
+ )
+ return _unknown_import_capability(), warnings
+
+ has_data_import_status = "dataImportStatus" in base_info
+ has_data_manage_status = "dataManageStatus" in base_info
+ applicant_import_enabled = _coerce_optional_bool(base_info.get("dataImportStatus")) if has_data_import_status else None
+ data_manage_status = _coerce_optional_bool(base_info.get("dataManageStatus")) if has_data_manage_status else None
+
+ if applicant_import_enabled is True:
+ return {
+ "can_import": True,
+ "auth_source": "apply_auth",
+ "applicant_import_enabled": True,
+ "data_manage_status": data_manage_status,
+ "runtime_checks_required": ["user_disabled", "function_demoted"],
+ "confidence": "preflight",
+ }, warnings
+
+ if data_manage_status is True:
+ return {
+ "can_import": True,
+ "auth_source": "data_manage_auth",
+ "applicant_import_enabled": applicant_import_enabled,
+ "data_manage_status": True,
+ "runtime_checks_required": ["user_disabled", "function_demoted"],
+ "confidence": "preflight",
+ }, warnings
+
+ if applicant_import_enabled is False and data_manage_status is False:
+ return {
+ "can_import": False,
+ "auth_source": "none",
+ "applicant_import_enabled": False,
+ "data_manage_status": False,
+ "runtime_checks_required": [],
+ "confidence": "preflight",
+ }, warnings
+
+ warnings.append(
+ {
+ "code": "IMPORT_CAPABILITY_UNAVAILABLE",
+ "message": "app_get could not fully determine import capability because baseInfo did not include a complete import permission summary.",
+ }
+ )
+ return _unknown_import_capability(
+ applicant_import_enabled=applicant_import_enabled,
+ data_manage_status=data_manage_status,
+ ), warnings
+
+
+ def _unknown_import_capability(
+ *,
+ applicant_import_enabled: bool | None = None,
+ data_manage_status: bool | None = None,
+ ) -> JSONObject:
+ return {
+ "can_import": None,
+ "auth_source": "unknown",
+ "applicant_import_enabled": applicant_import_enabled,
+ "data_manage_status": data_manage_status,
+ "runtime_checks_required": [],
+ "confidence": "unknown",
+ }
+
+
  def _coerce_positive_int(value: Any) -> int | None:
  try:
  number = int(value)
  except (TypeError, ValueError):
  return None
  return number if number > 0 else None
+
+
+ def _coerce_optional_bool(value: Any) -> bool | None:
+ if isinstance(value, bool):
+ return value
+ return None
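
Reading `_derive_import_capability` end to end, the three outcomes can be pinned down with small invented `baseInfo` payloads:

```python
# Invented baseInfo samples exercising _derive_import_capability's branches.
capability, warnings = _derive_import_capability({"dataImportStatus": True})
assert capability["can_import"] is True and capability["auth_source"] == "apply_auth"

capability, warnings = _derive_import_capability(
    {"dataImportStatus": False, "dataManageStatus": False}
)
assert capability["can_import"] is False and capability["auth_source"] == "none"

# Missing or non-dict baseInfo yields the unknown capability plus a warning.
capability, warnings = _derive_import_capability(None)
assert capability["can_import"] is None
assert warnings[0]["code"] == "IMPORT_CAPABILITY_UNAVAILABLE"
```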
@@ -4,6 +4,7 @@ import hashlib
  import json
  import mimetypes
  import shutil
+ import tempfile
  from io import BytesIO
  from copy import deepcopy
  from datetime import datetime, timedelta, timezone
@@ -12,15 +13,16 @@ from typing import Any
  from uuid import uuid4

  from mcp.server.fastmcp import FastMCP
- from openpyxl import load_workbook
+ from openpyxl import Workbook, load_workbook

  from ..config import DEFAULT_PROFILE
  from ..errors import QingflowApiError
  from ..import_store import ImportJobStore, ImportVerificationStore
  from ..json_types import JSONObject
+ from .app_tools import _derive_import_capability
  from .base import ToolBase
  from .file_tools import FileTools
- from .record_tools import RecordTools
+ from .record_tools import RecordTools, _build_field_index, _normalize_form_schema


  SUPPORTED_IMPORT_EXTENSIONS = {".xlsx", ".xls"}
@@ -132,10 +134,46 @@ class ImportTools(ToolBase):
  return self._failed_template_result(app_key=app_key, error_code="IMPORT_TEMPLATE_UNAUTHORIZED", message="app_key is required")

  def runner(session_profile, context):
- expected_columns, schema_fingerprint = self._expected_import_columns(profile, context, app_key)
+ import_capability, import_warnings = self._fetch_import_capability(context, app_key)
+ expected_columns, schema_fingerprint = self._expected_import_columns(
+ profile,
+ context,
+ app_key,
+ import_capability=import_capability,
+ )
  try:
  payload = self.backend.request("GET", context, f"/app/{app_key}/apply/excelTemplate")
  except QingflowApiError as exc:
+ if import_capability.get("auth_source") == "apply_auth":
+ downloaded_to_path = self._write_local_template(
+ expected_columns=expected_columns,
+ destination_hint=download_to_path,
+ app_key=app_key,
+ )
+ return {
+ "ok": True,
+ "status": "partial_success",
+ "app_key": app_key,
+ "ws_id": session_profile.selected_ws_id,
+ "request_route": self.backend.describe_route(context),
+ "template_url": None,
+ "downloaded_to_path": downloaded_to_path,
+ "expected_columns": expected_columns,
+ "schema_fingerprint": schema_fingerprint,
+ "warnings": import_warnings
+ + [
+ {
+ "code": "IMPORT_TEMPLATE_LOCAL_FALLBACK",
+ "message": "Official template download requires data management permission; MCP generated a local applicant-import template instead.",
+ }
+ ],
+ "verification": {
+ "schema_fingerprint": schema_fingerprint,
+ "template_url_resolved": False,
+ "template_downloaded": True,
+ "template_source": "local_generated",
+ },
+ }
  return self._failed_template_result(
  app_key=app_key,
  error_code="IMPORT_TEMPLATE_UNAUTHORIZED",
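
A caller can tell this new local fallback apart from a normal success by `status` and `verification.template_source`. A consumer-side sketch, assuming `result` is the dict returned by `record_import_template_get`:

```python
# Hypothetical consumer of record_import_template_get results.
def describe_template_result(result: dict) -> str:
    verification = result.get("verification") or {}
    if result.get("status") == "partial_success" and verification.get("template_source") == "local_generated":
        # Official download needs data-management permission; a header-only
        # workbook was generated locally from the applicant import columns.
        return f"local template at {result['downloaded_to_path']}"
    if result.get("status") == "failed":
        return f"template unavailable: {result.get('message')}"
    return f"official template: {result.get('template_url')}"
```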
@@ -151,11 +189,12 @@ class ImportTools(ToolBase):
  request_route=self.backend.describe_route(context),
  )
  downloaded_to_path = None
- warnings: list[JSONObject] = []
+ warnings: list[JSONObject] = list(import_warnings)
  verification = {
  "schema_fingerprint": schema_fingerprint,
  "template_url_resolved": True,
  "template_downloaded": False,
+ "template_source": "official",
  }
  if download_to_path:
  destination = _resolve_template_download_path(download_to_path, app_key=app_key)
@@ -197,8 +236,43 @@ class ImportTools(ToolBase):
  return self._failed_verify_result(app_key=app_key, file_path=file_path, error_code="IMPORT_VERIFICATION_FAILED", message="file_path must point to an existing file")

  def runner(session_profile, context):
- expected_columns, schema_fingerprint = self._expected_import_columns(profile, context, app_key)
- template_header_titles, header_warnings = self._load_template_header_titles(context, app_key)
+ import_capability, import_warnings = self._fetch_import_capability(context, app_key)
+ precheck_known = import_capability.get("auth_source") != "unknown"
+ if not bool(import_capability.get("can_import")):
+ if import_capability.get("auth_source") != "unknown":
+ return self._failed_verify_result(
+ app_key=app_key,
+ file_path=file_path,
+ error_code="IMPORT_AUTH_PRECHECK_FAILED",
+ message="the current user does not have import permission for this app",
+ extra={
+ "warnings": import_warnings,
+ "verification": {
+ "import_auth_prechecked": True,
+ "import_auth_precheck_passed": False,
+ "backend_verification_passed": False,
+ },
+ "import_capability": import_capability,
+ },
+ )
+ import_warnings = list(import_warnings) + [
+ {
+ "code": "IMPORT_AUTH_PRECHECK_SKIPPED",
+ "message": "record_import_verify could not determine import permission from app metadata; continuing with file verification only.",
+ }
+ ]
+ expected_columns, schema_fingerprint = self._expected_import_columns(
+ profile,
+ context,
+ app_key,
+ import_capability=import_capability,
+ )
+ template_header_titles, header_warnings = self._load_template_header_titles(
+ context,
+ app_key,
+ import_capability=import_capability,
+ expected_columns=expected_columns,
+ )
  local_check = self._local_verify(
  path=path,
  app_key=app_key,
@@ -206,7 +280,7 @@ class ImportTools(ToolBase):
  allowed_header_titles=template_header_titles,
  schema_fingerprint=schema_fingerprint,
  )
- warnings = deepcopy(local_check["warnings"]) + header_warnings
+ warnings = import_warnings + deepcopy(local_check["warnings"]) + header_warnings
  issues = deepcopy(local_check["issues"])
  can_import = bool(local_check["can_import"])
  backend_verification = None
@@ -267,6 +341,7 @@ class ImportTools(ToolBase):
  "can_import": can_import,
  "issues": issues,
  "warnings": warnings,
+ "import_capability": import_capability,
  "apply_rows": backend_verification.get("applyRows") if isinstance(backend_verification, dict) else None,
  "backend_verification": backend_verification,
  "local_precheck": local_check,
@@ -287,7 +362,11 @@ class ImportTools(ToolBase):
  "issues": issues,
  "repair_suggestions": local_check["repair_suggestions"],
  "warnings": warnings,
+ "import_capability": import_capability,
  "verification": {
+ "import_auth_prechecked": precheck_known,
+ "import_auth_precheck_passed": True if precheck_known else None,
+ "import_auth_source": import_capability.get("auth_source"),
  "local_precheck_passed": bool(local_check["local_precheck_passed"]),
  "backend_verification_passed": isinstance(backend_verification, dict)
  and backend_verification.get("beingValidated", True) is not False,
@@ -593,8 +672,20 @@ class ImportTools(ToolBase):
  except RuntimeError as exc:
  return self._runtime_error_as_result(exc, error_code="IMPORT_STATUS_AMBIGUOUS")

- def _expected_import_columns(self, profile: str, context, app_key: str) -> tuple[list[JSONObject], str]: # type: ignore[no-untyped-def]
- index = self._record_tools._get_field_index(profile, context, app_key, force_refresh=False)
+ def _expected_import_columns(
+ self,
+ profile: str,
+ context,
+ app_key: str,
+ *,
+ import_capability: JSONObject | None = None,
+ ) -> tuple[list[JSONObject], str]: # type: ignore[no-untyped-def]
+ auth_source = _normalize_optional_text((import_capability or {}).get("auth_source")) or "unknown"
+ if auth_source == "data_manage_auth":
+ schema = self.backend.request("GET", context, f"/app/{app_key}/form", params={"type": 1})
+ index = _build_field_index(_normalize_form_schema(schema))
+ else:
+ index = self._record_tools._get_field_index(profile, context, app_key, force_refresh=False)
  ws_id = self.sessions.get_profile(profile).selected_ws_id
  expected_columns: list[JSONObject] = []
  for field in index.by_id.values():
@@ -719,7 +810,14 @@ class ImportTools(ToolBase):
  base_result["error_code"] = "IMPORT_VERIFICATION_FAILED"
  return base_result

- def _load_template_header_titles(self, context, app_key: str) -> tuple[list[str] | None, list[JSONObject]]: # type: ignore[no-untyped-def]
+ def _load_template_header_titles(
+ self,
+ context,
+ app_key: str,
+ *,
+ import_capability: JSONObject | None = None,
+ expected_columns: list[JSONObject] | None = None,
+ ) -> tuple[list[str] | None, list[JSONObject]]: # type: ignore[no-untyped-def]
  warnings: list[JSONObject] = []
  try:
  payload = self.backend.request("GET", context, f"/app/{app_key}/apply/excelTemplate")
@@ -736,6 +834,17 @@ class ImportTools(ToolBase):
  normalized_titles = [title for title in titles if title]
  return normalized_titles or None, warnings
  except Exception:
+ if (
+ _normalize_optional_text((import_capability or {}).get("auth_source")) == "apply_auth"
+ and expected_columns
+ ):
+ warnings.append(
+ {
+ "code": "IMPORT_TEMPLATE_HEADER_LOCAL_FALLBACK",
+ "message": "Official template headers require data management permission; local precheck fell back to applicant import columns.",
+ }
+ )
+ return [str(item["title"]) for item in expected_columns], warnings
  warnings.append(
  {
  "code": "IMPORT_TEMPLATE_HEADER_UNAVAILABLE",
@@ -744,6 +853,32 @@ class ImportTools(ToolBase):
  )
  return None, warnings

+ def _fetch_import_capability(self, context, app_key: str) -> tuple[JSONObject, list[JSONObject]]: # type: ignore[no-untyped-def]
+ try:
+ payload = self.backend.request("GET", context, f"/app/{app_key}/baseInfo")
+ except QingflowApiError:
+ payload = None
+ return _derive_import_capability(payload)
+
+ def _write_local_template(
+ self,
+ *,
+ expected_columns: list[JSONObject],
+ destination_hint: str | None,
+ app_key: str,
+ ) -> str:
+ if destination_hint:
+ destination = _resolve_template_download_path(destination_hint, app_key=app_key)
+ else:
+ destination = Path(tempfile.gettempdir()) / f"qingflow-import-template-{app_key}-{uuid4().hex[:8]}.xlsx"
+ destination.parent.mkdir(parents=True, exist_ok=True)
+ workbook = Workbook()
+ sheet = workbook.active
+ sheet.title = "导入模板"
+ sheet.append([str(item["title"]) for item in expected_columns])
+ workbook.save(destination)
+ return str(destination)
+
  def _failed_template_result(
  self,
  *,
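
The fallback workbook that `_write_local_template` saves is header-only: one sheet whose first row holds the `expected_columns` titles. A rough inspection sketch (the path and titles below are invented examples of the naming pattern above):

```python
from openpyxl import load_workbook

# Inspect a generated fallback template; path and column titles are invented.
wb = load_workbook("/tmp/qingflow-import-template-abc123-1a2b3c4d.xlsx")
header = [cell.value for cell in next(wb.active.iter_rows(min_row=1, max_row=1))]
# header equals the expected_columns titles, e.g. ["Name", "Department", "Amount"];
# the sheet carries no data rows.
```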
@@ -767,8 +902,16 @@ class ImportTools(ToolBase):
  "message": message,
  }

- def _failed_verify_result(self, *, app_key: str, file_path: str, error_code: str, message: str) -> dict[str, Any]:
- return {
+ def _failed_verify_result(
+ self,
+ *,
+ app_key: str,
+ file_path: str,
+ error_code: str,
+ message: str,
+ extra: dict[str, Any] | None = None,
+ ) -> dict[str, Any]:
+ payload = {
  "ok": True,
  "status": "failed",
  "error_code": error_code,
@@ -785,11 +928,17 @@ class ImportTools(ToolBase):
  "repair_suggestions": [],
  "warnings": [],
  "verification": {
+ "import_auth_prechecked": False,
+ "import_auth_precheck_passed": False,
  "local_precheck_passed": False,
  "backend_verification_passed": False,
  },
+ "import_capability": None,
  "message": message,
  }
+ if extra:
+ payload.update(extra)
+ return payload

  def _failed_repair_result(self, *, error_code: str, message: str, extra: dict[str, Any] | None = None) -> dict[str, Any]:
  payload = {
@@ -983,11 +1132,35 @@ def _analyze_headers(
  issues: list[JSONObject] = []
  repair_suggestions: list[str] = []
  if missing:
- issues.append(_issue("MISSING_COLUMNS", f"Missing expected columns: {', '.join(missing)}", severity="error"))
+ issues.append(
+ _issue(
+ "MISSING_COLUMNS",
+ f"Missing expected columns: {', '.join(missing)}",
+ severity="error",
+ repairable=True,
+ repair_code="normalize_headers",
+ )
+ )
  if extra:
- issues.append(_issue("EXTRA_COLUMNS", f"Unexpected columns: {', '.join(extra)}", severity="error"))
+ issues.append(
+ _issue(
+ "EXTRA_COLUMNS",
+ f"Unexpected columns: {', '.join(extra)}",
+ severity="error",
+ repairable=True,
+ repair_code="normalize_headers",
+ )
+ )
  if duplicates:
- issues.append(_issue("DUPLICATE_COLUMNS", f"Duplicate columns: {', '.join(sorted(set(duplicates)))}", severity="error"))
+ issues.append(
+ _issue(
+ "DUPLICATE_COLUMNS",
+ f"Duplicate columns: {', '.join(sorted(set(duplicates)))}",
+ severity="error",
+ repairable=True,
+ repair_code="normalize_headers",
+ )
+ )
  normalized_changes = []
  for text in actual_headers:
  if not text:
@@ -995,7 +1168,7 @@ def _analyze_headers(
  canonical = allowed_by_key.get(_normalize_header_key(text))
  if canonical and canonical != text:
  normalized_changes.append((text, canonical))
- if normalized_changes:
+ if missing or extra or duplicates or normalized_changes:
  repair_suggestions.append("normalize_headers")
  return {"issues": issues, "repair_suggestions": repair_suggestions}

@@ -1045,7 +1218,8 @@ def _sheet_header_map(sheet) -> dict[str, int]: # type: ignore[no-untyped-def]
  def _repair_headers(sheet, expected_columns: list[JSONObject]) -> bool: # type: ignore[no-untyped-def]
  changed = False
  expected_by_key = {_normalize_header_key(item["title"]): item["title"] for item in expected_columns}
- for cell in next(sheet.iter_rows(min_row=1, max_row=1), []):
+ header_cells = list(next(sheet.iter_rows(min_row=1, max_row=1), []))
+ for cell in header_cells:
  text = _normalize_optional_text(cell.value)
  if text is None:
  continue
@@ -1053,6 +1227,19 @@ def _repair_headers(sheet, expected_columns: list[JSONObject]) -> bool: # type:
  if canonical and canonical != text:
  cell.value = canonical
  changed = True
+ if changed:
+ return True
+
+ # Fallback for template-based files where headers were edited into non-canonical
+ # values but column order is still intact. Keep any extra trailing system columns.
+ for index, column in enumerate(expected_columns, start=1):
+ if index > len(header_cells):
+ break
+ expected_title = str(column["title"]).strip()
+ current_title = _normalize_optional_text(header_cells[index - 1].value)
+ if current_title != expected_title:
+ header_cells[index - 1].value = expected_title
+ changed = True
  return changed


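Concretely, the positional fallback above only fires when key-based normalization changed nothing. A worked example with an invented two-column template, assuming neither edited title normalizes to a known header key:

```python
# Invented example of the positional header fallback in _repair_headers.
expected_columns = [{"title": "Name"}, {"title": "Amount"}]
header_before = ["Full name", "Amt (CNY)", "remarks"]

# Key-based matching finds no canonical titles, so the fallback rewrites the
# first len(expected_columns) cells by position and keeps the trailing column:
header_after = ["Name", "Amount", "remarks"]
```
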
@@ -1,5 +1,6 @@
  from __future__ import annotations

+ import json
  from typing import Any
  from uuid import uuid4

@@ -264,6 +265,13 @@ class TaskContextTools(ToolBase):
  )
  )
  before_apply_status = ((task_context.get("record") or {}).get("apply_status"))
+ runtime_baseline = self._capture_task_runtime_baseline(
+ profile=profile,
+ context=context,
+ app_key=app_key,
+ record_id=record_id,
+ workflow_node_id=workflow_node_id,
+ )
  try:
  raw = self._execute_task_action(
  profile=profile,
@@ -296,6 +304,7 @@ class TaskContextTools(ToolBase):
  workflow_node_id=workflow_node_id,
  action=normalized_action,
  before_apply_status=before_apply_status,
+ runtime_baseline=runtime_baseline,
  )
  runtime_verified = bool(verification.get("runtime_continuation_verified"))
  status = "success" if runtime_verified else "partial_success"
@@ -400,6 +409,7 @@ class TaskContextTools(ToolBase):
  workflow_node_id: int,
  action: str,
  before_apply_status: Any,
+ runtime_baseline: dict[str, Any] | None = None,
  ) -> tuple[dict[str, Any], list[dict[str, Any]]]:
  verification: dict[str, Any] = {
  "action_executed": True,
@@ -473,11 +483,34 @@ class TaskContextTools(ToolBase):
  )
  verification["downstream_todo_detected"] = downstream_todo_detected
  verification["initiated_task_visible"] = initiated_visible
-
+ baseline_downstream_nodes = set()
+ baseline_log_count = None
+ baseline_log_digest = None
+ if isinstance(runtime_baseline, dict):
+ baseline_downstream_nodes = set(runtime_baseline.get("downstream_todo_nodes") or [])
+ baseline_log_count = runtime_baseline.get("workflow_log_count")
+ baseline_log_digest = runtime_baseline.get("workflow_log_digest")
+ current_downstream_nodes = {
+ int(item.get("workflow_node_id") or 0)
+ for item in todo_items
+ if isinstance(item, dict)
+ and int(item.get("record_id") or 0) == record_id
+ and int(item.get("workflow_node_id") or 0) != workflow_node_id
+ }
+ workflow_log_digest = self._workflow_log_digest(log_items)
+ verification["downstream_todo_nodes"] = sorted(node_id for node_id in current_downstream_nodes if node_id > 0)
+ verification["downstream_todo_changed"] = current_downstream_nodes != baseline_downstream_nodes
+ verification["workflow_log_advanced"] = bool(
+ verification.get("workflow_log_visible")
+ and (
+ (isinstance(baseline_log_count, int) and len(log_items) > baseline_log_count)
+ or (baseline_log_digest is not None and workflow_log_digest is not None and workflow_log_digest != baseline_log_digest)
+ )
+ )
  runtime_verified = bool(
  verification.get("record_state_changed")
- or downstream_todo_detected
- or (verification.get("workflow_log_visible") and len(log_items) > 0)
+ or verification.get("downstream_todo_changed")
+ or verification.get("workflow_log_advanced")
  )
  verification["runtime_continuation_verified"] = runtime_verified
  if not runtime_verified:
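
Put differently, continuation is now judged against the pre-action baseline instead of absolute presence. A condensed restatement with invented values:

```python
# Invented values restating the new comparison: a pre-existing todo or a
# non-empty workflow log alone no longer verifies continuation.
baseline = {"downstream_todo_nodes": [42], "workflow_log_count": 3}
current_nodes = {42, 57}  # node 57 appeared only after the action
log_count_now = 4         # one more entry than before the action

downstream_todo_changed = current_nodes != set(baseline["downstream_todo_nodes"])  # True
workflow_log_advanced = log_count_now > baseline["workflow_log_count"]             # True
runtime_verified = downstream_todo_changed or workflow_log_advanced                # True
```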
@@ -489,6 +522,53 @@ class TaskContextTools(ToolBase):
  )
  return verification, warnings

+ def _capture_task_runtime_baseline(
+ self,
+ *,
+ profile: str,
+ context: BackendRequestContext,
+ app_key: str,
+ record_id: int,
+ workflow_node_id: int,
+ ) -> dict[str, Any]:
+ baseline: dict[str, Any] = {
+ "workflow_log_visible": False,
+ "workflow_log_count": None,
+ "workflow_log_digest": None,
+ "downstream_todo_nodes": [],
+ }
+ try:
+ log_page = self.backend.request(
+ "POST",
+ context,
+ "/application/workflow/node/record",
+ json_body={
+ "key": app_key,
+ "rowRecordId": record_id,
+ "nodeId": workflow_node_id,
+ "role": 3,
+ "pageNum": 1,
+ "pageSize": 50,
+ },
+ )
+ log_items = self._normalize_workflow_logs(log_page)
+ baseline["workflow_log_visible"] = True
+ baseline["workflow_log_count"] = len(log_items)
+ baseline["workflow_log_digest"] = self._workflow_log_digest(log_items)
+ except QingflowApiError:
+ pass
+ todo_items = self._safe_task_list_items(profile=profile, task_box="todo", app_key=app_key)
+ baseline["downstream_todo_nodes"] = sorted(
+ {
+ int(item.get("workflow_node_id") or 0)
+ for item in todo_items
+ if isinstance(item, dict)
+ and int(item.get("record_id") or 0) == record_id
+ and int(item.get("workflow_node_id") or 0) != workflow_node_id
+ }
+ )
+ return baseline
+
  def _task_action_visibility_unverified_response(
  self,
  *,
@@ -1252,6 +1332,14 @@ class TaskContextTools(ToolBase):
  )
  return items

+ def _workflow_log_digest(self, items: list[dict[str, Any]]) -> str | None:
+ if not items:
+ return None
+ try:
+ return json.dumps(items, ensure_ascii=False, sort_keys=True, default=str)
+ except TypeError:
+ return str(items)
+
  def _first_nested_operation_detail(self, operation: dict[str, Any]) -> Any:
  for key in ("approval", "filling", "cc", "applicant", "qRobotAdd", "qRobotUpdate", "webhook", "qRobotSMS", "qRobotMail"):