@josephyan/qingflow-app-user-mcp 0.2.0-beta.2 → 0.2.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. package/README.md +12 -2
  2. package/npm/lib/runtime.mjs +37 -0
  3. package/npm/scripts/postinstall.mjs +5 -1
  4. package/package.json +3 -2
  5. package/pyproject.toml +1 -1
  6. package/skills/qingflow-app-user/SKILL.md +230 -0
  7. package/skills/qingflow-app-user/agents/openai.yaml +4 -0
  8. package/skills/qingflow-app-user/references/data-gotchas.md +49 -0
  9. package/skills/qingflow-app-user/references/environments.md +63 -0
  10. package/skills/qingflow-app-user/references/record-patterns.md +110 -0
  11. package/skills/qingflow-app-user/references/workflow-usage.md +26 -0
  12. package/skills/qingflow-record-analysis/SKILL.md +253 -0
  13. package/skills/qingflow-record-analysis/agents/openai.yaml +4 -0
  14. package/skills/qingflow-record-analysis/references/analysis-gotchas.md +141 -0
  15. package/skills/qingflow-record-analysis/references/analysis-patterns.md +113 -0
  16. package/skills/qingflow-record-analysis/references/confidence-reporting.md +92 -0
  17. package/src/qingflow_mcp/__init__.py +1 -1
  18. package/src/qingflow_mcp/builder_facade/models.py +294 -1
  19. package/src/qingflow_mcp/builder_facade/service.py +2727 -235
  20. package/src/qingflow_mcp/server.py +7 -5
  21. package/src/qingflow_mcp/server_app_builder.py +80 -4
  22. package/src/qingflow_mcp/server_app_user.py +8 -182
  23. package/src/qingflow_mcp/solution/compiler/form_compiler.py +1 -1
  24. package/src/qingflow_mcp/solution/compiler/workflow_compiler.py +21 -2
  25. package/src/qingflow_mcp/solution/executor.py +34 -7
  26. package/src/qingflow_mcp/tools/ai_builder_tools.py +1038 -30
  27. package/src/qingflow_mcp/tools/app_tools.py +1 -2
  28. package/src/qingflow_mcp/tools/approval_tools.py +357 -75
  29. package/src/qingflow_mcp/tools/directory_tools.py +158 -28
  30. package/src/qingflow_mcp/tools/record_tools.py +1954 -973
  31. package/src/qingflow_mcp/tools/task_tools.py +376 -225
  32. package/src/qingflow_mcp/tools/workflow_tools.py +78 -4
package/README.md CHANGED
@@ -3,13 +3,13 @@
3
3
  Install:
4
4
 
5
5
  ```bash
6
- npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.2
6
+ npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.21
7
7
  ```
8
8
 
9
9
  Run:
10
10
 
11
11
  ```bash
12
- npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.2 qingflow-app-user-mcp
12
+ npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.21 qingflow-app-user-mcp
13
13
  ```
14
14
 
15
15
  Environment:
@@ -19,3 +19,13 @@ Environment:
19
19
  - `QINGFLOW_MCP_HOME`
20
20
 
21
21
  This package bootstraps a local Python runtime on first install and then starts the `qingflow-app-user-mcp` stdio MCP server.
22
+
23
+ Bundled skills:
24
+
25
+ - `skills/qingflow-app-user`
26
+ - `skills/qingflow-record-analysis`
27
+
28
+ Note:
29
+
30
+ - The skill files are included in the npm package.
31
+ - On install, the package copies them to `$CODEX_HOME/skills` (or `~/.codex/skills` if `CODEX_HOME` is unset).
@@ -29,6 +29,43 @@ export function getPackageRoot(metaUrl) {
29
29
  return path.resolve(path.dirname(fileURLToPath(metaUrl)), "..", "..");
30
30
  }
31
31
 
32
+ export function getCodexHome() {
33
+ const configured = process.env.CODEX_HOME?.trim();
34
+ if (configured) {
35
+ return path.resolve(configured);
36
+ }
37
+ const home = process.env.HOME || process.env.USERPROFILE;
38
+ if (!home) {
39
+ throw new Error("Cannot resolve CODEX_HOME because HOME is not set.");
40
+ }
41
+ return path.join(home, ".codex");
42
+ }
43
+
44
/**
 * Copy the package's bundled skill directories into `<codex home>/skills`.
 *
 * Each top-level directory under `<packageRoot>/skills` is installed by first
 * removing any existing copy at the destination, then recursively copying the
 * source directory (so stale files from older versions never linger).
 *
 * @param {string} packageRoot - Root directory of the installed npm package.
 * @returns {{installed: string[], skipped: boolean, destination: (string|null)}}
 *   `skipped` is true (with a null destination) when the package ships no
 *   `skills/` directory; otherwise `installed` lists the copied skill names.
 */
export function installBundledSkills(packageRoot) {
  const sourceRoot = path.join(packageRoot, "skills");
  if (!fs.existsSync(sourceRoot)) {
    return { installed: [], skipped: true, destination: null };
  }

  const destRoot = path.join(getCodexHome(), "skills");
  fs.mkdirSync(destRoot, { recursive: true });

  // Only directories are skills; loose files under skills/ are ignored.
  const skillNames = fs
    .readdirSync(sourceRoot, { withFileTypes: true })
    .filter((entry) => entry.isDirectory())
    .map((entry) => entry.name);

  for (const name of skillNames) {
    const target = path.join(destRoot, name);
    // Replace wholesale: delete any previous install, then copy fresh.
    fs.rmSync(target, { recursive: true, force: true });
    fs.cpSync(path.join(sourceRoot, name), target, { recursive: true });
  }

  return { installed: skillNames, skipped: false, destination: destRoot };
}
68
+
32
69
  export function getVenvDir(packageRoot) {
33
70
  return path.join(packageRoot, ".npm-python");
34
71
  }
@@ -1,4 +1,4 @@
1
- import { ensurePythonEnv, getPackageRoot } from "../lib/runtime.mjs";
1
+ import { ensurePythonEnv, getPackageRoot, installBundledSkills } from "../lib/runtime.mjs";
2
2
 
3
3
  const packageRoot = getPackageRoot(import.meta.url);
4
4
 
@@ -6,6 +6,10 @@ try {
6
6
  console.log("[qingflow-mcp] Bootstrapping Python runtime...");
7
7
  ensurePythonEnv(packageRoot, { commandName: "qingflow-app-user-mcp" });
8
8
  console.log("[qingflow-mcp] Python runtime is ready.");
9
+ const skills = installBundledSkills(packageRoot);
10
+ if (!skills.skipped) {
11
+ console.log(`[qingflow-mcp] Installed skills to ${skills.destination}: ${skills.installed.join(", ")}`);
12
+ }
9
13
  } catch (error) {
10
14
  console.error(`[qingflow-mcp] postinstall failed: ${error.message}`);
11
15
  process.exit(1);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@josephyan/qingflow-app-user-mcp",
3
- "version": "0.2.0-beta.2",
3
+ "version": "0.2.0-beta.21",
4
4
  "description": "Operational end-user MCP for Qingflow records, tasks, comments, and directory workflows.",
5
5
  "license": "MIT",
6
6
  "type": "module",
@@ -18,7 +18,8 @@
18
18
  "src/qingflow_mcp/py.typed",
19
19
  "qingflow-app-user-mcp",
20
20
  "npm/",
21
- "docs/local-agent-install.md"
21
+ "docs/local-agent-install.md",
22
+ "skills/"
22
23
  ],
23
24
  "engines": {
24
25
  "node": ">=16.16.0"
package/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "qingflow-mcp"
7
- version = "0.2.0b2"
7
+ version = "0.2.0b21"
8
8
  description = "User-authenticated MCP server for Qingflow"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -0,0 +1,230 @@
1
+ ---
2
+ name: qingflow-app-user
3
+ description: Use Qingflow apps as an operational end user after the MCP is already connected and authenticated. Use when the user wants to browse, read, write, comment on, or act on existing business records and task-center work. Do not use this skill for schema design or final statistical analysis.
4
+ metadata:
5
+ short-description: Schema-first operational use of Qingflow apps
6
+ ---
7
+
8
+ # Qingflow App User
9
+
10
+ ## Overview
11
+
12
+ This skill is for **operational usage** inside existing Qingflow apps.
13
+
14
+ Use it for:
15
+
16
+ - record browsing
17
+ - record detail lookup
18
+ - record create / update / delete
19
+ - task-center usage
20
+ - record comments
21
+ - approval / reject / rollback / transfer
22
+ - directory lookup
23
+
24
+ Do **not** keep grouped analysis, ratios, rankings, trends, or final statistical conclusions inside this skill.
25
+ For those, switch to [$qingflow-record-analysis](../qingflow-record-analysis/SKILL.md).
26
+
27
+ Before operating on data, identify whether the task targets `test` or `prod` and read [references/environments.md](references/environments.md).
28
+ If the user did not specify one, default to `prod`.
29
+
30
+ ## Default Paths
31
+
32
+ Use exactly one of these default paths:
33
+
34
+ 1. Browse records
35
+ `record_schema_get -> record_list`
36
+
37
+ 2. Read one record
38
+ `record_schema_get -> record_get`
39
+
40
+ 3. Write records
41
+ `record_schema_get -> record_write(mode="plan") -> record_write(mode="apply")`
42
+
43
+ 4. Work task center
44
+ `task_summary / task_list / task_facets`
45
+
46
+ 5. Analysis
47
+ Switch to [$qingflow-record-analysis](../qingflow-record-analysis/SKILL.md)
48
+
49
+ ## Tool Scope
50
+
51
+ Primary record tools:
52
+
53
+ - `record_schema_get`
54
+ - `record_list`
55
+ - `record_get`
56
+ - `record_write`
57
+
58
+ Directory tools:
59
+
60
+ - `directory_search`
61
+ - `directory_list_internal_users`
62
+ - `directory_list_all_internal_users`
63
+ - `directory_list_internal_departments`
64
+ - `directory_list_all_departments`
65
+ - `directory_list_sub_departments`
66
+ - `directory_list_external_members`
67
+
68
+ Task-center tools:
69
+
70
+ - `task_summary`
71
+ - `task_list`
72
+ - `task_facets`
73
+ - `task_mark_read`
74
+ - `task_mark_all_cc_read`
75
+ - `task_urge`
76
+
77
+ Comments and workflow usage actions:
78
+
79
+ - `record_comment_write`
80
+ - `record_comment_list`
81
+ - `record_comment_mentions`
82
+ - `record_comment_mark_read`
83
+ - `task_approve`
84
+ - `task_reject`
85
+ - `task_rollback_candidates`
86
+ - `task_rollback`
87
+ - `task_transfer_candidates`
88
+ - `task_transfer`
89
+
90
+ Do not use builder-side tools here.
91
+
92
+ ## Standard Operating Order
93
+
94
+ 1. Ensure auth exists
95
+ 2. Ensure workspace is selected
96
+ 3. Confirm target app and whether the task is browse / detail / write / task / analysis
97
+ 4. Run `record_schema_get` before any non-trivial record read or write
98
+ 5. If the request is analysis-like, switch to [$qingflow-record-analysis](../qingflow-record-analysis/SKILL.md)
99
+ 6. If the request is write-like, decide `insert / update / delete` before building any payload
100
+ 7. If fields are still ambiguous after `record_schema_get`, ask the user to confirm from a short candidate list instead of guessing
101
+ 8. For high-risk writes, task actions, or production changes, read the current state first whenever practical
102
+ 9. After actions, report the affected `record_id`, `task_id`, counts, or returned item count
103
+
104
+ ## Record Read Rules
105
+
106
+ - Use `record_list` for browse/export/sample inspection only
107
+ - Use `record_get` when `record_id` is known
108
+ - `record_list` accepts:
109
+ - `columns`
110
+ - `where`
111
+ - `order_by`
112
+ - `limit`
113
+ - `page`
114
+ - `record_list` is **not** an analysis tool
115
+ - If a request turns into grouped distributions, ratios, rankings, trends, or final statistical conclusions, switch to [$qingflow-record-analysis](../qingflow-record-analysis/SKILL.md)
116
+
117
+ ## Record Write Rules
118
+
119
+ Use `record_write` as the only default write tool.
120
+
121
+ ### Write workflow
122
+
123
+ 1. Run `record_schema_get`
124
+ 2. Decide whether the task is `insert`, `update`, or `delete`
125
+ 3. Build SQL-like JSON clauses
126
+ 4. Run `record_write(mode="plan")`
127
+ 5. If blockers are empty, run `record_write(mode="apply")`
128
+ 6. For important writes, keep `verify_write=true`
129
+
130
+ ### SQL-like JSON DSL
131
+
132
+ The DSL is clause-shaped like SQL, but it is **not raw SQL text**.
133
+
134
+ #### Insert
135
+
136
+ ```json
137
+ {
138
+ "operation": "insert",
139
+ "mode": "plan",
140
+ "values": [
141
+ { "field_id": 12, "value": "测试客户" },
142
+ { "field_id": 18, "value": 1000 }
143
+ ],
144
+ "submit_type": "submit",
145
+ "verify_write": true
146
+ }
147
+ ```
148
+
149
+ #### Update
150
+
151
+ ```json
152
+ {
153
+ "operation": "update",
154
+ "mode": "plan",
155
+ "record_id": 123,
156
+ "set": [
157
+ { "field_id": 18, "value": 2000 }
158
+ ],
159
+ "verify_write": true
160
+ }
161
+ ```
162
+
163
+ #### Delete
164
+
165
+ ```json
166
+ {
167
+ "operation": "delete",
168
+ "mode": "plan",
169
+ "record_ids": [123, 124]
170
+ }
171
+ ```
172
+
173
+ ### Write discipline
174
+
175
+ - `insert` uses `values`
176
+ - `update` uses `set`
177
+ - `delete` uses `record_id` or `record_ids`
178
+ - Do not send raw SQL text
179
+ - Do not invent formulas or expressions
180
+ - Do not use free-form `WHERE` updates or deletes
181
+ - Do not auto-fill missing fields
182
+ - Do not auto-resolve relation targets without first querying them
183
+
184
+ ## Task-Center Rules
185
+
186
+ - Use `task_summary` for headline counts
187
+ - Use `task_list` for flat browsing
188
+ - Use `task_facets` for grouped worksheet or workflow-node buckets
189
+ - `task_box` must be one of:
190
+ - `todo`
191
+ - `initiated`
192
+ - `cc`
193
+ - `done`
194
+ - `flow_status` must be one of:
195
+ - `all`
196
+ - `in_progress`
197
+ - `approved`
198
+ - `rejected`
199
+ - `pending_fix`
200
+ - `urged`
201
+ - `overdue`
202
+ - `due_soon`
203
+ - `unread`
204
+ - `ended`
205
+ - Find the exact task or record first, then use `task_approve`, `task_reject`, `task_rollback`, or `task_transfer`
206
+ - Do not guess `workflow_node_id`
207
+
208
+ ## Directory and Comments
209
+
210
+ - Use `directory_search` for fuzzy member/department lookup
211
+ - Use `directory_list_all_internal_users` and `directory_list_all_departments` only when the user explicitly wants a complete export
212
+ - Use `record_comment_write` after the exact `record_id` is known
213
+ - Use `record_comment_mentions` to resolve mention candidates before building complex comment payloads
214
+
215
+ ## Response Interpretation
216
+
217
+ - `record_list` returns browse/sample data, not final analysis conclusions
218
+ - `record_write(mode="plan")` is static preflight, not runtime execution
219
+ - `record_write(mode="apply")` may still surface verification failures
220
+ - Treat `request_route` as the source of truth for live route debugging
221
+ - Prefer canonical schema titles and aliases in your final wording
222
+ - If only part of the requested work is completed, explicitly disclose which parts are done and which are not
223
+
224
+ ## Resources
225
+
226
+ - Environment switching: [references/environments.md](references/environments.md)
227
+ - Record operation patterns: [references/record-patterns.md](references/record-patterns.md)
228
+ - Workflow usage actions: [references/workflow-usage.md](references/workflow-usage.md)
229
+ - Data gotchas: [references/data-gotchas.md](references/data-gotchas.md)
230
+ - Dedicated analysis workflow: [qingflow-record-analysis](../qingflow-record-analysis/SKILL.md)
@@ -0,0 +1,4 @@
1
+ interface:
2
+ display_name: "Qingflow App User"
3
+ short_description: "Use Qingflow apps for business data and task operations"
4
+ default_prompt: "Use $qingflow-app-user for ordinary Qingflow record, task, comment, and directory operations. If the task shifts into grouped analysis, insight generation, ranking, trend, or final statistical conclusions, switch to $qingflow-record-analysis instead of keeping the logic here."
@@ -0,0 +1,49 @@
1
+ # Data Gotchas
2
+
3
+ For final statistics, grouped distributions, rankings, trends, or insight-style conclusions, use [$qingflow-record-analysis](../../qingflow-record-analysis/SKILL.md) instead of keeping that reasoning inside `$qingflow-app-user`.
4
+
5
+ ## Record Reads
6
+
7
+ - `record_list` is for browsing, export, and sample inspection only
8
+ - `record_get` is for one exact record
9
+ - Do not present paged browse output as if it were a grouped or full-population conclusion
10
+ - If the browser and MCP disagree, compare `request_route.base_url` and `request_route.qf_version` first
11
+
12
+ ## Write Preflight
13
+
14
+ - `record_write(mode="plan")` is static preflight only; linked visibility and runtime required rules can still reject writes
15
+ - Use `record_schema_get` when field titles are uncertain instead of guessing ids
16
+ - Prefer `verify_write=true` for complex, relation-heavy, subtable, or production writes
17
+ - `record_write(mode="apply")` may still surface verification failures; do not report success before checking them
18
+
19
+ ## Write Semantics
20
+
21
+ - `insert` uses `values`
22
+ - `update` uses `set`
23
+ - `delete` uses `record_id` or `record_ids`
24
+ - Do not send raw SQL strings
25
+ - Do not fake formula or expression fields
26
+ - Do not perform free-form bulk updates or deletes
27
+ - Do not guess relation targets from display text; resolve the real `record_id` first
28
+
29
+ ## Attachments
30
+
31
+ - Attachment fields are two-step: upload first, then write the returned URL object into the record
32
+ - `file_upload_local` may report `effective_upload_kind=login` even when the requested kind was `attachment`; this is an implementation fallback, not necessarily an error
33
+ - When debugging uploads, surface both `effective_upload_kind` and `upload_protocol`
34
+
35
+ ## Subtables
36
+
37
+ - Subtable fields accept row objects keyed by subfield title, or native `tableValues`
38
+ - Use the current form schema's subfield titles; do not guess nested ids
39
+ - When updating existing subtable rows, preserve row ids if the source record returns them
40
+ - Nested subtable writes are still unsupported
41
+
42
+ ## Unsupported Direct-Write Fields
43
+
44
+ - `14` time range
45
+ - `34` image recognition
46
+ - `35` image generation
47
+ - `36` document parsing
48
+
49
+ Do not fake values for these fields in app-user writes. Stop and explain the limitation.
@@ -0,0 +1,63 @@
1
+ # Environment Switching
2
+
3
+ Use this reference before any data creation, update, delete, or workflow usage action.
4
+
5
+ ## Step 1: Resolve the active environment
6
+
7
+ Decide explicitly whether the task targets:
8
+
9
+ - `test`: demo, mock data, smoke usage validation, training scenarios
10
+ - `prod`: real operational data and live workflow actions
11
+
12
+ If the user did not specify an environment, default to `prod`.
13
+
14
+ ## Test Environment
15
+
16
+ Use test for:
17
+
18
+ - mock or smoke data entry
19
+ - business flow walkthroughs
20
+ - user acceptance demos
21
+ - data correction rehearsals
22
+
23
+ Test behavior:
24
+
25
+ - creating demo data is acceptable
26
+ - default to at least `5` records for mock or smoke datasets unless the user asks for fewer
27
+ - destructive cleanup is acceptable only when the record scope is explicit
28
+
29
+ Known current test backend:
30
+
31
+ - use an explicitly provided non-production backend
32
+
33
+ ## Production Environment
34
+
35
+ Use production for:
36
+
37
+ - live data entry
38
+ - live business record updates
39
+ - comments and workflow actions on real records
40
+ - controlled data correction or deletion
41
+
42
+ Production behavior:
43
+
44
+ - prefer search or get before any write
45
+ - restate the exact app and record scope before update or delete
46
+ - do not create mock, smoke, or demo data unless the user explicitly asks for it
47
+ - for bulk changes, summarize the target count before execution and the affected ids after execution
48
+ - destructive actions need explicit confirmation in the conversation context
49
+
50
+ Production guardrails:
51
+
52
+ - never assume a record id, app id, or workspace id
53
+ - treat `record_write(operation="delete")` as high risk
54
+ - if the task can be answered read-only, do not write
55
+
56
+ ## Reporting Rule
57
+
58
+ For app-user operations, always report:
59
+
60
+ - active environment
61
+ - target app
62
+ - operation type: read, create, update, delete, or workflow action
63
+ - affected record count or ids
@@ -0,0 +1,110 @@
1
+ # Record Patterns
2
+
3
+ If the task shifts into grouped analysis, ratio, ranking, trend, or any final statistical conclusion, switch to [$qingflow-record-analysis](../../qingflow-record-analysis/SKILL.md).
4
+
5
+ ## Browse Pattern
6
+
7
+ Use `record_schema_get -> record_list` when:
8
+
9
+ - the user wants to browse records
10
+ - the target `record_id` is unknown
11
+ - a delete or update target still needs confirmation
12
+ - the user needs sample rows or a small export
13
+
14
+ Keep the browse DSL simple:
15
+
16
+ - `columns`: field ids only
17
+ - `where`: flat AND filters only
18
+ - `order_by`: field sorting only
19
+ - `limit` and `page`: browsing intent only
20
+
21
+ Do not use `record_list` for grouped conclusions, ratios, rankings, trends, or any final statistical claim.
22
+
23
+ ## Detail Pattern
24
+
25
+ Use `record_schema_get -> record_get` when:
26
+
27
+ - the exact `record_id` is known
28
+ - the user needs one record in detail
29
+ - a write target needs verification before action
30
+
31
+ Prefer passing explicit `columns` when the user only needs a subset of fields.
32
+
33
+ ## Write Pattern
34
+
35
+ Use `record_schema_get -> record_write(mode="plan") -> record_write(mode="apply")`.
36
+
37
+ 1. Confirm the target app
38
+ 2. Resolve fields with `record_schema_get`
39
+ 3. Decide whether the task is `insert`, `update`, or `delete`
40
+ 4. Build SQL-like JSON clauses
41
+ 5. Run `record_write(mode="plan")`
42
+ 6. If blockers are empty, run `record_write(mode="apply")`
43
+ 7. For important writes, keep `verify_write=true`
44
+
45
+ ### Insert
46
+
47
+ ```json
48
+ {
49
+ "operation": "insert",
50
+ "mode": "plan",
51
+ "values": [
52
+ { "field_id": 12, "value": "测试客户" },
53
+ { "field_id": 18, "value": 1000 }
54
+ ],
55
+ "submit_type": "submit",
56
+ "verify_write": true
57
+ }
58
+ ```
59
+
60
+ ### Update
61
+
62
+ ```json
63
+ {
64
+ "operation": "update",
65
+ "mode": "plan",
66
+ "record_id": 123,
67
+ "set": [
68
+ { "field_id": 18, "value": 2000 }
69
+ ],
70
+ "verify_write": true
71
+ }
72
+ ```
73
+
74
+ ### Delete
75
+
76
+ ```json
77
+ {
78
+ "operation": "delete",
79
+ "mode": "plan",
80
+ "record_ids": [123, 124]
81
+ }
82
+ ```
83
+
84
+ ## Write Anti-Patterns
85
+
86
+ Do not do this:
87
+
88
+ - do not send raw SQL text
89
+ - do not build free-form `WHERE` updates or deletes
90
+ - do not invent formulas or expressions
91
+ - do not auto-fill missing required fields
92
+ - do not guess relation targets without first resolving them
93
+ - do not skip `mode="plan"` on non-trivial writes
94
+
95
+ ## Unsupported Direct Writes
96
+
97
+ Do not attempt direct app-user writes for these field types:
98
+
99
+ - `14` time range
100
+ - `34` image recognition
101
+ - `35` image generation
102
+ - `36` document parsing
103
+
104
+ If the payload includes them, stop at `record_write(mode="plan")` and explain that the tool does not support a reliable direct write for those fields yet.
105
+
106
+ ## Relation, Attachment, and Subtable Rules
107
+
108
+ - Relation fields are record-id based. Resolve the referenced target first, then write the relation field with the real `record_id`.
109
+ - Attachment fields are two-step: upload first with `file_upload_local`, then reuse the returned attachment payload in `record_write`.
110
+ - Subtable writes require the current schema shape; when updating existing subtable rows, preserve row ids if the current record exposes them.
@@ -0,0 +1,26 @@
1
+ # Workflow and Task Usage Actions
2
+
3
+ Use these when the user is operating inside an existing process, not redesigning it.
4
+
5
+ Examples:
6
+
7
+ - add a comment to a record
8
+ - approve or reject a workflow task
9
+ - transfer a task
10
+ - roll back a task
11
+ - list todo, initiated, done, or cc tasks
12
+ - inspect workload by worksheet or workflow node
13
+ - urge a pending task
14
+
15
+ Rules:
16
+
17
+ - if the user starts from inbox, todo, workload, cc, or bottleneck language, use `task_*` first
18
+ - use `task_summary` for headline counts
19
+ - use `task_list` for flat browsing
20
+ - use `task_facets` when worksheet or workflow-node buckets matter
21
+ - treat task counts as task-center counts, not record counts
22
+ - switch to `record_*` only after locating the exact business record behind a task
23
+ - identify the exact target first
24
+ - for approve or reject, identify the exact `workflow_node_id` first; prefer task-center results or current audit info, then use `task_approve` or `task_reject`
25
+ - avoid usage-side workflow actions on ambiguous records
26
+ - summarize the final action and target task ids or record ids