@josephyan/qingflow-app-user-mcp 0.2.0-beta.2 → 0.2.0-beta.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/README.md +12 -2
  2. package/npm/lib/runtime.mjs +37 -0
  3. package/npm/scripts/postinstall.mjs +5 -1
  4. package/package.json +3 -2
  5. package/pyproject.toml +1 -1
  6. package/skills/qingflow-app-user/SKILL.md +160 -0
  7. package/skills/qingflow-app-user/agents/openai.yaml +4 -0
  8. package/skills/qingflow-app-user/references/data-gotchas.md +59 -0
  9. package/skills/qingflow-app-user/references/environments.md +63 -0
  10. package/skills/qingflow-app-user/references/record-patterns.md +96 -0
  11. package/skills/qingflow-app-user/references/workflow-usage.md +24 -0
  12. package/skills/qingflow-record-analysis/SKILL.md +253 -0
  13. package/skills/qingflow-record-analysis/agents/openai.yaml +4 -0
  14. package/skills/qingflow-record-analysis/references/analysis-gotchas.md +141 -0
  15. package/skills/qingflow-record-analysis/references/analysis-patterns.md +113 -0
  16. package/skills/qingflow-record-analysis/references/confidence-reporting.md +92 -0
  17. package/src/qingflow_mcp/__init__.py +1 -1
  18. package/src/qingflow_mcp/builder_facade/models.py +294 -1
  19. package/src/qingflow_mcp/builder_facade/service.py +2727 -235
  20. package/src/qingflow_mcp/server.py +2 -0
  21. package/src/qingflow_mcp/server_app_builder.py +80 -4
  22. package/src/qingflow_mcp/server_app_user.py +3 -2
  23. package/src/qingflow_mcp/solution/compiler/form_compiler.py +1 -1
  24. package/src/qingflow_mcp/solution/compiler/workflow_compiler.py +21 -2
  25. package/src/qingflow_mcp/solution/executor.py +34 -7
  26. package/src/qingflow_mcp/tools/ai_builder_tools.py +1038 -30
  27. package/src/qingflow_mcp/tools/app_tools.py +1 -2
  28. package/src/qingflow_mcp/tools/record_tools.py +1249 -767
  29. package/src/qingflow_mcp/tools/workflow_tools.py +78 -4
package/README.md CHANGED
@@ -3,13 +3,13 @@
  Install:

  ```bash
- npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.2
+ npm install @josephyan/qingflow-app-user-mcp@0.2.0-beta.20
  ```

  Run:

  ```bash
- npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.2 qingflow-app-user-mcp
+ npx -y -p @josephyan/qingflow-app-user-mcp@0.2.0-beta.20 qingflow-app-user-mcp
  ```

  Environment:
@@ -19,3 +19,13 @@ Environment:
  - `QINGFLOW_MCP_HOME`

  This package bootstraps a local Python runtime on first install and then starts the `qingflow-app-user-mcp` stdio MCP server.
+
+ Bundled skills:
+
+ - `skills/qingflow-app-user`
+ - `skills/qingflow-record-analysis`
+
+ Note:
+
+ - The skill files are included in the npm package.
+ - On install, the package copies them to `$CODEX_HOME/skills` (or `~/.codex/skills` if `CODEX_HOME` is unset).
package/npm/lib/runtime.mjs CHANGED
@@ -29,6 +29,43 @@ export function getPackageRoot(metaUrl) {
    return path.resolve(path.dirname(fileURLToPath(metaUrl)), "..", "..");
  }

+ export function getCodexHome() {
+   const configured = process.env.CODEX_HOME?.trim();
+   if (configured) {
+     return path.resolve(configured);
+   }
+   const home = process.env.HOME || process.env.USERPROFILE;
+   if (!home) {
+     throw new Error("Cannot resolve CODEX_HOME because HOME is not set.");
+   }
+   return path.join(home, ".codex");
+ }
+
+ export function installBundledSkills(packageRoot) {
+   const skillsSrc = path.join(packageRoot, "skills");
+   if (!fs.existsSync(skillsSrc)) {
+     return { installed: [], skipped: true, destination: null };
+   }
+
+   const codexHome = getCodexHome();
+   const skillsDestRoot = path.join(codexHome, "skills");
+   fs.mkdirSync(skillsDestRoot, { recursive: true });
+
+   const installed = [];
+   for (const entry of fs.readdirSync(skillsSrc, { withFileTypes: true })) {
+     if (!entry.isDirectory()) {
+       continue;
+     }
+     const src = path.join(skillsSrc, entry.name);
+     const dest = path.join(skillsDestRoot, entry.name);
+     fs.rmSync(dest, { recursive: true, force: true });
+     fs.cpSync(src, dest, { recursive: true });
+     installed.push(entry.name);
+   }
+
+   return { installed, skipped: false, destination: skillsDestRoot };
+ }
+
  export function getVenvDir(packageRoot) {
    return path.join(packageRoot, ".npm-python");
  }
package/npm/scripts/postinstall.mjs CHANGED
@@ -1,4 +1,4 @@
- import { ensurePythonEnv, getPackageRoot } from "../lib/runtime.mjs";
+ import { ensurePythonEnv, getPackageRoot, installBundledSkills } from "../lib/runtime.mjs";

  const packageRoot = getPackageRoot(import.meta.url);

@@ -6,6 +6,10 @@ try {
    console.log("[qingflow-mcp] Bootstrapping Python runtime...");
    ensurePythonEnv(packageRoot, { commandName: "qingflow-app-user-mcp" });
    console.log("[qingflow-mcp] Python runtime is ready.");
+   const skills = installBundledSkills(packageRoot);
+   if (!skills.skipped) {
+     console.log(`[qingflow-mcp] Installed skills to ${skills.destination}: ${skills.installed.join(", ")}`);
+   }
  } catch (error) {
    console.error(`[qingflow-mcp] postinstall failed: ${error.message}`);
    process.exit(1);
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@josephyan/qingflow-app-user-mcp",
-   "version": "0.2.0-beta.2",
+   "version": "0.2.0-beta.20",
    "description": "Operational end-user MCP for Qingflow records, tasks, comments, and directory workflows.",
    "license": "MIT",
    "type": "module",
@@ -18,7 +18,8 @@
      "src/qingflow_mcp/py.typed",
      "qingflow-app-user-mcp",
      "npm/",
-     "docs/local-agent-install.md"
+     "docs/local-agent-install.md",
+     "skills/"
    ],
    "engines": {
      "node": ">=16.16.0"
package/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "qingflow-mcp"
- version = "0.2.0b2"
+ version = "0.2.0b20"
  description = "User-authenticated MCP server for Qingflow"
  readme = "README.md"
  license = "MIT"
package/skills/qingflow-app-user/SKILL.md ADDED
@@ -0,0 +1,160 @@
+ ---
+ name: qingflow-app-user
+ description: Use Qingflow apps as an operational end user after the MCP is already connected and authenticated. Use when the user wants to create, search, read, update, or delete business records, inspect or manage task-center work, add comments, or perform workflow usage actions inside an existing app. Do not use this skill to design apps, modify schemas, or build a brand new SolutionSpec.
+ metadata:
+   short-description: Use Qingflow apps for business data and task operations
+ ---
+
+ # Qingflow App User
+
+ ## Overview
+
+ This skill is for business-user operations inside existing Qingflow apps. It focuses on records, task-center usage, comments, and usage-side workflow actions, not app design or system configuration. If the task is about building or changing app structure, switch to `$qingflow-app-builder`.
+
+ If the user is asking for analysis, grouped distributions, rankings, trends, averages, business insights, or any final statistical conclusion, switch to `$qingflow-record-analysis` instead of keeping that logic inside this skill.
+
+ Before operating on data, identify whether the task targets `test` or `prod` and read [references/environments.md](references/environments.md). If the user did not specify one, default to `prod`.
+ When the task is in `prod`, when browser parity matters, or when the user says "the page has data but MCP does not", restate the expected `base_url` and `qf_version`, then prefer tools that expose `request_route` so you can confirm the live route before concluding.
+
+ ## Tool Scope
+
+ Primary record and data tools:
+
+ - `record_query`
+ - `record_schema_get`
+ - `record_write_plan`
+ - `record_create`
+ - `record_get`
+ - `record_update`
+ - `record_delete`
+
+ Directory and organization lookup tools, for when the user asks about internal members, departments, org structure, ownership, or approver candidates, or wants full contact exports:
+
+ - `directory_search`
+ - `directory_list_internal_users`
+ - `directory_list_all_internal_users`
+ - `directory_list_internal_departments`
+ - `directory_list_all_departments`
+ - `directory_list_sub_departments`
+ - `directory_list_external_members`
+
+ Usage-side collaboration and flow tools when needed:
+
+ - `record_comment_*`
+ - `task_approve`
+ - `task_reject`
+ - `task_rollback*`
+ - `task_transfer*`
+
+ Task-center and inbox tools when the user is asking about pending work, processed work, cc, or workflow workload:
+
+ - `task_list`
+ - `task_list_grouped`
+ - `task_statistics`
+ - `task_urge`
+
+ Do not use builder-side tools here:
+
+ - `app_*`
+ - `view_*`
+ - `workflow_*`
+ - `portal_*`
+ - `navigation_*`
+ - `package_*`
+ - `solution_*`
+
+ ## Standard Operating Order
+
+ 1. Ensure auth exists
+ 2. Ensure a workspace is selected
+ 3. Confirm the target app, task scope, and operation type
+ 4. For org, member, department, approver, or ownership questions, start with `directory_*`
+ 5. For inbox, pending, processed, cc, or workload questions, start with `task_statistics`, `task_list`, or `task_list_grouped`
+ 6. When a task query identifies the target record, switch to `record_get` or `record_query` for business data details
+ 7. For non-trivial record reads, start with `record_query`
+ 8. For non-trivial writes, start with `record_write_plan`, especially when using `fields`
+ 9. Prefer read-first when changing existing records
+ 10. Report the affected task ids, record ids, member ids, department ids, or counts after actions
+ 11. For `prod`, complex forms, attachments, or any unfamiliar schema, prefer `record_create(..., verify_write=true)` or read back immediately after create/update
+
+ ## Data Rules
+
+ - Prefer `record_query` as the default read entry point
+ - Treat `record_query(list)` as the default wide-table browse and export endpoint; pass explicit `select_columns`, do not expect raw answer arrays there, and let the tool auto-batch columns when the backend per-request field cap is hit
+ - For analysis, grouped distributions, trends, or final statistical conclusions, switch to `$qingflow-record-analysis`
+ - Use `request_route` from tool responses to verify the active `base_url` and `qf_version` whenever route mismatches are plausible
+ - Use `directory_search` for fuzzy internal lookup across both members and departments
+ - Use `directory_list_all_internal_users` when the user explicitly wants a complete internal member list within the current workspace or within a specific department or role
+ - Use `directory_list_all_departments` when the user explicitly wants the full department tree or all departments under a root
+ - Use `directory_list_internal_departments` for keyword-based department search, not full exports
+ - Use `task_statistics` before `task_list` when the user only needs counts
+ - Use `task_list_grouped` when worksheet or group buckets matter
+ - Use `task_urge` only when the user clearly wants a reminder sent for a pending task
+ - Use `record_schema_get` when field selectors are ambiguous; if the task then turns into analysis, switch to `$qingflow-record-analysis`
+ - For precise record lookup, use `record_get` when `apply_id` is known
+ - Use `record_schema_get` when the user gives field titles and you are not fully sure about the exact schema; do not guess ambiguous fields silently
+ - If the task has already shifted into analysis and `record_schema_get` still leaves multiple plausible fields, stop and ask the user to confirm the intended field instead of continuing to try read tools in a loop
+ - Treat field selectors as schema-first and platform-generic. Prefer exact field titles, then neutral aliases such as `创建时间`, `新增时间`, `负责人`, `部门`, `时间`, or `阶段` only when the tool resolves them clearly. Do not assume CRM shorthand like `销售`, `商机阶段`, `客户全称`, or similar domain shortcuts apply across arbitrary Qingflow apps
+ - For updates, inspect current data first unless the user already provided the exact target and patch
+ - For deletes, confirm the exact record scope and report the deleted ids
+ - When validating business data volume, use `effective_count` over raw backend totals
+ - In `prod`, prefer read-first even more strictly and avoid deletes unless the record scope is explicit in the conversation
+ - For attachments, first run `file_upload_local`, then pass the returned `attachment_value` into `record_create` or `record_update`; do not try to write local file paths directly into attachment fields
+ - For relation fields, first query the target app and resolve the referenced record `apply_id`; do not assume titles, numbers, or business keys can be written directly into a relation field
+ - For subtable fields, write a list of row objects keyed by the subfield titles. When updating existing rows, include `rowId` / `row_id` / `__row_id__` only if the source record already exposes it
+ - Treat `14/34/35/36` as unsupported direct-write field types in app-user flows:
+   - `14`: time range
+   - `34`: image recognition
+   - `35`: image generation
+   - `36`: document parsing
+ - For those unsupported types, stop and explain the limitation instead of inventing payloads
+ - Use `record_write_plan` to inspect `write_format.support_level` before non-trivial writes:
+   - `full`: generic scalar/select/date writes are directly supported
+   - `restricted`: member/department/attachment/relation/subtable writes need the documented presteps
+   - `unsupported`: stop and explain the limitation
+ - For relation-heavy, attachment, subtable, or production writes, default to `verify_write=true` so field drops are surfaced immediately instead of being reported as success
+
+ ## Mock and Demo Data
+
+ When the user asks for demo data, seed data, smoke data, or mock data:
+
+ - default to at least `5` records for the relevant entity unless the user asks for fewer
+ - keep titles realistic and business-like
+ - vary statuses, dates, and categories enough to make views and charts useful
+ - if the task is `prod`, do not create mock or smoke data unless the user explicitly asks for it
+
+ ## Response Interpretation
+
+ - `record_query(query_mode="list")` is browse/sample output, not a final analysis result
+ - If `record_query(query_mode="list")` reports `row_cap_hit`, `sample_only`, or capped rows, do not present it as full data
+ - For grouped distributions, trends, or final statistical conclusions, switch to `$qingflow-record-analysis` and use `record_schema_get -> record_analyze`
+ - `record_write_plan` is static preflight, not a guarantee that submit will pass runtime linkage or visibility checks
+ - `record_create` now returns an integer `apply_id`; you can pass that id directly into `record_get`, `record_update`, or `record_delete`
+ - `verify_write=true` means the tool read the record back and compared the written fields; if it returns `status=verification_failed` or `ok=false`, do not report the create or update as successful
+ - Relation writes are `apply_id`-based; if the user only gives a title, number, or business key, query the target app first and resolve the real record id before writing
+ - Task counts and record counts are not interchangeable; a task query reflects task-center workload, not the underlying record total
+ - When reporting task results, include the task dimension that was used, such as pending, processed, cc, node, or worksheet
+ - Prefer summarizing titles and counts instead of dumping raw answer arrays
+ - When records reference other entities, verify that the references are coherent before reporting success
+ - `file_upload_local` may transparently change `effective_upload_kind` and `upload_protocol`; surface those fields when debugging production upload behavior instead of assuming all uploads are direct `PUT`
+
+ ## Practical Patterns
+
+ - Bulk mock data creation: query current data first, run `record_write_plan`, then create the missing records
+ - Data correction: query, inspect, preflight, update, and re-read
+ - Inbox triage: use `task_statistics` first, then `task_list` or `task_list_grouped`, then switch to `record_*` for the underlying record when needed
+ - Bottleneck analysis: start with `task_statistics` and `task_list_grouped` before drilling into specific records
+ - Workflow collaboration: comment, transfer, or reassign only after identifying the exact record
+ - Approval actions: identify the exact record and current node first, then use `task_approve` or `task_reject`; do not guess `nodeId`
+ - Demo validation: create at least `5` rows and confirm they are queryable
+ - Org export: use `directory_list_all_internal_users` for full member exports and `directory_list_all_departments` for full org-tree exports before mapping owners or departments into record operations
+ - Attachment write: upload first, write the returned URL object second, and prefer `verify_write=true`
+ - Relation write: query the target app first, capture the referenced record `apply_id`, then write the relation field and verify the readback
+ - Production discrepancy triage: compare the response `request_route` with the browser environment before assuming the data query is wrong
+ ## Resources
+
+ - Environment switching: [references/environments.md](references/environments.md)
+ - Record operation patterns: [references/record-patterns.md](references/record-patterns.md)
+ - Workflow usage actions: [references/workflow-usage.md](references/workflow-usage.md)
+ - Data gotchas: [references/data-gotchas.md](references/data-gotchas.md)
+ - Dedicated analysis workflow: [qingflow-record-analysis](/Users/yanqidong/Documents/qingflow-next/.codex/skills/qingflow-record-analysis/SKILL.md)
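The attachment and verification rules in the skill above are prose-only; here is a minimal agent-side sketch of the two-step flow (not part of the packaged skill files). Only `file_upload_local`, `attachment_value`, `verify_write`, and the failure statuses come from the skill text; the `call_tool` helper, the `app_id` parameter name, the `附件` field title, and the response shapes are assumptions.

```python
# Illustrative sketch, not part of the packaged skill files.
def call_tool(name: str, arguments: dict) -> dict:
    """Placeholder for however an MCP client invokes a server tool."""
    print(f"-> {name}({arguments})")
    return {}

# Step 1: upload the local file first; attachment fields do not accept raw paths.
upload = call_tool("file_upload_local", {"path": "/tmp/report.pdf"})

# Step 2: write the returned attachment_value into the record and verify the write.
result = call_tool(
    "record_create",
    {
        "app_id": "<target app id>",  # parameter name assumed
        "fields": {"附件": upload.get("attachment_value")},  # field title is hypothetical
        "verify_write": True,
    },
)

# Step 3: per the skill rules, a failed verification means the write is not successful.
if result.get("status") == "verification_failed" or result.get("ok") is False:
    print("Write not confirmed; inspect the dropped or empty fields before reporting success.")
```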
package/skills/qingflow-app-user/agents/openai.yaml ADDED
@@ -0,0 +1,4 @@
+ interface:
+   display_name: "Qingflow App User"
+   short_description: "Use Qingflow apps for business data and task operations"
+   default_prompt: "Use $qingflow-app-user for ordinary Qingflow record, task, comment, and directory operations. If the task shifts into grouped analysis, insight generation, ranking, trend, or final statistical conclusions, switch to $qingflow-record-analysis instead of keeping the logic here."
package/skills/qingflow-app-user/references/data-gotchas.md ADDED
@@ -0,0 +1,59 @@
+ # Data Gotchas
+
+ For final statistics, grouped distributions, or insight-style analysis, use [$qingflow-record-analysis](/Users/yanqidong/Documents/qingflow-next/.codex/skills/qingflow-record-analysis/SKILL.md) instead of keeping that reasoning inside `$qingflow-app-user`.
+
+ ## Counts
+
+ - Prefer `effective_count`
+ - For final analysis, inspect `record_analyze.data.completeness` and `safe_for_final_conclusion` before concluding
+ - If `record_analyze.status!=success`, treat the result as exploratory unless the user explicitly asked for a partial sample
+ - `record_query(list)` is for browsing and sample inspection. If it reports `row_cap_hit`, `sample_only`, or capped `returned_items`, do not present it as full data
+ - When coverage matters, surface:
+   - `scanned_count`
+   - `presentation.statement_scope`
+ - Use narrower views, filters, or smaller analysis questions instead of inventing manual scan settings by hand
+ - If the browser and MCP disagree, compare `request_route.base_url` and `request_route.qf_version` first
+ - Do not mix a full aggregate total with sample-only list detail in one sentence such as "基于全部数据分析" ("based on all the data"); split the answer into `全量结论` (full-data conclusions) and `样本观察` (sample observations)
+
+ ## Record titles
+
+ - Do not dump raw answer arrays to the user unless needed
+ - Prefer concise business titles and counts
+
+ ## Preflight
+
+ - `record_write_plan` is static preflight only; linked visibility and runtime required rules can still reject writes
+ - `record_write_plan` now exposes `write_format.support_level`; check `full / restricted / unsupported` before attempting non-trivial writes
+ - Use `record_schema_get` when field titles are uncertain instead of guessing ids
+ - For analysis tasks, use the fixed path `record_schema_get -> record_analyze`; do not switch tools blindly after `FIELD_NOT_FOUND` or ambiguity
+ - Prefer `strict_full=true` for final statistics or business conclusions
+ - `record_create` and `record_update` can do post-write verification with `verify_write=true`; use that for complex, subtable, or production writes
+ - `apply_id` is normalized to an integer; pass it directly into later record tools
+
+ ## Mock data
+
+ - Default to at least `5` rows per relevant entity unless the user asked for fewer
+ - Avoid identical titles and identical statuses across all rows
+ - Keep relation references valid
+
+ ## Attachments
+
+ - Attachment fields are two-step: upload first, then write the returned URL object into the record
+ - `file_upload_local` may report `effective_upload_kind=login` even when the requested kind was `attachment`; this is an implementation fallback, not necessarily an error
+ - When debugging uploads, surface both `effective_upload_kind` and `upload_protocol`
+
+ ## Subtables
+
+ - Subtable fields accept row objects keyed by subfield title, or native `tableValues`
+ - Use the current form schema's subfield titles; do not guess nested ids
+ - When updating existing subtable rows, preserve `rowId` if the source record returns it
+ - Nested subtable writes are still unsupported
+
+ ## Unsupported direct-write fields
+
+ - `14` time range
+ - `34` image recognition
+ - `35` image generation
+ - `36` document parsing
+
+ Do not fake values for these fields in app-user writes. Stop and explain the limitation.
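The subtable notes above describe the expected payload shape only in words; here is a minimal sketch of that shape (not part of the packaged skill files). Only the row-object-keyed-by-subfield-title convention and the optional `rowId` come from the notes; the field and subfield titles are hypothetical.

```python
# Illustrative sketch, not part of the packaged skill files.
# One subtable value: a list of row objects keyed by subfield title.
subtable_rows = [
    # Updating an existing row: keep the rowId that the source record exposed.
    {"rowId": "row-123", "数量": 3, "单价": 19.9},  # subfield titles are hypothetical
    # Appending a new row: no rowId, only subfield titles mapped to values.
    {"数量": 1, "单价": 42.0},
]

# The subtable value is passed like any other field, keyed by the subtable's own title.
fields = {"明细": subtable_rows}  # field title is hypothetical
print(fields)
```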
package/skills/qingflow-app-user/references/environments.md ADDED
@@ -0,0 +1,63 @@
+ # Environment Switching
+
+ Use this reference before any data creation, update, delete, or workflow usage action.
+
+ ## Step 1: Resolve the active environment
+
+ Decide explicitly whether the task targets:
+
+ - `test`: demo, mock data, smoke usage validation, training scenarios
+ - `prod`: real operational data and live workflow actions
+
+ If the user did not specify an environment, default to `prod`.
+
+ ## Test Environment
+
+ Use test for:
+
+ - mock or smoke data entry
+ - business flow walkthroughs
+ - user acceptance demos
+ - data correction rehearsals
+
+ Test behavior:
+
+ - creating demo data is acceptable
+ - default to at least `5` records for mock or smoke datasets unless the user asks for fewer
+ - destructive cleanup is acceptable only when the record scope is explicit
+
+ Known current test backend:
+
+ - use an explicitly provided non-production backend
+
+ ## Production Environment
+
+ Use production for:
+
+ - live data entry
+ - live business record updates
+ - comments and workflow actions on real records
+ - controlled data correction or deletion
+
+ Production behavior:
+
+ - prefer search or get before any write
+ - restate the exact app and record scope before update or delete
+ - do not create mock, smoke, or demo data unless the user explicitly asks for it
+ - for bulk changes, summarize the target count before execution and the affected ids after execution
+ - destructive actions need explicit confirmation in the conversation context
+
+ Production guardrails:
+
+ - never assume a record id, app id, or workspace id
+ - treat `record_delete` as high risk
+ - if the task can be answered read-only, do not write
+
+ ## Reporting Rule
+
+ For app-user operations, always report:
+
+ - active environment
+ - target app
+ - operation type: read, create, update, delete, or workflow action
+ - affected record count or ids
package/skills/qingflow-app-user/references/record-patterns.md ADDED
@@ -0,0 +1,96 @@
+ # Record Patterns
+
+ If the task shifts into grouped analysis, ratios, rankings, trends, or final statistical conclusions, switch to [$qingflow-record-analysis](/Users/yanqidong/Documents/qingflow-next/.codex/skills/qingflow-record-analysis/SKILL.md).
+
+ ## Query first
+
+ Use `record_query` first when:
+
+ - the user only gives a title or business key
+ - the target record id is unknown
+ - updates or deletes need confirmation
+ - ordinary list browsing or spot checks are needed
+
+ Use [$qingflow-record-analysis](/Users/yanqidong/Documents/qingflow-next/.codex/skills/qingflow-record-analysis/SKILL.md) when:
+
+ - field titles may be ambiguous
+ - filters are still in natural-language shape
+ - the result may be used as a final conclusion
+ - scan scope or completeness is unclear
+ - the user asks for a distribution, ratio, ranking, top-N, or any grouped aggregate
+ - the user asks for `分析 / 洞察 / 分布 / 占比 / 平均 / 排名 / 趋势 / 所有 / 全部 / 全国 / 高价值` (analysis / insight / distribution / proportion / average / ranking / trend / all / entire / nationwide / high-value)
+
+ ## Final analysis pattern
+
+ 1. Run `record_schema_get`
+ 2. Generate one or more field_id-based DSLs
+ 3. Run `record_analyze(strict_full=true)` for summary/distribution/trend/cross analysis
+ 4. Run `record_query(query_mode="list")` only if you still need sample rows or examples
+ 5. Report `scanned_count`, `presentation.statement_scope`, and whether the result is safe for a final conclusion
+ 6. If `status=error` or `safe_for_final_conclusion=false`, stop at "partial result" instead of presenting a final business conclusion
+ 7. If list rows are sample-only, separate the answer into:
+    - `全量可信结论` (trusted full-data conclusions)
+    - `样本观察(不作为最终结论)` (sample observations, not final conclusions)
+    - optional `待验证假设` (hypotheses still to verify)
+
+ ## Analysis anti-pattern
+
+ Do not do this:
+
+ 1. Run only `record_query(query_mode="list")`
+ 2. Get `200` rows back
+ 3. Report averages, proportions, and regional distributions as if they were based on all records
+
+ This is not acceptable because the list endpoint can be capped. Use `record_schema_get -> record_analyze` first, then treat list rows as sample-only evidence.
+
+ ## Create pattern
+
+ 1. Confirm the target app
+ 2. Resolve fields with `record_schema_get` if needed. Prefer exact schema titles first; only rely on platform-neutral aliases such as `创建时间`, `负责人`, or `部门` when they resolve cleanly, and do not assume business-domain shorthand like `销售` is portable across apps
+ 3. Run `record_write_plan` for non-trivial payloads or any `fields`-based write
+ 4. For relation fields, query the target app first and resolve the referenced record `apply_id`
+ 5. For attachments, call `file_upload_local` first and reuse the returned `attachment_value`
+ 6. For subtable fields, pass a list of row objects keyed by subfield title. When updating existing rows, include `rowId` / `row_id` / `__row_id__` only if the current record already exposes it
+ 7. Inspect `record_write_plan.data.support_matrix` or each field's `write_format.support_level` before submit:
+    - `full`: direct write is supported
+    - `restricted`: follow the documented presteps first
+    - `unsupported`: stop and explain the limitation
+ 8. For complex forms, production writes, attachments, relation-heavy payloads, or subtables, create with `verify_write=true`
+ 9. If verification fails, treat the write as not yet successful and inspect the missing or empty fields before reporting back
+ 10. Re-query or fetch the record when validation matters
+
+ ## Update pattern
+
+ 1. Query the target records
+ 2. Resolve the exact `apply_id`
+ 3. Run `record_write_plan`
+ 4. Update only the intended fields
+ 5. Prefer `verify_write=true` for attachment, relation, subtable, or production updates
+ 6. Re-read the record if the change is important, attachment-related, subtable-related, or the form has linkage
+
+ ## Delete pattern
+
+ 1. Query or fetch the exact record first
+ 2. Confirm the target ids
+ 3. Delete
+ 4. Report the affected ids and, when relevant, the remaining count
+
+ ## Unsupported direct writes
+
+ Do not attempt direct app-user writes for these field types:
+
+ - `14` time range
+ - `34` image recognition
+ - `35` image generation
+ - `36` document parsing
+
+ If the payload includes them, stop at `record_write_plan` and explain that the tool does not build a reliable native payload for those fields yet.
+
+ ## Relation fields
+
+ Relation fields are record-id based.
+
+ - Query the referenced app first
+ - Resolve the target record `apply_id`
+ - Write the relation field with that id
+ - Do not write relation fields with display titles, business keys, or guessed identifiers unless they have already been resolved to the real record id
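The final analysis pattern above can also be read as a small control-flow sketch (not part of the packaged skill files). Only the tool names, `strict_full`, `scanned_count`, and `safe_for_final_conclusion` come from the pattern; the `call_tool` helper, parameter names, DSL shape, and response nesting are assumptions.

```python
# Illustrative sketch, not part of the packaged skill files.
def call_tool(name: str, arguments: dict) -> dict:
    """Placeholder for however an MCP client invokes a server tool."""
    print(f"-> {name}({arguments})")
    return {}

# 1. Resolve exact field ids before building any field_id-based analysis DSL.
schema = call_tool("record_schema_get", {"app_id": "<target app id>"})

# 2. Run the analysis with strict_full=true so the result is not a capped sample.
analysis = call_tool(
    "record_analyze",
    {
        "app_id": "<target app id>",  # parameter name assumed
        "dsl": {"group_by": "<field_id>", "metric": "count"},  # DSL shape assumed
        "strict_full": True,
    },
)

# 3. Only state a final business conclusion when the tool marks it as safe.
data = analysis.get("data", {})
if analysis.get("status") == "success" and data.get("safe_for_final_conclusion"):
    print(f"Full-data conclusion over {data.get('scanned_count')} scanned records.")
else:
    print("Partial result: report it as a sample observation, not a final conclusion.")
```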
package/skills/qingflow-app-user/references/workflow-usage.md ADDED
@@ -0,0 +1,24 @@
+ # Workflow and Task Usage Actions
+
+ Use these when the user is operating inside an existing process, not redesigning it.
+
+ Examples:
+
+ - add a comment to a record
+ - approve or reject a workflow task
+ - transfer a task
+ - roll back a record
+ - list pending, processed, or cc tasks
+ - urge a pending task
+
+ Rules:
+
+ - if the user starts from inbox, todo, workload, cc, or bottleneck language, use `task_*` first
+ - use `task_statistics` for counts and `task_list` or `task_list_grouped` for browsing
+ - use `task_list_grouped` when grouped workload browsing matters
+ - treat task counts as task-center counts, not record counts
+ - switch to `record_*` after locating the exact business record behind a task
+ - identify the exact record first
+ - for approve or reject, identify the exact `nodeId` first; prefer task-center results or audit info, then use `task_approve` or `task_reject`
+ - avoid usage-side flow actions on ambiguous records
+ - summarize the final action and target task ids or record ids
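The task-usage rules above imply a counts-first triage order; here is a minimal sketch of that order (not part of the packaged skill files). Only the tool names and the statistics-before-list ordering come from the rules; the `call_tool` helper, parameter names, and response shapes are assumptions.

```python
# Illustrative sketch, not part of the packaged skill files.
def call_tool(name: str, arguments: dict) -> dict:
    """Placeholder for however an MCP client invokes a server tool."""
    print(f"-> {name}({arguments})")
    return {}

# 1. Counts first: task_statistics answers "how many pending" without listing items.
stats = call_tool("task_statistics", {"scope": "pending"})  # parameter name assumed

# 2. Browse only if the user needs the actual items, and report which task
#    dimension (pending, processed, cc, node, worksheet) the listing used.
tasks = call_tool("task_list", {"scope": "pending"})

# 3. Task counts are task-center workload, not record totals; switch to record
#    tools once the underlying business record behind a task is identified.
for task in tasks.get("items", []):  # response shape assumed
    call_tool("record_get", {"apply_id": task.get("apply_id")})
```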