@josephyan/qingflow-mcp 0.1.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +517 -0
- package/docs/local-agent-install.md +213 -0
- package/entry_point.py +13 -0
- package/npm/bin/qingflow-mcp.mjs +7 -0
- package/npm/lib/runtime.mjs +146 -0
- package/npm/scripts/postinstall.mjs +12 -0
- package/package.json +34 -0
- package/pyproject.toml +63 -0
- package/qingflow-mcp +15 -0
- package/src/qingflow_mcp/__init__.py +5 -0
- package/src/qingflow_mcp/__main__.py +5 -0
- package/src/qingflow_mcp/backend_client.py +336 -0
- package/src/qingflow_mcp/config.py +166 -0
- package/src/qingflow_mcp/errors.py +66 -0
- package/src/qingflow_mcp/json_types.py +18 -0
- package/src/qingflow_mcp/server.py +70 -0
- package/src/qingflow_mcp/session_store.py +235 -0
- package/src/qingflow_mcp/solution/__init__.py +6 -0
- package/src/qingflow_mcp/solution/build_assembly_store.py +137 -0
- package/src/qingflow_mcp/solution/compiler/__init__.py +265 -0
- package/src/qingflow_mcp/solution/compiler/chart_compiler.py +96 -0
- package/src/qingflow_mcp/solution/compiler/form_compiler.py +456 -0
- package/src/qingflow_mcp/solution/compiler/icon_utils.py +113 -0
- package/src/qingflow_mcp/solution/compiler/navigation_compiler.py +57 -0
- package/src/qingflow_mcp/solution/compiler/package_compiler.py +19 -0
- package/src/qingflow_mcp/solution/compiler/portal_compiler.py +60 -0
- package/src/qingflow_mcp/solution/compiler/view_compiler.py +51 -0
- package/src/qingflow_mcp/solution/compiler/workflow_compiler.py +134 -0
- package/src/qingflow_mcp/solution/design_session.py +222 -0
- package/src/qingflow_mcp/solution/design_store.py +100 -0
- package/src/qingflow_mcp/solution/executor.py +2064 -0
- package/src/qingflow_mcp/solution/normalizer.py +23 -0
- package/src/qingflow_mcp/solution/run_store.py +221 -0
- package/src/qingflow_mcp/solution/spec_models.py +755 -0
- package/src/qingflow_mcp/tools/__init__.py +1 -0
- package/src/qingflow_mcp/tools/app_tools.py +239 -0
- package/src/qingflow_mcp/tools/approval_tools.py +481 -0
- package/src/qingflow_mcp/tools/auth_tools.py +496 -0
- package/src/qingflow_mcp/tools/base.py +81 -0
- package/src/qingflow_mcp/tools/directory_tools.py +476 -0
- package/src/qingflow_mcp/tools/file_tools.py +375 -0
- package/src/qingflow_mcp/tools/navigation_tools.py +177 -0
- package/src/qingflow_mcp/tools/package_tools.py +142 -0
- package/src/qingflow_mcp/tools/portal_tools.py +100 -0
- package/src/qingflow_mcp/tools/qingbi_report_tools.py +258 -0
- package/src/qingflow_mcp/tools/record_tools.py +4305 -0
- package/src/qingflow_mcp/tools/role_tools.py +94 -0
- package/src/qingflow_mcp/tools/solution_tools.py +1860 -0
- package/src/qingflow_mcp/tools/task_tools.py +677 -0
- package/src/qingflow_mcp/tools/view_tools.py +324 -0
- package/src/qingflow_mcp/tools/workflow_tools.py +311 -0
- package/src/qingflow_mcp/tools/workspace_tools.py +143 -0
|
@@ -0,0 +1,4305 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from datetime import UTC, datetime
|
|
7
|
+
from typing import cast
|
|
8
|
+
|
|
9
|
+
from mcp.server.fastmcp import FastMCP
|
|
10
|
+
|
|
11
|
+
from ..config import DEFAULT_PROFILE, DEFAULT_RECORD_LIST_TYPE
|
|
12
|
+
from ..errors import QingflowApiError, raise_tool_error
|
|
13
|
+
from ..json_types import JSONObject, JSONScalar, JSONValue
|
|
14
|
+
from .base import ToolBase
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# --- Paging / output-shaping defaults -----------------------------------
DEFAULT_QUERY_PAGE_SIZE = 50  # backend page size when the caller does not choose one
DEFAULT_SCAN_MAX_PAGES = 10  # hard cap on pages scanned per aggregate/summary call
DEFAULT_ROW_LIMIT = 200  # max flattened rows returned by list mode
DEFAULT_OUTPUT_PROFILE = "compact"
MAX_LIST_COLUMN_LIMIT = 20  # column caps applied when trimming wide tables
MAX_RECORD_COLUMN_LIMIT = 20
MAX_SUMMARY_PREVIEW_COLUMN_LIMIT = 6
BACKEND_LIST_SEARCH_FIELD_LIMIT = 10
# Tools that record_query_plan knows how to preflight.
SUPPORTED_QUERY_TOOLS = {"record_query", "record_aggregate", "record_get"}
# --- Question-type (queType) code sets ----------------------------------
# NOTE(review): these integer codes look like Qingflow form question-type
# ids (member, department, date, select, ...) — confirm against the
# Qingflow open-API docs before extending them.
MEMBER_QUE_TYPES = {5}
DEPARTMENT_QUE_TYPES = {22}
DATE_QUE_TYPES = {4}
SINGLE_SELECT_QUE_TYPES = {10, 11}
MULTI_SELECT_QUE_TYPES = {12, 15}
BOOLEAN_QUE_TYPES: set[int] = set()  # intentionally empty: no boolean queType mapped yet
ATTACHMENT_QUE_TYPES = {13}
RELATION_QUE_TYPES = {25}
SUBTABLE_QUE_TYPES = {18}
# queTypes whose writes cannot be verified after submit.
VERIFY_UNSUPPORTED_WRITE_QUE_TYPES = {14, 34, 35, 36}
DEPARTMENT_MEMBER_JUDGE_PREFIX = "deptId_"
# --- Filter judge-type codes (comparison operators sent to the backend) --
JUDGE_EQUAL = 0
JUDGE_UNEQUAL = 1
JUDGE_GREATER_OR_EQUAL = 5
JUDGE_LESS_OR_EQUAL = 7
JUDGE_EQUAL_ANY = 9
JUDGE_FUZZY_MATCH = 19
JUDGE_INCLUDE_ANY = 20
MATCH_TYPE_ACCURACY = 1
# --- Filter scope codes ---------------------------------------------------
SCOPE_ALL = 1
SCOPE_NOT_EMPTY = 2
SCOPE_EMPTY = 3
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass(slots=True)
class FormField:
    """One question (field) of a Qingflow form, normalized from the raw schema."""

    que_id: int  # numeric field id used by the backend
    que_title: str  # human-visible field title
    que_type: int | None  # queType code (see *_QUE_TYPES sets above); None if absent in schema
    required: bool
    readonly: bool
    system: bool  # system-managed field (not writable by callers)
    options: list[str]  # option labels for select-style fields
    aliases: list[str]  # alternate lookup names for fuzzy field resolution
    raw: JSONObject  # original schema object this entry was built from
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass(slots=True)
class FieldIndex:
    """Lookup index over a form's fields by id, title, and alias."""

    by_id: dict[str, FormField]  # queId (stringified) -> field
    by_title: dict[str, list[FormField]]  # title -> fields (titles may collide)
    by_alias: dict[str, list[FormField]]  # alias -> fields
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
@dataclass(slots=True)
class ViewFilterCondition:
    """A single filter condition taken from a saved view's configuration."""

    que_id: int | None  # target field id; None when the view references it by title only
    que_title: str
    que_type: int | None
    judge_type: int | None  # JUDGE_* comparison code
    judge_values: list[str]  # comparison values as plain strings
    judge_value_details: list[JSONObject]  # structured value payloads (members, departments, ...)
    raw: JSONObject  # original condition object from the view config
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@dataclass(slots=True)
class ViewSelection:
    """A resolved saved view plus its filter conditions.

    ``conditions`` is a list of AND-groups; presumably the outer list is
    OR-combined — TODO confirm against the view-config consumer.
    """

    view_key: str
    view_name: str
    conditions: list[list[ViewFilterCondition]]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
@dataclass(slots=True)
|
|
89
|
+
class RecordInputError(Exception):
|
|
90
|
+
message: str
|
|
91
|
+
error_code: str
|
|
92
|
+
fix_hint: str
|
|
93
|
+
details: JSONObject | None = None
|
|
94
|
+
|
|
95
|
+
def __post_init__(self) -> None:
|
|
96
|
+
Exception.__init__(self, self.message)
|
|
97
|
+
|
|
98
|
+
def to_dict(self) -> JSONObject:
|
|
99
|
+
payload: JSONObject = {
|
|
100
|
+
"category": "config",
|
|
101
|
+
"message": self.message,
|
|
102
|
+
"error_code": self.error_code,
|
|
103
|
+
"fix_hint": self.fix_hint,
|
|
104
|
+
}
|
|
105
|
+
if self.details is not None:
|
|
106
|
+
payload["details"] = self.details
|
|
107
|
+
return payload
|
|
108
|
+
|
|
109
|
+
def as_json(self) -> str:
|
|
110
|
+
return json.dumps(self.to_dict(), ensure_ascii=False)
|
|
111
|
+
|
|
112
|
+
def __str__(self) -> str:
|
|
113
|
+
return self.as_json()
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
# Prefixes stripped/ignored when matching generic field names
# ("当前" = current, "预计" = estimated, "最近" = recent, "最新" = latest).
GENERIC_FIELD_PREFIX_ALIASES = (
    "当前",
    "预计",
    "最近",
    "最新",
)
# Hand-curated alias lists for common field titles
# (e.g. "申请时间" apply-time also matches the create-time titles).
GENERIC_FIELD_ALIAS_OVERRIDES: dict[str, list[str]] = {
    "申请时间": ["新增时间", "创建时间"],
    "更新时间": ["修改时间", "最后更新时间"],
}
# Characters removed before fuzzy field lookup: whitespace, underscores,
# ASCII and full-width brackets/colons, separators, and hyphens.
FIELD_LOOKUP_STRIP_RE = re.compile(r"[\s_()()\[\]【】{}<>·/\\::-]+")
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class RecordTools(ToolBase):
|
|
130
|
+
    def __init__(self, sessions, backend) -> None:  # type: ignore[no-untyped-def]
        """Initialize the record tool group and its per-app metadata caches.

        ``sessions`` and ``backend`` are forwarded to ToolBase unchanged;
        their types are declared elsewhere in the package.
        """
        super().__init__(sessions, backend)
        # All caches are keyed by (profile, app_key) tuples.
        self._form_cache: dict[tuple[str, str], JSONObject] = {}  # form schema per app
        self._view_list_cache: dict[tuple[str, str], list[JSONObject]] = {}  # saved-view listings
        self._view_config_cache: dict[tuple[str, str], JSONObject] = {}  # individual view configs
|
|
135
|
+
|
|
136
|
+
    def register(self, mcp: FastMCP) -> None:
        """Register all record tools on the given FastMCP server.

        Each nested function is a thin MCP-facing wrapper: it normalizes
        optional containers (``x or []`` / ``x or {}``) and delegates to the
        same-named method on ``self``. Keeping the wrappers separate from the
        methods lets the methods use keyword-only, non-optional signatures.
        """

        @mcp.tool()
        def record_field_resolve(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            query: str | int | None = None,
            queries: list[str | int] | None = None,
            top_k: int = 3,
            fuzzy: bool = True,
        ) -> JSONObject:
            return self.record_field_resolve(
                profile=profile,
                app_key=app_key,
                query=query,
                queries=queries,
                top_k=top_k,
                fuzzy=fuzzy,
            )

        @mcp.tool(
            description=(
                "Preflight complex read requests before actual execution. Resolves field selectors, validates required arguments, "
                "and estimates scan scope. Prefer this when the agent is unsure about query shape."
            )
        )
        def record_query_plan(
            profile: str = DEFAULT_PROFILE,
            tool: str = "record_query",
            arguments: JSONObject | None = None,
            resolve_fields: bool = True,
        ) -> JSONObject:
            return self.record_query_plan(profile=profile, tool=tool, arguments=arguments or {}, resolve_fields=resolve_fields)

        @mcp.tool(
            description=(
                "Static preflight for record create/update payloads. Supports ergonomic fields{} mapping, resolves field titles, "
                "and reports blockers before submit."
            )
        )
        def record_write_plan(
            profile: str = DEFAULT_PROFILE,
            operation: str = "auto",
            app_key: str = "",
            apply_id: int | None = None,
            answers: list[JSONObject] | None = None,
            fields: JSONObject | None = None,
            force_refresh_form: bool = False,
        ) -> JSONObject:
            return self.record_write_plan(
                profile=profile,
                operation=operation,
                app_key=app_key,
                apply_id=apply_id,
                answers=answers or [],
                fields=fields or {},
                force_refresh_form=force_refresh_form,
            )

        @mcp.tool(
            description=(
                "Unified read entry for record list / record detail / summary analysis. "
                "List mode returns flattened wide-table rows only. Use query_mode=auto to route: "
                "apply_id -> record, amount_column/time_range/stat_policy -> summary, otherwise -> list."
            )
        )
        def record_query(
            profile: str = DEFAULT_PROFILE,
            query_mode: str = "auto",
            app_key: str = "",
            apply_id: int | None = None,
            page_num: int = 1,
            page_size: int = DEFAULT_QUERY_PAGE_SIZE,
            requested_pages: int = 1,
            scan_max_pages: int = DEFAULT_SCAN_MAX_PAGES,
            query_key: str | None = None,
            filters: list[JSONObject] | None = None,
            sorts: list[JSONObject] | None = None,
            max_rows: int = DEFAULT_ROW_LIMIT,
            max_columns: int | None = None,
            select_columns: list[str | int] | None = None,
            amount_column: str | int | None = None,
            time_range: JSONObject | None = None,
            stat_policy: JSONObject | None = None,
            strict_full: bool = False,
            output_profile: str = DEFAULT_OUTPUT_PROFILE,
            list_type: int = DEFAULT_RECORD_LIST_TYPE,
            view_key: str | None = None,
            view_name: str | None = None,
        ) -> JSONObject:
            return self.record_query(
                profile=profile,
                query_mode=query_mode,
                app_key=app_key,
                apply_id=apply_id,
                page_num=page_num,
                page_size=page_size,
                requested_pages=requested_pages,
                scan_max_pages=scan_max_pages,
                query_key=query_key,
                filters=filters or [],
                sorts=sorts or [],
                max_rows=max_rows,
                max_columns=max_columns,
                select_columns=select_columns or [],
                amount_column=amount_column,
                time_range=time_range or {},
                stat_policy=stat_policy or {},
                strict_full=strict_full,
                output_profile=output_profile,
                list_type=list_type,
                view_key=view_key,
                view_name=view_name,
            )

        @mcp.tool(
            description=(
                "Grouped record analysis endpoint. Aggregates record counts and numeric metrics by selected dimensions. "
                "Use strict_full=true when the result will be used as a final conclusion."
            )
        )
        def record_aggregate(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            group_by: list[str | int] | None = None,
            amount_column: str | int | None = None,
            metrics: list[str] | None = None,
            page_num: int = 1,
            page_size: int = DEFAULT_QUERY_PAGE_SIZE,
            requested_pages: int = 1,
            scan_max_pages: int = DEFAULT_SCAN_MAX_PAGES,
            query_key: str | None = None,
            filters: list[JSONObject] | None = None,
            sorts: list[JSONObject] | None = None,
            time_range: JSONObject | None = None,
            time_bucket: str | None = None,
            max_groups: int = 200,
            strict_full: bool = False,
            output_profile: str = DEFAULT_OUTPUT_PROFILE,
            list_type: int = DEFAULT_RECORD_LIST_TYPE,
            view_key: str | None = None,
            view_name: str | None = None,
        ) -> JSONObject:
            return self.record_aggregate(
                profile=profile,
                app_key=app_key,
                group_by=group_by or [],
                amount_column=amount_column,
                metrics=metrics or [],
                page_num=page_num,
                page_size=page_size,
                requested_pages=requested_pages,
                scan_max_pages=scan_max_pages,
                query_key=query_key,
                filters=filters or [],
                sorts=sorts or [],
                time_range=time_range or {},
                time_bucket=time_bucket,
                max_groups=max_groups,
                strict_full=strict_full,
                output_profile=output_profile,
                list_type=list_type,
                view_key=view_key,
                view_name=view_name,
            )

        @mcp.tool(
            description=(
                "Create one record. Supports explicit answers[] and ergonomic fields{} mapping by exact field title or queId. "
                "Use record_write_plan first for complex payloads."
            )
        )
        def record_create(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            answers: list[JSONObject] | None = None,
            fields: JSONObject | None = None,
            submit_type: int = 1,
            verify_write: bool = False,
            force_refresh_form: bool = False,
        ) -> JSONObject:
            return self.record_create(
                profile=profile,
                app_key=app_key,
                answers=answers or [],
                fields=fields or {},
                submit_type=submit_type,
                verify_write=verify_write,
                force_refresh_form=force_refresh_form,
            )

        @mcp.tool()
        def record_get(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            apply_id: int = 0,
            role: int = 1,
            list_type: int | None = None,
            audit_node_id: int | None = None,
        ) -> JSONObject:
            return self.record_get(
                profile=profile,
                app_key=app_key,
                apply_id=apply_id,
                role=role,
                list_type=list_type,
                audit_node_id=audit_node_id,
            )

        # Mutating tools carry the shared high-risk description banner.
        @mcp.tool(description=self._high_risk_tool_description(operation="update", target="record data"))
        def record_update(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            apply_id: int = 0,
            answers: list[JSONObject] | None = None,
            fields: JSONObject | None = None,
            role: int = 1,
            verify_write: bool = False,
            force_refresh_form: bool = False,
        ) -> JSONObject:
            return self.record_update(
                profile=profile,
                app_key=app_key,
                apply_id=apply_id,
                answers=answers or [],
                fields=fields or {},
                role=role,
                verify_write=verify_write,
                force_refresh_form=force_refresh_form,
            )

        @mcp.tool(description=self._high_risk_tool_description(operation="delete", target="record data"))
        def record_delete(
            profile: str = DEFAULT_PROFILE,
            app_key: str = "",
            apply_id: int = 0,
            list_type: int = DEFAULT_RECORD_LIST_TYPE,
        ) -> JSONObject:
            return self.record_delete(profile=profile, app_key=app_key, apply_id=apply_id, list_type=list_type)
|
|
374
|
+
|
|
375
|
+
def record_field_resolve(
|
|
376
|
+
self,
|
|
377
|
+
*,
|
|
378
|
+
profile: str,
|
|
379
|
+
app_key: str,
|
|
380
|
+
query: str | int | None,
|
|
381
|
+
queries: list[str | int] | None,
|
|
382
|
+
top_k: int,
|
|
383
|
+
fuzzy: bool,
|
|
384
|
+
) -> JSONObject:
|
|
385
|
+
if not app_key:
|
|
386
|
+
raise_tool_error(QingflowApiError.config_error("app_key is required"))
|
|
387
|
+
requested = [item for item in (queries or []) if item is not None]
|
|
388
|
+
if query is not None:
|
|
389
|
+
requested = [query]
|
|
390
|
+
if not requested:
|
|
391
|
+
raise_tool_error(QingflowApiError.config_error("query or queries is required"))
|
|
392
|
+
|
|
393
|
+
def runner(session_profile, context):
|
|
394
|
+
index = self._get_field_index(profile, context, app_key, force_refresh=False)
|
|
395
|
+
results = []
|
|
396
|
+
for item in requested:
|
|
397
|
+
text = str(item).strip()
|
|
398
|
+
if not text:
|
|
399
|
+
continue
|
|
400
|
+
results.append({"requested": text, "matches": self._score_field_matches(text, index, fuzzy=fuzzy, top_k=top_k)})
|
|
401
|
+
return {
|
|
402
|
+
"profile": profile,
|
|
403
|
+
"ws_id": session_profile.selected_ws_id,
|
|
404
|
+
"ok": True,
|
|
405
|
+
"request_route": self._request_route_payload(context),
|
|
406
|
+
"data": {
|
|
407
|
+
"app_key": app_key,
|
|
408
|
+
"query_count": len(results),
|
|
409
|
+
"results": results,
|
|
410
|
+
},
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
return self._run_record_tool(profile, runner)
|
|
414
|
+
|
|
415
|
+
    def record_query_plan(self, *, profile: str, tool: str, arguments: JSONObject, resolve_fields: bool) -> JSONObject:
        """Statically preflight a read request for one of SUPPORTED_QUERY_TOOLS.

        Normalizes and validates ``arguments``, optionally resolves field
        selectors and the requested saved view, and returns an estimate plus
        readiness verdict without executing the query itself.

        Raises a config tool-error when ``tool`` is not supported.
        """
        if tool not in SUPPORTED_QUERY_TOOLS:
            raise_tool_error(QingflowApiError.config_error(f"tool must be one of {sorted(SUPPORTED_QUERY_TOOLS)}"))
        normalized = _normalize_plan_arguments(tool, arguments)
        validation = self._validate_plan_arguments(tool, normalized)

        def runner(session_profile, context):
            field_mapping: list[JSONObject] = []
            view_resolution: JSONObject | None = None
            # Field/view resolution needs a non-empty app_key; skip it otherwise.
            if resolve_fields and isinstance(normalized.get("app_key"), str) and normalized.get("app_key"):
                index = self._get_field_index(profile, context, cast(str, normalized["app_key"]), force_refresh=False)
                for candidate in _collect_plan_field_candidates(tool, normalized):
                    field_mapping.append(self._resolve_plan_candidate(candidate, index))
                view_selection = self._resolve_view_selection(
                    profile,
                    context,
                    cast(str, normalized["app_key"]),
                    view_key=_normalize_optional_text(normalized.get("view_key")),
                    view_name=_normalize_optional_text(normalized.get("view_name")),
                )
                if view_selection is not None:
                    view_resolution = {
                        "resolved": True,
                        "view_key": view_selection.view_key,
                        "view_name": view_selection.view_name,
                        # View conditions are applied locally, not by the backend.
                        "local_filtering": bool(view_selection.conditions),
                        "condition_group_count": len(view_selection.conditions),
                    }
            estimate = _build_plan_estimate(tool, normalized)
            readiness = _assess_plan_readiness(tool, normalized, validation, field_mapping, estimate)
            return {
                "profile": profile,
                "ws_id": session_profile.selected_ws_id,
                "ok": True,
                "request_route": self._request_route_payload(context),
                "data": {
                    "tool": tool,
                    "normalized_arguments": normalized,
                    "validation": validation,
                    "field_mapping": field_mapping,
                    "view_resolution": view_resolution,
                    "estimate": estimate,
                    "ready_for_final_conclusion": readiness["ready_for_final_conclusion"],
                    "final_conclusion_blockers": readiness["final_conclusion_blockers"],
                    "recommended_next_actions": readiness["recommended_next_actions"],
                },
            }

        return self._run_record_tool(profile, runner)
|
|
464
|
+
|
|
465
|
+
    def record_write_plan(
        self,
        *,
        profile: str,
        operation: str,
        app_key: str,
        apply_id: int | None,
        answers: list[JSONObject] | None = None,
        fields: JSONObject | None = None,
        force_refresh_form: bool = False,
    ) -> JSONObject:
        """Static preflight for a record create/update payload.

        Builds the form field index, resolves the requested fields, normalizes
        answers, and reports missing-required / readonly / invalid fields plus
        blockers — without submitting anything. ``operation="auto"`` infers
        create vs update from the presence of ``apply_id``.

        Raises a config tool-error when ``app_key`` is empty.
        """
        if not app_key:
            raise_tool_error(QingflowApiError.config_error("app_key is required"))
        # Any operation value outside {create, update} falls back to inference.
        inferred_operation = operation if operation in {"create", "update"} else ("update" if apply_id else "create")

        def runner(session_profile, context):
            schema = self._get_form_schema(profile, context, app_key, force_refresh=force_refresh_form)
            index = _build_field_index(schema)
            normalized_fields = fields or {}
            normalized_answers_input = answers or []
            resolved_fields = self._collect_write_plan_field_refs(fields=normalized_fields, answers=normalized_answers_input, index=index)
            support_matrix = _summarize_write_support(resolved_fields)
            invalid_fields: list[JSONObject] = []
            normalized_answers: list[JSONObject] = []
            # Answer normalization raises on the FIRST bad field, so only one
            # invalid_fields entry can be produced per plan call.
            try:
                normalized_answers = self._resolve_answers(
                    profile,
                    context,
                    app_key,
                    answers=normalized_answers_input,
                    fields=normalized_fields,
                    force_refresh_form=force_refresh_form,
                )
            except RecordInputError as error:
                invalid_fields.append(
                    {
                        "location": _stringify_json(error.details.get("location") if error.details else None),
                        "message": error.message,
                        "error_code": error.error_code,
                        "field": error.details.get("field") if error.details and isinstance(error.details.get("field"), dict) else None,
                        "expected_format": error.details.get("expected_format") if error.details and isinstance(error.details.get("expected_format"), dict) else None,
                        "received_value": error.details.get("received_value") if error.details else None,
                    }
                )
            # Resolved fields that must not be written by callers.
            readonly_or_system_fields = [
                {
                    "que_id": entry.get("que_id"),
                    "que_title": entry.get("que_title"),
                    "que_type": entry.get("que_type"),
                    "readonly": entry.get("readonly"),
                    "system": entry.get("system"),
                    "source": entry.get("source"),
                    "requested": entry.get("requested"),
                }
                for entry in resolved_fields
                if bool(entry.get("resolved")) and (bool(entry.get("readonly")) or bool(entry.get("system")))
            ]
            # queIds actually present in the normalized payload (positive ints only).
            provided_field_ids = {
                str(answer.get("queId"))
                for answer in normalized_answers
                if isinstance(answer.get("queId"), int) and int(answer["queId"]) > 0
            }
            missing_required_fields = []
            for field in index.by_id.values():
                if not field.required or str(field.que_id) in provided_field_ids:
                    continue
                missing_required_fields.append(
                    {
                        "que_id": field.que_id,
                        "que_title": field.que_title,
                        "que_type": field.que_type,
                        "reason": "required field not provided",
                    }
                )
            question_relations = _collect_question_relations(schema)
            option_links = _collect_option_links(resolved_fields)
            validation = {
                "valid": not invalid_fields and not missing_required_fields and not readonly_or_system_fields,
                "missing_required_fields": missing_required_fields,
                "likely_hidden_required_fields": [],
                "readonly_or_system_fields": readonly_or_system_fields,
                "invalid_fields": invalid_fields,
                "warnings": [
                    "record_write_plan is a static preflight built from form metadata; runtime visibility and dynamic linkage can still reject writes."
                ],
            }
            # Blockers mirror the three validation failure classes above.
            blockers = []
            if invalid_fields:
                blockers.append("payload contains invalid field values")
            if missing_required_fields:
                blockers.append("required fields are missing")
            if readonly_or_system_fields:
                blockers.append("payload writes readonly or system-managed fields")
            if question_relations:
                validation["warnings"].append("form contains questionRelations; linked visibility and runtime required rules may differ at submit time.")
            actions = ["Use record_field_resolve when field titles are ambiguous."]
            if support_matrix["restricted"]:
                actions.append("Review write_format.required_presteps for restricted fields before submit.")
            if invalid_fields:
                actions.append("Fix invalid_fields before calling record_create or record_update.")
            if missing_required_fields:
                actions.append("Fill missing required fields before submit.")
            if readonly_or_system_fields:
                actions.append("Remove readonly/system fields from payload before submit.")
            if question_relations:
                actions.append("Treat ready_to_submit as static-only because linked fields can still appear at runtime.")
            return {
                "profile": profile,
                "ws_id": session_profile.selected_ws_id,
                "ok": True,
                "request_route": self._request_route_payload(context),
                "data": {
                    "operation": inferred_operation,
                    "app_key": app_key,
                    "apply_id": apply_id,
                    "normalized_answers": normalized_answers,
                    "resolved_fields": resolved_fields,
                    "support_matrix": support_matrix,
                    "validation": validation,
                    "dependencies": {
                        "question_relations_present": bool(question_relations),
                        "question_relations": question_relations,
                        "option_links": option_links,
                    },
                    "ready_to_submit": validation["valid"],
                    "blockers": blockers,
                    "recommended_next_actions": actions,
                },
            }

        return self._run_record_tool(profile, runner)
|
|
596
|
+
|
|
597
|
+
    def record_query(
        self,
        *,
        profile: str,
        query_mode: str,
        app_key: str,
        apply_id: int | None,
        page_num: int,
        page_size: int,
        requested_pages: int,
        scan_max_pages: int,
        query_key: str | None,
        filters: list[JSONObject],
        sorts: list[JSONObject],
        max_rows: int,
        max_columns: int | None,
        select_columns: list[str | int],
        amount_column: str | int | None,
        time_range: JSONObject,
        stat_policy: JSONObject,
        strict_full: bool,
        output_profile: str,
        list_type: int,
        view_key: str | None = None,
        view_name: str | None = None,
    ) -> JSONObject:
        """Dispatch the unified read entry to record / summary / list mode.

        ``_resolve_query_mode`` routes based on which arguments are present
        (apply_id -> record, amount/time/stat -> summary, otherwise list);
        this method only forwards the relevant subset of arguments to the
        matching private handler.
        """
        resolved_mode = _resolve_query_mode(query_mode, apply_id=apply_id, amount_column=amount_column, time_range=time_range, stat_policy=stat_policy)
        if resolved_mode == "record":
            # Detail mode: paging/filter arguments are irrelevant and dropped.
            return self._record_query_record(
                profile=profile,
                app_key=app_key,
                apply_id=apply_id,
                select_columns=select_columns,
                max_columns=max_columns,
                output_profile=output_profile,
                list_type=list_type,
            )
        if resolved_mode == "summary":
            return self._record_query_summary(
                profile=profile,
                app_key=app_key,
                page_num=page_num,
                page_size=page_size,
                requested_pages=requested_pages,
                scan_max_pages=scan_max_pages,
                query_key=query_key,
                filters=filters,
                sorts=sorts,
                max_rows=max_rows,
                max_columns=max_columns,
                select_columns=select_columns,
                amount_column=amount_column,
                time_range=time_range,
                stat_policy=stat_policy,
                strict_full=strict_full,
                output_profile=output_profile,
                list_type=list_type,
                view_key=view_key,
                view_name=view_name,
            )
        # Default: flattened wide-table list mode (no amount/stat arguments).
        return self._record_query_list(
            profile=profile,
            app_key=app_key,
            page_num=page_num,
            page_size=page_size,
            requested_pages=requested_pages,
            scan_max_pages=scan_max_pages,
            query_key=query_key,
            filters=filters,
            sorts=sorts,
            max_rows=max_rows,
            max_columns=max_columns,
            select_columns=select_columns,
            time_range=time_range,
            output_profile=output_profile,
            list_type=list_type,
            view_key=view_key,
            view_name=view_name,
        )
|
|
676
|
+
|
|
677
|
+
def record_aggregate(
    self,
    *,
    profile: str,
    app_key: str,
    group_by: list[str | int],
    amount_column: str | int | None,
    metrics: list[str],
    page_num: int,
    page_size: int,
    requested_pages: int,
    scan_max_pages: int,
    query_key: str | None,
    filters: list[JSONObject],
    sorts: list[JSONObject],
    time_range: JSONObject,
    time_bucket: str | None,
    max_groups: int,
    strict_full: bool,
    output_profile: str,
    list_type: int,
    view_key: str | None = None,
    view_name: str | None = None,
) -> JSONObject:
    """Group and aggregate records of one app by scanning search pages client-side.

    Records are fetched page by page via ``self._search_page``, filtered through an
    optional view selection, then bucketed by the resolved ``group_by`` fields
    (plus an optional ``time_bucket`` key). Per-bucket counters and metric states
    (count/sum/min/max over ``amount_column``) are accumulated in memory; no
    server-side aggregation is used.

    Scanning stops when ``max_groups`` distinct groups exist, the page cap
    (min of requested_pages and scan_max_pages, each floored at 1) is reached,
    or the backend reports no more pages. With ``strict_full=True`` an
    incomplete scan raises via ``self._raise_need_more_data`` instead of
    returning partial results.

    Returns a JSONObject with summary totals, sorted ``groups`` (descending by
    count) and completeness info; ``output_profile == "verbose"`` additionally
    attaches evidence and resolved field mappings.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    if max_groups <= 0:
        raise_tool_error(QingflowApiError.config_error("max_groups must be positive"))

    def runner(session_profile, context):
        # Resolve schema/view metadata once up front; all selector resolution
        # happens against this field index.
        index = self._get_field_index(profile, context, app_key, force_refresh=False)
        view_selection = self._resolve_view_selection(profile, context, app_key, view_key=view_key, view_name=view_name)
        dept_member_cache: dict[int, set[int]] = {}
        group_fields = [self._resolve_field_selector(item, index, location="group_by") for item in group_by]
        amount_field = self._resolve_field_selector(amount_column, index, location="amount_column") if amount_column is not None else None
        time_field = self._resolve_time_range_column(time_range, index)
        match_rules = self._resolve_match_rules(context, filters, index)
        sort_rules = self._resolve_sorts(sorts, index)
        # time_range is expressed as an extra backend match rule on time_field.
        match_rules = self._append_time_range_filter(match_rules, time_range, time_field)
        # "sum" is only meaningful when an amount column is present.
        metric_names = _normalize_metrics(metrics, include_sum=amount_field is not None)
        query_id = _query_id()
        # Effective page budget: both knobs floored at 1, then the smaller wins.
        pages_to_scan = min(max(requested_pages, 1), max(scan_max_pages, 1))
        current_page = max(page_num, 1)
        scanned_pages = 0
        scanned_records = 0
        source_pages: list[int] = []
        result_amount: int | None = None
        has_more = False
        # Buckets keyed by the canonical JSON of the group payload.
        group_stats: dict[str, JSONObject] = {}
        total_amount = 0.0
        while scanned_pages < pages_to_scan:
            page = self._search_page(
                context,
                app_key=app_key,
                page_num=current_page,
                page_size=page_size,
                query_key=query_key,
                match_rules=match_rules,
                sorts=sort_rules,
                search_que_ids=None,
                list_type=list_type,
            )
            scanned_pages += 1
            source_pages.append(current_page)
            rows = page.get("list")
            items = rows if isinstance(rows, list) else []
            # Backend total is read from the first page only.
            if result_amount is None:
                result_amount = _effective_total(page, page_size)
            has_more = _page_has_more(page, current_page, page_size, len(items))
            for item in items:
                if not isinstance(item, dict):
                    continue
                answers = item.get("answers")
                answer_list = answers if isinstance(answers, list) else []
                # Client-side view filter; dept_member_cache avoids repeated
                # department-membership lookups across records.
                if not self._matches_view_selection(
                    context,
                    answer_list,
                    view_selection=view_selection,
                    dept_member_cache=dept_member_cache,
                ):
                    continue
                scanned_records += 1
                group_payload = {
                    field.que_title: _extract_field_value(answer_list, field)
                    for field in group_fields
                }
                if time_bucket and time_field is not None:
                    group_payload["time_bucket"] = _to_time_bucket(_extract_field_value(answer_list, time_field), time_bucket)
                # sort_keys makes the bucket key order-independent.
                group_key = json.dumps(group_payload, ensure_ascii=False, sort_keys=True)
                bucket = group_stats.get(group_key)
                if bucket is None:
                    bucket = {"group": group_payload, "count": 0, "amount_total": None, "metrics": {}}
                    group_stats[group_key] = bucket
                bucket["count"] = int(bucket["count"]) + 1
                amount_value = _coerce_amount(_extract_field_value(answer_list, amount_field)) if amount_field is not None else None
                if amount_value is not None:
                    total_amount += amount_value
                    bucket["amount_total"] = float(bucket.get("amount_total") or 0.0) + amount_value
                metrics_payload = bucket["metrics"] if isinstance(bucket.get("metrics"), dict) else {}
                for metric in metric_names:
                    current_metric = metrics_payload.get(metric)
                    metric_state = current_metric if isinstance(current_metric, dict) else {"count": 0, "sum": 0.0, "min": None, "max": None}
                    metric_state["count"] = int(metric_state["count"]) + 1
                    # sum/min/max only advance when the record has a numeric amount.
                    if amount_value is not None:
                        metric_state["sum"] = float(metric_state["sum"]) + amount_value
                        metric_state["min"] = amount_value if metric_state["min"] is None else min(float(metric_state["min"]), amount_value)
                        metric_state["max"] = amount_value if metric_state["max"] is None else max(float(metric_state["max"]), amount_value)
                    metrics_payload[metric] = metric_state
                bucket["metrics"] = metrics_payload
                if len(group_stats) >= max_groups:
                    break
            if len(group_stats) >= max_groups or not has_more:
                break
            current_page += 1

        # Render accumulated bucket state into the public group shape.
        groups = []
        for bucket in group_stats.values():
            metrics_payload = bucket["metrics"] if isinstance(bucket.get("metrics"), dict) else {}
            rendered_metrics: JSONObject = {}
            for metric_name, metric_state in metrics_payload.items():
                if not isinstance(metric_state, dict):
                    continue
                count = int(metric_state.get("count", 0) or 0)
                amount_sum = float(metric_state.get("sum", 0.0) or 0.0)
                metric_result: JSONObject = {}
                if metric_name == "count":
                    metric_result["value"] = count
                else:
                    if metric_name == "sum":
                        metric_result["value"] = amount_sum
                    elif metric_name == "avg":
                        metric_result["value"] = (amount_sum / count) if count else None
                    elif metric_name == "min":
                        metric_result["value"] = metric_state.get("min")
                    elif metric_name == "max":
                        metric_result["value"] = metric_state.get("max")
                rendered_metrics[metric_name] = metric_result
            groups.append(
                {
                    "group": bucket["group"],
                    "count": bucket["count"],
                    "count_ratio": (int(bucket["count"]) / scanned_records) if scanned_records else 0,
                    "amount_total": None if amount_field is None else _coerce_amount(bucket.get("amount_total")),
                    "amount_ratio": None,
                    "metrics": rendered_metrics,
                }
            )
        groups.sort(key=lambda item: int(item["count"]), reverse=True)
        # amount_ratio is filled in a second pass once total_amount is final.
        if amount_field is not None and total_amount > 0:
            for item in groups:
                amount_total = _coerce_amount(item.get("amount_total"))
                item["amount_ratio"] = (amount_total / total_amount) if amount_total is not None else None
        # With a view filter, the backend total is not trustworthy; fall back
        # to the locally counted matches.
        effective_result_amount = scanned_records if view_selection is not None else (result_amount or scanned_records)
        completeness = _build_completeness(
            result_amount=effective_result_amount,
            returned_items=len(groups),
            fetched_pages=scanned_pages,
            requested_pages=pages_to_scan,
            has_more=has_more,
            next_page_token=None,
            is_complete=not has_more and len(groups) < max_groups,
            omitted_items=max(0, effective_result_amount - len(groups)),
            extra={
                "raw_scan_complete": not has_more,
                "scan_limit_hit": has_more,
                "scanned_pages": scanned_pages,
                "scan_limit": pages_to_scan,
                "output_page_complete": len(groups) < max_groups,
                "raw_next_page_token": None,
                "output_next_page_token": None,
                "stop_reason": "source_exhausted" if not has_more else "scan_limit",
            },
        )
        evidence = {
            "query_id": query_id,
            "app_key": app_key,
            "filters": _echo_filters(match_rules),
            "selected_columns": [field.que_title for field in group_fields],
            "time_range": time_range or None,
            "source_pages": source_pages,
            "view": _view_selection_payload(view_selection),
        }
        # NOTE(review): this reads "raw_scan_complete" from the top level of
        # the completeness payload — assumes _build_completeness flattens its
        # `extra` mapping into the result; confirm against _build_completeness.
        if strict_full and not bool(completeness.get("raw_scan_complete")):
            self._raise_need_more_data(completeness, evidence, "Aggregate result is incomplete; increase requested_pages or scan_max_pages.")
        response: JSONObject = {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "ok": True,
            "request_route": self._request_route_payload(context),
            "data": {
                "app_key": app_key,
                "view": _view_selection_payload(view_selection),
                "summary": {
                    "total_count": scanned_records,
                    "total_amount": total_amount if amount_field is not None else None,
                },
                "groups": groups,
                "completeness": completeness,
            },
        }
        if output_profile == "verbose":
            response["completeness"] = completeness
            response["evidence"] = evidence
            response["resolved_mappings"] = {
                "group_by": [_field_mapping_entry("group_by", field, requested=field.que_title) for field in group_fields],
                "amount_column": _field_mapping_entry("amount", amount_field, requested=amount_field.que_title) if amount_field is not None else None,
            }
        return response

    return self._run_record_tool(profile, runner)
|
|
887
|
+
|
|
888
|
+
def record_create(
    self,
    *,
    profile: str,
    app_key: str,
    answers: list[JSONObject] | None = None,
    fields: JSONObject | None = None,
    submit_type: int = 1,
    verify_write: bool = False,
    force_refresh_form: bool = False,
) -> JSONObject:
    """Create one record in an app via ``POST /app/{app_key}/apply``.

    ``answers``/``fields`` are normalized through ``self._resolve_answers``
    before submission. When ``verify_write`` is true, the form field index is
    loaded first and the write is read back through
    ``self._verify_record_write_result``; a failed verification turns the
    response into ``status="verification_failed"`` / ``ok=False`` but does not
    raise.

    ``submit_type`` must be 0 or 1 (meaning defined by the backend API —
    presumably draft vs. submit; confirm against the Qingflow docs).
    """
    if submit_type not in (0, 1):
        raise_tool_error(QingflowApiError.config_error("submit_type must be 0 or 1"))

    def runner(session_profile, context):
        # The field index is only needed for post-write verification; skip the
        # (potentially refreshing) fetch otherwise.
        index = self._get_field_index(profile, context, app_key, force_refresh=force_refresh_form) if verify_write else None
        normalized_answers = self._resolve_answers(
            profile,
            context,
            app_key,
            answers=answers or [],
            fields=fields or {},
            force_refresh_form=force_refresh_form,
        )
        # Validate before issuing the write.
        self._validate_record_write(app_key, normalized_answers)
        result = self.backend.request("POST", context, f"/app/{app_key}/apply", json_body={"type": submit_type, "answers": normalized_answers})
        apply_id = _coerce_count(result.get("applyId")) if isinstance(result, dict) else None
        verification = self._verify_record_write_result(
            context,
            app_key=app_key,
            apply_id=apply_id,
            normalized_answers=normalized_answers,
            index=cast(FieldIndex, index),
        ) if verify_write and index is not None else None
        # No verification requested (or index unavailable) => trust the write.
        verified = True if verification is None else bool(verification.get("verified"))
        return {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "request_route": self._request_route_payload(context),
            "app_key": app_key,
            "result": result,
            "normalized_answers": normalized_answers,
            "status": "completed" if verified else "verification_failed",
            "ok": verified,
            "apply_id": apply_id,
            "verify_write": verify_write,
            "write_verified": verified if verify_write else None,
            "verification": verification,
            "resource": {"type": "record", "apply_id": apply_id} if apply_id is not None else None,
        }

    return self._run_record_tool(profile, runner)
|
|
940
|
+
|
|
941
|
+
def record_get(
    self,
    *,
    profile: str,
    app_key: str,
    apply_id: int,
    role: int,
    list_type: int | None,
    audit_node_id: int | None,
) -> JSONObject:
    """Fetch a single record via ``GET /app/{app_key}/apply/{apply_id}``.

    ``apply_id`` is validated/normalized up front; ``list_type`` and
    ``audit_node_id`` are forwarded as optional query parameters only when
    provided. The backend payload is returned verbatim under ``result``.
    """
    record_id = self._validate_app_and_record(app_key, apply_id)

    def runner(session_profile, context):
        # Assemble query parameters, attaching only the optional ones that
        # the caller actually supplied.
        query_params: JSONObject = {"role": role}
        for param_name, param_value in (("listType", list_type), ("auditNodeId", audit_node_id)):
            if param_value is not None:
                query_params[param_name] = param_value
        payload = self.backend.request("GET", context, f"/app/{app_key}/apply/{record_id}", params=query_params)
        return {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "request_route": self._request_route_payload(context),
            "app_key": app_key,
            "apply_id": record_id,
            "result": payload,
        }

    return self._run_record_tool(profile, runner)
|
|
970
|
+
|
|
971
|
+
def record_search(
    self,
    *,
    profile: str,
    app_key: str,
    page_num: int,
    page_size: int,
    query_key: str | None,
    match_rules: list[JSONObject],
    sorts: list[JSONObject],
    search_que_ids: list[int] | None,
    list_type: int,
    view_key: str | None = None,
    view_name: str | None = None,
) -> JSONObject:
    """Run one backend search page and apply an optional client-side view filter.

    Fetches a single page via ``self._search_page`` with the caller-supplied
    match rules/sorts (no field resolution is applied here — rules are passed
    through as-is), then drops rows that do not match the resolved view
    selection. Counts are reconciled: with a view filter the backend total is
    ignored in favor of the locally filtered row count.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))

    def runner(session_profile, context):
        # Fetched for its cache/validation side effects; the return value is
        # not used directly in this runner — TODO confirm it is still needed.
        index = self._get_field_index(profile, context, app_key, force_refresh=False)
        view_selection = self._resolve_view_selection(profile, context, app_key, view_key=view_key, view_name=view_name)
        dept_member_cache: dict[int, set[int]] = {}
        result = self._search_page(
            context,
            app_key=app_key,
            page_num=page_num,
            page_size=page_size,
            query_key=query_key,
            match_rules=match_rules,
            sorts=sorts,
            search_que_ids=search_que_ids,
            list_type=list_type,
        )
        rows = result.get("list")
        raw_rows = rows if isinstance(rows, list) else []
        # Keep only dict rows whose answers pass the view selection filter.
        filtered_rows = [
            item
            for item in raw_rows
            if isinstance(item, dict)
            and self._matches_view_selection(
                context,
                item.get("answers") if isinstance(item.get("answers"), list) else [],
                view_selection=view_selection,
                dept_member_cache=dept_member_cache,
            )
        ]
        # Replace the row list on a shallow copy so the original backend
        # payload object is not mutated.
        if isinstance(rows, list):
            result = dict(result)
            result["list"] = filtered_rows
        returned_rows = len(filtered_rows)
        # Backends report the total under either "total" or "count".
        reported_total = _coerce_count(result.get("total"))
        if reported_total is None:
            reported_total = _coerce_count(result.get("count"))
        # With a view filter the backend total counts unfiltered rows, so the
        # local count wins; otherwise trust the larger of the two.
        effective_count = returned_rows if view_selection is not None else (max(reported_total or 0, returned_rows) if reported_total is not None else returned_rows)
        return {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "request_route": self._request_route_payload(context),
            "app_key": app_key,
            "page": result,
            "list_type": list_type,
            "view": _view_selection_payload(view_selection),
            "page_num": page_num,
            "page_size": page_size,
            "reported_total": reported_total,
            "returned_rows": returned_rows,
            "effective_count": effective_count,
        }

    return self._run_record_tool(profile, runner)
|
|
1041
|
+
|
|
1042
|
+
def record_update(
    self,
    *,
    profile: str,
    app_key: str,
    apply_id: int,
    answers: list[JSONObject] | None = None,
    fields: JSONObject | None = None,
    role: int = 1,
    verify_write: bool = False,
    force_refresh_form: bool = False,
) -> JSONObject:
    """Update an existing record via ``POST /app/{app_key}/apply/{apply_id}``.

    Mirrors :meth:`record_create`'s normalize → validate → write → verify
    choreography, with the update-specific differences: the apply id is
    validated up front, the validator receives ``apply_id``, and the response
    is wrapped with a human-review notice for the "update" operation.

    When ``verify_write`` is true the write is read back through
    ``self._verify_record_write_result``; a failed verification yields
    ``status="verification_failed"`` / ``ok=False`` without raising.
    """
    normalized_apply_id = self._validate_app_and_record(app_key, apply_id)

    def runner(session_profile, context):
        # Field index is only required for post-write verification.
        index = self._get_field_index(profile, context, app_key, force_refresh=force_refresh_form) if verify_write else None
        normalized_answers = self._resolve_answers(
            profile,
            context,
            app_key,
            answers=answers or [],
            fields=fields or {},
            force_refresh_form=force_refresh_form,
        )
        self._validate_record_write(app_key, normalized_answers, apply_id=normalized_apply_id)
        result = self.backend.request(
            "POST",
            context,
            f"/app/{app_key}/apply/{normalized_apply_id}",
            json_body={"role": role, "answers": normalized_answers},
        )
        verification = self._verify_record_write_result(
            context,
            app_key=app_key,
            apply_id=normalized_apply_id,
            normalized_answers=normalized_answers,
            index=cast(FieldIndex, index),
        ) if verify_write and index is not None else None
        # No verification requested (or no index) => assume the write stuck.
        verified = True if verification is None else bool(verification.get("verified"))
        return self._attach_human_review_notice(
            {
                "profile": profile,
                "ws_id": session_profile.selected_ws_id,
                "request_route": self._request_route_payload(context),
                "app_key": app_key,
                "apply_id": normalized_apply_id,
                "result": result,
                "normalized_answers": normalized_answers,
                "status": "completed" if verified else "verification_failed",
                "ok": verified,
                "verify_write": verify_write,
                "write_verified": verified if verify_write else None,
                "verification": verification,
                "resource": {"type": "record", "apply_id": normalized_apply_id},
            },
            operation="update",
            target="record data",
        )

    return self._run_record_tool(profile, runner)
|
|
1102
|
+
|
|
1103
|
+
def record_delete(self, *, profile: str, app_key: str, apply_id: int, list_type: int) -> JSONObject:
    """Delete one record via ``DELETE /app/{app_key}/apply``.

    The apply id is validated/normalized first, then sent as a single-element
    ``applyIds`` batch. The response is wrapped with a human-review notice for
    the destructive "delete" operation.
    """
    record_id = self._validate_app_and_record(app_key, apply_id)

    def runner(session_profile, context):
        # The backend delete endpoint takes a batch; we always send exactly one id.
        delete_body = {"type": list_type, "applyIds": [record_id]}
        outcome = self.backend.request(
            "DELETE",
            context,
            f"/app/{app_key}/apply",
            json_body=delete_body,
        )
        base_payload = {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "request_route": self._request_route_payload(context),
            "app_key": app_key,
            "apply_id": record_id,
            "result": outcome,
        }
        return self._attach_human_review_notice(base_payload, operation="delete", target="record data")

    return self._run_record_tool(profile, runner)
|
|
1127
|
+
|
|
1128
|
+
def _record_query_record(
    self,
    *,
    profile: str,
    app_key: str,
    apply_id: int | None,
    select_columns: list[str | int],
    max_columns: int | None,
    output_profile: str,
    list_type: int,
) -> JSONObject:
    """Single-record branch of the query tool: fetch one record and project it.

    Fetches the record with ``GET /app/{app_key}/apply/{apply_id}`` (fixed
    ``role=1``), resolves ``select_columns`` against the field index (capped by
    ``MAX_RECORD_COLUMN_LIMIT``), and flattens the answers into one row via
    ``_build_flat_row``. Completeness is trivially "complete" since exactly one
    record is fetched. Verbose output additionally carries completeness,
    evidence, and column mappings.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    if apply_id is None or apply_id <= 0:
        raise_tool_error(QingflowApiError.config_error("apply_id is required"))
    if not select_columns:
        raise_tool_error(QingflowApiError.config_error("select_columns is required in record mode"))

    def runner(session_profile, context):
        index = self._get_field_index(profile, context, app_key, force_refresh=False)
        # Echoed in applied_limits; the same cap is enforced inside
        # _resolve_select_columns via default_limit.
        resolved_column_cap = _bounded_column_limit(
            max_columns,
            default_limit=MAX_RECORD_COLUMN_LIMIT,
            hard_limit=MAX_RECORD_COLUMN_LIMIT,
        )
        selected_fields = self._resolve_select_columns(
            select_columns,
            index,
            max_columns=max_columns,
            default_limit=MAX_RECORD_COLUMN_LIMIT,
        )
        result = self.backend.request(
            "GET",
            context,
            f"/app/{app_key}/apply/{apply_id}",
            params={"role": 1, "listType": list_type},
        )
        answers = result.get("answers") if isinstance(result, dict) else None
        answer_list = answers if isinstance(answers, list) else []
        row = _build_flat_row(answer_list, selected_fields, apply_id=apply_id)
        # One record fetched in one request: completeness is constant.
        completeness = _build_completeness(
            result_amount=1,
            returned_items=1,
            fetched_pages=1,
            requested_pages=1,
            has_more=False,
            next_page_token=None,
            is_complete=True,
            omitted_items=0,
            extra={},
        )
        evidence = {
            "query_id": _query_id(),
            "app_key": app_key,
            "filters": [],
            "selected_columns": [field.que_title for field in selected_fields],
            "time_range": None,
            "source_pages": [1],
        }
        response: JSONObject = {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "ok": True,
            "request_route": self._request_route_payload(context),
            "data": {
                "mode": "record",
                "source_tool": "record_get",
                "record": {
                    "apply_id": apply_id,
                    "row": row,
                    "applied_limits": {
                        "column_cap": resolved_column_cap,
                        "selected_columns": [field.que_title for field in selected_fields],
                    },
                },
            },
            "output_profile": output_profile,
            "next_page_token": None,
        }
        if output_profile == "verbose":
            response["completeness"] = completeness
            response["evidence"] = evidence
            response["resolved_mappings"] = {
                "select_columns": [_field_mapping_entry("row", field, requested=field.que_title) for field in selected_fields]
            }
        return response

    return self._run_record_tool(profile, runner)
|
|
1216
|
+
|
|
1217
|
+
def _record_query_list(
    self,
    *,
    profile: str,
    app_key: str,
    page_num: int,
    page_size: int,
    requested_pages: int,
    scan_max_pages: int,
    query_key: str | None,
    filters: list[JSONObject],
    sorts: list[JSONObject],
    max_rows: int,
    max_columns: int | None,
    select_columns: list[str | int],
    time_range: JSONObject,
    output_profile: str,
    list_type: int,
    view_key: str | None = None,
    view_name: str | None = None,
) -> JSONObject:
    """List branch of the query tool: scan search pages into flat rows.

    Resolves ``select_columns`` against the field index, then scans backend
    search pages until ``max_rows`` rows are collected, the page budget
    (min of requested_pages/scan_max_pages, floored at 1) is spent, or the
    backend has no more pages. Rows are view-filtered client-side and
    flattened via ``_build_flat_row``.

    Because the backend search only returns a limited number of fields per
    request (``BACKEND_LIST_SEARCH_FIELD_LIMIT``), selected fields are chunked:
    the first batch drives the primary scan, and each later batch triggers an
    extra per-page request whose answers are merged back into the already
    emitted rows by apply_id. If a view selection cannot be evaluated from the
    primary batch's fields alone, field restriction is disabled entirely
    (``search_que_ids=None``) so all fields come back in one pass.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    if not select_columns:
        raise_tool_error(QingflowApiError.config_error("select_columns is required in list mode"))
    if max_rows <= 0:
        raise_tool_error(QingflowApiError.config_error("max_rows must be positive"))

    def runner(session_profile, context):
        index = self._get_field_index(profile, context, app_key, force_refresh=False)
        view_selection = self._resolve_view_selection(profile, context, app_key, view_key=view_key, view_name=view_name)
        dept_member_cache: dict[int, set[int]] = {}
        # Echoed in applied_limits; _resolve_select_columns enforces the same cap.
        resolved_column_cap = _bounded_column_limit(
            max_columns,
            default_limit=MAX_LIST_COLUMN_LIMIT,
            hard_limit=MAX_LIST_COLUMN_LIMIT,
        )
        selected_fields = self._resolve_select_columns(
            select_columns,
            index,
            max_columns=max_columns,
            default_limit=MAX_LIST_COLUMN_LIMIT,
        )
        # Split the selection into backend-sized batches; batch 0 is fetched
        # during the primary scan, the rest are back-filled per page.
        selected_field_batches = _chunk_fields(selected_fields, BACKEND_LIST_SEARCH_FIELD_LIMIT)
        primary_search_que_ids: list[int] | None = [field.que_id for field in selected_field_batches[0]]
        if view_selection is not None and not _view_selection_supported_by_search_ids(view_selection, primary_search_que_ids):
            # The view filter needs fields outside the primary batch: drop the
            # field restriction so every answer is present in the primary scan.
            primary_search_que_ids = None
            remaining_field_batches: list[list[FormField]] = []
            selected_fields_from_primary = selected_fields
        else:
            remaining_field_batches = selected_field_batches[1:]
            primary_search_que_ids = primary_search_que_ids or None
            primary_que_ids = set(primary_search_que_ids or [])
            selected_fields_from_primary = [field for field in selected_fields if field.que_id in primary_que_ids]
        time_field = self._resolve_time_range_column(time_range, index)
        match_rules = self._resolve_match_rules(context, filters, index)
        sort_rules = self._resolve_sorts(sorts, index)
        # time_range becomes an extra backend match rule on time_field.
        match_rules = self._append_time_range_filter(match_rules, time_range, time_field)
        scan_limit = min(max(requested_pages, 1), max(scan_max_pages, 1))
        current_page = max(page_num, 1)
        scanned_pages = 0
        rows: list[JSONObject] = []
        matched_records = 0
        result_amount: int | None = None
        reported_total: int | None = None
        has_more = False
        source_pages: list[int] = []
        while scanned_pages < scan_limit and len(rows) < max_rows:
            page = self._search_page(
                context,
                app_key=app_key,
                page_num=current_page,
                page_size=page_size,
                query_key=query_key,
                match_rules=match_rules,
                sorts=sort_rules,
                search_que_ids=primary_search_que_ids,
                list_type=list_type,
            )
            scanned_pages += 1
            source_pages.append(current_page)
            page_rows = page.get("list")
            items = page_rows if isinstance(page_rows, list) else []
            # Totals are read from the first fetched page only; backends
            # report them under either "total" or "count".
            if result_amount is None:
                reported_total = _coerce_count(page.get("total"))
                if reported_total is None:
                    reported_total = _coerce_count(page.get("count"))
                result_amount = _effective_total(page, page_size)
            has_more = _page_has_more(page, current_page, page_size, len(items))
            page_output_rows: list[JSONObject] = []
            for item in items:
                if not isinstance(item, dict):
                    continue
                answers = item.get("answers")
                answer_list = answers if isinstance(answers, list) else []
                if not self._matches_view_selection(
                    context,
                    answer_list,
                    view_selection=view_selection,
                    dept_member_cache=dept_member_cache,
                ):
                    continue
                matched_records += 1
                apply_id = _coerce_count(item.get("applyId")) or _coerce_count(item.get("id"))
                row = _build_flat_row(answer_list, selected_fields_from_primary, apply_id=apply_id)
                rows.append(row)
                page_output_rows.append(row)
                if len(rows) >= max_rows:
                    break
            # Back-fill columns that did not fit in the primary field batch:
            # re-fetch the same page once per remaining batch and merge the
            # extra answers into this page's rows by apply_id.
            if page_output_rows and remaining_field_batches:
                page_row_map = {
                    _coerce_count(row.get("apply_id")): row
                    for row in page_output_rows
                    if isinstance(row, dict) and _coerce_count(row.get("apply_id")) is not None
                }
                for batch in remaining_field_batches:
                    extra_page = self._search_page(
                        context,
                        app_key=app_key,
                        page_num=current_page,
                        page_size=page_size,
                        query_key=query_key,
                        match_rules=match_rules,
                        sorts=sort_rules,
                        search_que_ids=[field.que_id for field in batch],
                        list_type=list_type,
                    )
                    extra_rows = extra_page.get("list")
                    extra_items = extra_rows if isinstance(extra_rows, list) else []
                    for extra_item in extra_items:
                        if not isinstance(extra_item, dict):
                            continue
                        apply_id = _coerce_count(extra_item.get("applyId")) or _coerce_count(extra_item.get("id"))
                        if apply_id is None or apply_id not in page_row_map:
                            continue
                        extra_answers = extra_item.get("answers")
                        extra_answer_list = extra_answers if isinstance(extra_answers, list) else []
                        partial_row = _build_flat_row(extra_answer_list, batch, apply_id=apply_id)
                        # apply_id is already set on the target row; drop the
                        # duplicate before merging.
                        partial_row.pop("apply_id", None)
                        page_row_map[apply_id].update(partial_row)
            if not has_more:
                break
            current_page += 1
        # With a view filter the backend total counts unfiltered rows; use the
        # locally matched count instead.
        effective_result_amount = matched_records if view_selection is not None else (result_amount or len(rows))
        completeness = _build_completeness(
            result_amount=effective_result_amount,
            returned_items=len(rows),
            fetched_pages=scanned_pages,
            requested_pages=scan_limit,
            has_more=has_more,
            next_page_token=None,
            is_complete=not has_more and len(rows) < max_rows,
            omitted_items=max(0, effective_result_amount - len(rows)),
            extra={},
        )
        evidence = {
            "query_id": _query_id(),
            "app_key": app_key,
            "filters": _echo_filters(match_rules),
            "selected_columns": [field.que_title for field in selected_fields],
            "time_range": time_range or None,
            "source_pages": source_pages,
            "view": _view_selection_payload(view_selection),
        }
        response: JSONObject = {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "ok": True,
            "request_route": self._request_route_payload(context),
            "data": {
                "mode": "list",
                "source_tool": "record_search",
                "view": _view_selection_payload(view_selection),
                "list": {
                    "rows": rows,
                    "pagination": {
                        "page_num": page_num,
                        "page_size": page_size,
                        "requested_pages": scan_limit,
                        "result_amount": effective_result_amount,
                        "returned_items": len(rows),
                    },
                    "applied_limits": {
                        "row_cap": max_rows,
                        "column_cap": resolved_column_cap,
                        "selected_columns": [field.que_title for field in selected_fields],
                    },
                },
            },
            "output_profile": output_profile,
            "next_page_token": None,
        }
        if output_profile == "verbose":
            response["completeness"] = completeness
            evidence["backend_reported_total"] = reported_total
            response["evidence"] = evidence
            response["resolved_mappings"] = {
                "select_columns": [_field_mapping_entry("row", field, requested=field.que_title) for field in selected_fields],
                "filters": [_field_mapping_entry("filter", entry["field"], requested=entry["requested"]) for entry in self._resolve_filter_field_entries(filters, index)],
                "time_range": _field_mapping_entry("time", time_field, requested=time_field.que_title) if time_field is not None else None,
            }
        return response

    return self._run_record_tool(profile, runner)
|
|
1421
|
+
|
|
1422
|
+
def _record_query_summary(
    self,
    *,
    profile: str,
    app_key: str,
    page_num: int,
    page_size: int,
    requested_pages: int,
    scan_max_pages: int,
    query_key: str | None,
    filters: list[JSONObject],
    sorts: list[JSONObject],
    max_rows: int,
    max_columns: int | None,
    select_columns: list[str | int],
    amount_column: str | int | None,
    time_range: JSONObject,
    stat_policy: JSONObject,
    strict_full: bool,
    output_profile: str,
    list_type: int,
    view_key: str | None = None,
    view_name: str | None = None,
) -> JSONObject:
    """Scan record pages and aggregate them into a summary response.

    Produces total count, optional amount total (from ``amount_column``),
    per-day buckets (keyed by ``time_range``'s column, or a single "all"
    bucket), plus up to ``max_rows`` preview rows. Scanning is capped by
    ``requested_pages`` and ``scan_max_pages``; with ``strict_full`` set,
    an incomplete scan raises instead of returning partial numbers.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))

    def runner(session_profile, context):
        index = self._get_field_index(profile, context, app_key, force_refresh=False)
        view_selection = self._resolve_view_selection(profile, context, app_key, view_key=view_key, view_name=view_name)
        # Cache of dept_id -> member uids, shared across all view-condition checks in this run.
        dept_member_cache: dict[int, set[int]] = {}
        amount_field = self._resolve_field_selector(amount_column, index, location="amount_column") if amount_column is not None else None
        time_field = self._resolve_time_range_column(time_range, index)
        resolved_column_cap = _bounded_column_limit(
            max_columns,
            default_limit=MAX_SUMMARY_PREVIEW_COLUMN_LIMIT,
            hard_limit=MAX_SUMMARY_PREVIEW_COLUMN_LIMIT,
        )
        preview_fields = self._resolve_summary_preview_fields(select_columns, index, amount_field, time_field, max_columns=max_columns)
        match_rules = self._resolve_match_rules(context, filters, index)
        sort_rules = self._resolve_sorts(sorts, index)
        # Fold the time_range constraint into the backend filter set.
        match_rules = self._append_time_range_filter(match_rules, time_range, time_field)
        include_negative = bool(stat_policy.get("include_negative", True))
        include_null = bool(stat_policy.get("include_null", False))
        # Scan at most requested_pages, clamped by scan_max_pages; both are floored at 1.
        scan_limit = min(max(requested_pages, 1), max(scan_max_pages, 1))
        current_page = max(page_num, 1)
        scanned_pages = 0
        scanned_records = 0
        result_amount: int | None = None
        has_more = False
        source_pages: list[int] = []
        preview_rows: list[JSONObject] = []
        total_amount = 0.0
        missing_count = 0
        by_day: dict[str, JSONObject] = {}
        while scanned_pages < scan_limit:
            page = self._search_page(
                context,
                app_key=app_key,
                page_num=current_page,
                page_size=page_size,
                query_key=query_key,
                match_rules=match_rules,
                sorts=sort_rules,
                search_que_ids=None,
                list_type=list_type,
            )
            scanned_pages += 1
            source_pages.append(current_page)
            page_rows = page.get("list")
            items = page_rows if isinstance(page_rows, list) else []
            if result_amount is None:
                # Backend-reported total, captured once from the first page.
                result_amount = _effective_total(page, page_size)
            has_more = _page_has_more(page, current_page, page_size, len(items))
            for item in items:
                if not isinstance(item, dict):
                    continue
                answers = item.get("answers")
                answer_list = answers if isinstance(answers, list) else []
                # Post-filter by the selected view's conditions (backend cannot do this for us).
                if not self._matches_view_selection(
                    context,
                    answer_list,
                    view_selection=view_selection,
                    dept_member_cache=dept_member_cache,
                ):
                    continue
                scanned_records += 1
                if len(preview_rows) < max_rows:
                    apply_id = _coerce_count(item.get("applyId")) or _coerce_count(item.get("id"))
                    preview_rows.append(_build_flat_row(answer_list, preview_fields, apply_id=apply_id))
                amount_value = _coerce_amount(_extract_field_value(answer_list, amount_field)) if amount_field is not None else None
                if amount_field is not None:
                    if amount_value is None:
                        # Null amounts are only counted as "missing" when the policy excludes them.
                        if not include_null:
                            missing_count += 1
                    elif include_negative or amount_value >= 0:
                        total_amount += amount_value
                # Bucket by calendar day when a time column exists; otherwise one "all" bucket.
                day_key = _to_time_bucket(_extract_field_value(answer_list, time_field), "day") if time_field is not None else "all"
                bucket = by_day.get(day_key)
                if bucket is None:
                    bucket = {"day": day_key, "count": 0, "amount_total": 0.0 if amount_field is not None else None}
                    by_day[day_key] = bucket
                bucket["count"] = int(bucket["count"]) + 1
                if amount_field is not None and amount_value is not None and (include_negative or amount_value >= 0):
                    bucket["amount_total"] = float(bucket.get("amount_total") or 0.0) + amount_value
            if not has_more:
                break
            current_page += 1
        raw_scan_complete = not has_more
        # When a view filter is active the backend total is meaningless; use the
        # filtered count instead.
        effective_result_amount = scanned_records if view_selection is not None else max(result_amount or 0, scanned_records)
        completeness = _build_completeness(
            result_amount=effective_result_amount,
            returned_items=len(preview_rows),
            fetched_pages=scanned_pages,
            requested_pages=scan_limit,
            has_more=has_more,
            next_page_token=None,
            is_complete=raw_scan_complete and len(preview_rows) < max_rows,
            omitted_items=max(0, effective_result_amount - len(preview_rows)),
            extra={
                "raw_scan_complete": raw_scan_complete,
                "scan_limit_hit": has_more,
                "scanned_pages": scanned_pages,
                "scan_limit": scan_limit,
                "output_page_complete": len(preview_rows) < max_rows,
                "raw_next_page_token": None,
                "output_next_page_token": None,
                "stop_reason": "source_exhausted" if raw_scan_complete else "scan_limit",
            },
        )
        evidence = {
            "query_id": _query_id(),
            "app_key": app_key,
            "filters": _echo_filters(match_rules),
            "selected_columns": [field.que_title for field in preview_fields],
            "time_range": time_range or None,
            "source_pages": source_pages,
            "view": _view_selection_payload(view_selection),
        }
        if strict_full and not raw_scan_complete:
            self._raise_need_more_data(completeness, evidence, "Summary is incomplete; increase requested_pages or scan_max_pages.")
        response: JSONObject = {
            "profile": profile,
            "ws_id": session_profile.selected_ws_id,
            "ok": True,
            "request_route": self._request_route_payload(context),
            "data": {
                "mode": "summary",
                "source_tool": "record_search",
                "view": _view_selection_payload(view_selection),
                "summary": {
                    "summary": {
                        "total_count": scanned_records,
                        "total_amount": total_amount if amount_field is not None else None,
                        "by_day": sorted(by_day.values(), key=lambda item: str(item.get("day"))),
                        "missing_count": missing_count,
                    },
                    "rows": preview_rows,
                    "completeness": completeness,
                    "applied_limits": {
                        "row_cap": max_rows,
                        "column_cap": resolved_column_cap,
                        "selected_columns": [field.que_title for field in preview_fields],
                    },
                },
            },
            "output_profile": output_profile,
            "next_page_token": None,
        }
        # Verbose output additionally surfaces diagnostics at the top level.
        if output_profile == "verbose":
            response["completeness"] = completeness
            response["evidence"] = evidence
            response["resolved_mappings"] = {
                "select_columns": [_field_mapping_entry("row", field, requested=field.que_title) for field in preview_fields],
                "amount_column": _field_mapping_entry("amount", amount_field, requested=amount_field.que_title) if amount_field is not None else None,
                "time_range": _field_mapping_entry("time", time_field, requested=time_field.que_title) if time_field is not None else None,
            }
        return response

    return self._run_record_tool(profile, runner)
|
|
1602
|
+
|
|
1603
|
+
def _get_form_schema(self, profile: str, context, app_key: str, *, force_refresh: bool) -> JSONObject:  # type: ignore[no-untyped-def]
    """Fetch, normalize, and memoize the form schema for *app_key* under *profile*."""
    key = (profile, app_key)
    if not force_refresh:
        try:
            return self._form_cache[key]
        except KeyError:
            pass
    raw_schema = self.backend.request("GET", context, f"/app/{app_key}/form", params={"type": 1})
    result = _normalize_form_schema(raw_schema)
    self._form_cache[key] = result
    return result
|
|
1611
|
+
|
|
1612
|
+
def _get_field_index(self, profile: str, context, app_key: str, *, force_refresh: bool) -> FieldIndex:  # type: ignore[no-untyped-def]
    """Build a field index over the (cached) form schema of *app_key*."""
    schema = self._get_form_schema(profile, context, app_key, force_refresh=force_refresh)
    return _build_field_index(schema)
|
|
1614
|
+
|
|
1615
|
+
def _get_view_list(self, profile: str, context, app_key: str) -> list[JSONObject]:  # type: ignore[no-untyped-def]
    """Fetch and memoize the list of views defined on *app_key*."""
    key = (profile, app_key)
    try:
        return self._view_list_cache[key]
    except KeyError:
        pass
    raw = self.backend.request("GET", context, f"/app/{app_key}/view/viewList")
    views = _normalize_view_list(raw)
    self._view_list_cache[key] = views
    return views
|
|
1623
|
+
|
|
1624
|
+
def _get_view_config(self, profile: str, context, view_key: str) -> JSONObject:  # type: ignore[no-untyped-def]
    """Fetch and memoize a view's configuration; non-dict payloads become {}."""
    key = (profile, view_key)
    try:
        return self._view_config_cache[key]
    except KeyError:
        pass
    raw = self.backend.request("GET", context, f"/view/{view_key}/viewConfig")
    config = raw if isinstance(raw, dict) else {}
    self._view_config_cache[key] = config
    return config
|
|
1632
|
+
|
|
1633
|
+
def _resolve_view_selection(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    app_key: str,
    *,
    view_key: str | None,
    view_name: str | None,
) -> ViewSelection | None:
    """Resolve an optional view reference (key and/or name) into a ViewSelection.

    Returns None when neither selector is supplied. Raises a tool error when
    view_name matches more than one view, or when no view key can be resolved.
    """
    requested_key = _normalize_optional_text(view_key)
    requested_name = _normalize_optional_text(view_name)
    if requested_key is None and requested_name is None:
        return None
    views = self._get_view_list(profile, context, app_key)
    selected: JSONObject | None = None
    # Key lookup first; fall back to exact (unambiguous) name match.
    if requested_key is not None:
        selected = next((item for item in views if _normalize_optional_text(item.get("viewKey")) == requested_key), None)
    if selected is None and requested_name is not None:
        exact_matches = [item for item in views if _normalize_optional_text(item.get("viewName")) == requested_name]
        if len(exact_matches) > 1:
            raise_tool_error(QingflowApiError.config_error(f"view_name '{requested_name}' is ambiguous; pass view_key instead"))
        selected = exact_matches[0] if exact_matches else None
    # An explicit view_key wins even when it was not present in the listed
    # views; the subsequent viewConfig fetch surfaces any backend error.
    resolved_view_key = requested_key or _normalize_optional_text(selected.get("viewKey") if isinstance(selected, dict) else None)
    if not resolved_view_key:
        raise_tool_error(QingflowApiError.config_error(f"cannot resolve view '{requested_name or requested_key}' for app {app_key}"))
    config = self._get_view_config(profile, context, resolved_view_key)
    # Display name preference: config's own name, then the listed name,
    # then the caller's input, finally the key itself.
    resolved_view_name = (
        _normalize_optional_text(config.get("viewgraphName"))
        or _normalize_optional_text(selected.get("viewName") if isinstance(selected, dict) else None)
        or requested_name
        or resolved_view_key
    )
    return ViewSelection(
        view_key=resolved_view_key,
        view_name=resolved_view_name,
        conditions=_compile_view_conditions(config),
    )
|
|
1670
|
+
|
|
1671
|
+
def _get_department_member_ids(self, context, dept_id: int) -> set[int]:  # type: ignore[no-untyped-def]
    """Collect the uids of all enabled members of *dept_id*, paging through /contact."""
    members: set[int] = set()
    page = 1
    size = 200
    while True:
        payload = self.backend.request(
            "GET",
            context,
            "/contact",
            params={"pageNum": page, "pageSize": size, "deptId": dept_id, "containDisable": False},
        )
        if isinstance(payload, dict):
            raw_rows = payload.get("result")
            total_pages = _coerce_count(payload.get("pageAmount"))
        else:
            raw_rows = None
            total_pages = None
        rows = raw_rows if isinstance(raw_rows, list) else []
        for entry in rows:
            if isinstance(entry, dict):
                uid = _coerce_count(entry.get("uid", entry.get("id")))
                if uid is not None:
                    members.add(uid)
        # Stop on the reported last page, or — when the backend does not
        # report a page count — when a short page implies exhaustion.
        if total_pages is not None:
            if page >= total_pages:
                break
        elif len(rows) < size:
            break
        page += 1
    return members
|
|
1698
|
+
|
|
1699
|
+
def _matches_view_selection(
    self,
    context,  # type: ignore[no-untyped-def]
    answer_list: list[JSONValue],
    *,
    view_selection: ViewSelection | None,
    dept_member_cache: dict[int, set[int]],
) -> bool:
    """Return True when the record's answers satisfy the view's filter.

    Condition groups are OR-ed together; conditions within a group are AND-ed.
    A missing selection (or one without conditions) matches everything.
    """
    if view_selection is None or not view_selection.conditions:
        return True

    def matches(condition) -> bool:
        return _match_view_condition(
            answer_list,
            condition,
            dept_member_cache=dept_member_cache,
            dept_member_resolver=lambda dept_id: self._get_department_member_ids(context, dept_id),
        )

    return any(all(matches(condition) for condition in group) for group in view_selection.conditions)
|
|
1721
|
+
|
|
1722
|
+
def _search_page(
    self,
    context,  # type: ignore[no-untyped-def]
    *,
    app_key: str,
    page_num: int,
    page_size: int,
    query_key: str | None,
    match_rules: list[JSONObject],
    sorts: list[JSONObject],
    search_que_ids: list[int] | None,
    list_type: int,
) -> JSONObject:
    """Run one page of the backend apply/filter search and return its payload dict."""
    request_body: JSONObject = {"type": list_type, "pageNum": page_num, "pageSize": page_size}
    if query_key:
        request_body["queryKey"] = query_key
    if match_rules:
        # Each logical rule may expand into backend "queries" and/or "matchRules".
        query_rules: list[JSONObject] = []
        rule_payloads: list[JSONObject] = []
        for rule in match_rules:
            if isinstance(rule, dict):
                query_rules.extend(_normalize_list_query_rules(rule))
                rule_payloads.extend(_normalize_list_match_rules(rule))
        if query_rules:
            request_body["queries"] = query_rules
        if rule_payloads:
            request_body["matchRules"] = rule_payloads
    if sorts:
        sort_payloads = [payload for payload in (_normalize_list_sort_rule(rule) for rule in sorts if isinstance(rule, dict)) if payload]
        if sort_payloads:
            request_body["sorts"] = sort_payloads
    if search_que_ids:
        request_body["searchQueIds"] = search_que_ids
    payload = self.backend.request("POST", context, f"/app/{app_key}/apply/filter", json_body=request_body)
    return payload if isinstance(payload, dict) else {}
|
|
1758
|
+
|
|
1759
|
+
def _resolve_answers(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    app_key: str,
    *,
    answers: list[JSONObject],
    fields: JSONObject,
    force_refresh_form: bool,
) -> list[JSONObject]:
    """Merge raw ``answers`` items and friendly ``fields`` entries into native answer payloads.

    At least one of the two inputs must be non-empty. The form schema is only
    fetched when resolution is actually needed; already-native answers pass
    through untouched.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    if not answers and not fields:
        raise_tool_error(QingflowApiError.config_error("either answers or fields is required"))
    if not answers and fields:
        index = self._get_field_index(profile, context, app_key, force_refresh=force_refresh_form)
        # None values mean "not provided" and are skipped.
        return [self._build_field_answer(profile, context, index, key, value) for key, value in fields.items() if value is not None]
    # Fast path: fully-native answers need no schema lookup at all.
    if answers and not _answers_need_resolution(answers) and not fields:
        return answers
    index = self._get_field_index(profile, context, app_key, force_refresh=force_refresh_form)
    normalized = [self._normalize_answer_item(profile, context, index, item) for item in answers]
    if fields:
        normalized.extend(self._build_field_answer(profile, context, index, key, value) for key, value in fields.items() if value is not None)
    return normalized
|
|
1783
|
+
|
|
1784
|
+
def _normalize_answer_item(self, profile: str, context, index: FieldIndex, item: JSONObject) -> JSONObject:  # type: ignore[no-untyped-def]
    """Normalize one caller-supplied answer item into a native answer payload.

    Subtable fields are routed through row normalization; all other fields must
    carry either ``value`` (scalar) or ``values`` (list), and are rejected when
    their type does not support direct writes.
    """
    field = self._resolve_field_from_answer_item(item, index)
    if field.que_type in SUBTABLE_QUE_TYPES:
        # Accept several aliases for the row payload, preferring the native key.
        table_values_input: JSONValue = item.get("tableValues")
        if table_values_input is None:
            table_values_input = item.get("rows", item.get("value", item.get("values")))
        normalized_rows = self._normalize_subtable_rows(profile, context, field, table_values_input, location=field.que_title)
        payload: JSONObject = {
            "queId": field.que_id,
            "queType": field.que_type or 18,
            "values": [],
            "tableValues": normalized_rows,
        }
        # Preserve row-ordinal bookkeeping when the caller supplies it.
        previous_row_ordinals = item.get("previousTableRowOrdinalList")
        if isinstance(previous_row_ordinals, list):
            payload["previousTableRowOrdinalList"] = previous_row_ordinals
        return payload
    self._raise_if_verify_unsupported_write_field(field, item, location=field.que_title)
    if "values" in item and isinstance(item["values"], list):
        values = item["values"]
    elif "value" in item:
        values = [item["value"]]
    else:
        raise RecordInputError(
            message=f"answer for field '{field.que_title}' requires value or values",
            error_code="MISSING_VALUE",
            fix_hint="Pass value for scalar fields, or values for multi-value fields.",
            details={"location": field.que_title, "field": _field_ref_payload(field), "expected_format": _write_format_for_field(field)},
        )
    return {
        "queId": field.que_id,
        "queType": field.que_type or 2,
        "values": self._normalize_field_values(profile, context, field, values),
        "tableValues": item.get("tableValues") if isinstance(item.get("tableValues"), list) else [],
    }
|
|
1819
|
+
|
|
1820
|
+
def _build_field_answer(self, profile: str, context, index: FieldIndex, field_selector: str, raw_value: JSONValue) -> JSONObject:  # type: ignore[no-untyped-def]
    """Translate a single ``fields`` entry (selector -> value) into a native answer payload."""
    field = self._resolve_field_selector(field_selector, index, location="fields")
    if field.que_type in SUBTABLE_QUE_TYPES:
        # Subtables carry their data in tableValues; the flat values list stays empty.
        rows = self._normalize_subtable_rows(profile, context, field, raw_value, location=str(field_selector))
        return {"queId": field.que_id, "queType": field.que_type or 18, "values": [], "tableValues": rows}
    self._raise_if_verify_unsupported_write_field(field, raw_value, location=str(field_selector))
    if isinstance(raw_value, list) and field.que_type in MULTI_SELECT_QUE_TYPES:
        candidate_values = raw_value
    else:
        candidate_values = [raw_value]
    return {
        "queId": field.que_id,
        "queType": field.que_type or 2,
        "values": self._normalize_field_values(profile, context, field, candidate_values),
        "tableValues": [],
    }
|
|
1837
|
+
|
|
1838
|
+
def _raise_if_verify_unsupported_write_field(self, field: FormField, raw_value: JSONValue, *, location: str) -> None:
    """Reject direct writes to field types the verify flow cannot accept."""
    if field.que_type in VERIFY_UNSUPPORTED_WRITE_QUE_TYPES:
        details = {
            "location": location,
            "field": _field_ref_payload(field),
            "expected_format": _write_format_for_field(field),
            "received_value": raw_value,
        }
        raise RecordInputError(
            message=f"field '{field.que_title}' uses unsupported direct writes",
            error_code="UNSUPPORTED_WRITE_FORMAT",
            fix_hint=_unsupported_write_fix_hint(field.que_type),
            details=details,
        )
|
|
1852
|
+
|
|
1853
|
+
def _normalize_field_values(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    field: FormField,
    raw_values: list[JSONValue],
) -> list[JSONObject]:
    """Convert raw input values into native value payloads, dispatched by que_type.

    The first matching type bucket wins, so the branch order matters. Types
    with no special handling fall through to a single stringified value.
    Assumes raw_values is non-empty — callers always wrap at least one value.
    """
    if field.que_type in SINGLE_SELECT_QUE_TYPES:
        # Single-select keeps only the first provided value.
        return [_option_value(raw_values[0], field)]
    if field.que_type in MULTI_SELECT_QUE_TYPES:
        return [_option_value(value, field) for value in raw_values]
    if field.que_type in BOOLEAN_QUE_TYPES:
        return [{"value": _boolean_display(raw_values[0])}]
    if field.que_type in MEMBER_QUE_TYPES:
        return [_member_value(profile, value) for value in _expand_values(raw_values)]
    if field.que_type in DEPARTMENT_QUE_TYPES:
        # Department selectors may need a backend lookup, hence context.
        return [self._department_value_from_selector(context, value) for value in _expand_values(raw_values)]
    if field.que_type in ATTACHMENT_QUE_TYPES:
        return [_attachment_value(value) for value in _expand_values(raw_values)]
    if field.que_type in RELATION_QUE_TYPES:
        return [_relation_value(value) for value in _expand_values(raw_values)]
    # Default: plain scalar field — stringify the first value.
    return [{"value": _stringify_json(raw_values[0])}]
|
|
1875
|
+
|
|
1876
|
+
def _normalize_subtable_rows(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    table_field: FormField,
    raw_rows: JSONValue,
    *,
    location: str,
) -> list[list[JSONObject]]:
    """Normalize a subtable payload into a list of native row cell-lists.

    Accepts either a plain list of rows or a wrapper dict with ``rows`` /
    ``tableValues``. None yields an empty table; any other shape is rejected.
    """
    row_values = raw_rows
    if isinstance(raw_rows, dict):
        # Unwrap common wrapper shapes before validating.
        if "rows" in raw_rows:
            row_values = raw_rows.get("rows")
        elif "tableValues" in raw_rows:
            row_values = raw_rows.get("tableValues")
    if row_values is None:
        return []
    if not isinstance(row_values, list):
        raise RecordInputError(
            message=f"field '{table_field.que_title}' requires subtable rows",
            error_code="INVALID_SUBTABLE_VALUE",
            fix_hint="Pass subtables as a list of row objects or a native tableValues array.",
            details={"location": location, "field": _field_ref_payload(table_field), "expected_format": _write_format_for_field(table_field), "received_value": raw_rows},
        )
    subtable_index = self._subtable_field_index(table_field)
    normalized_rows: list[list[JSONObject]] = []
    for row_ordinal, row in enumerate(row_values):
        normalized_rows.append(
            self._normalize_subtable_row(
                profile,
                context,
                table_field,
                subtable_index,
                row,
                row_ordinal=row_ordinal,
                location=location,
            )
        )
    return normalized_rows
|
|
1915
|
+
|
|
1916
|
+
def _normalize_subtable_row(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    table_field: FormField,
    subtable_index: FieldIndex,
    row: JSONValue,
    *,
    row_ordinal: int,
    location: str,
) -> list[JSONObject]:
    """Normalize one subtable row into a list of native cell payloads.

    Accepted row shapes:
      * dict with ``answers`` — a list of native answer items;
      * dict with ``fields`` — a mapping of subfield selector to value;
      * plain dict — treated as a fields mapping (reserved keys stripped);
      * list — native answer items.
    An optional row id (``__row_id__`` / ``row_id`` / ``rowId``) is propagated
    onto every cell that does not already carry one. Empty rows are rejected.
    """
    row_id: int | None = None
    normalized_cells: list[JSONObject] = []
    if isinstance(row, dict):
        row_id = _coerce_count(row.get("__row_id__", row.get("row_id", row.get("rowId"))))
        if isinstance(row.get("answers"), list):
            normalized_cells = [
                self._normalize_subtable_answer_item(profile, context, subtable_index, item, location=f"{location}[{row_ordinal}]")
                for item in row["answers"]
                if isinstance(item, dict)
            ]
        elif isinstance(row.get("fields"), dict):
            normalized_cells = [
                self._build_subtable_field_answer(profile, context, subtable_index, key, value, location=f"{location}[{row_ordinal}]")
                for key, value in row["fields"].items()
                if value is not None
            ]
        else:
            # Plain dict: every non-reserved key is a subfield selector.
            row_fields = {
                key: value
                for key, value in row.items()
                if key not in {"__row_id__", "row_id", "rowId", "answers", "fields", "rows", "tableValues"}
            }
            normalized_cells = [
                self._build_subtable_field_answer(profile, context, subtable_index, key, value, location=f"{location}[{row_ordinal}]")
                for key, value in row_fields.items()
                if value is not None
            ]
    elif isinstance(row, list):
        normalized_cells = [
            self._normalize_subtable_answer_item(profile, context, subtable_index, item, location=f"{location}[{row_ordinal}]")
            for item in row
            if isinstance(item, dict)
        ]
    else:
        raise RecordInputError(
            message=f"field '{table_field.que_title}' row {row_ordinal + 1} has unsupported shape",
            error_code="INVALID_SUBTABLE_ROW",
            fix_hint="Pass each subtable row as an object keyed by subfield title, or a list of native answer objects.",
            details={"location": location, "field": _field_ref_payload(table_field), "row_ordinal": row_ordinal, "received_value": row},
        )
    if not normalized_cells:
        raise RecordInputError(
            message=f"field '{table_field.que_title}' row {row_ordinal + 1} is empty",
            error_code="EMPTY_SUBTABLE_ROW",
            fix_hint="Provide at least one subfield value in each subtable row, or omit the row entirely.",
            details={"location": location, "field": _field_ref_payload(table_field), "row_ordinal": row_ordinal, "received_value": row},
        )
    if row_id is not None:
        # setdefault: cells that already carry a rowId keep their own.
        for cell in normalized_cells:
            cell.setdefault("rowId", row_id)
    return normalized_cells
|
|
1978
|
+
|
|
1979
|
+
def _normalize_subtable_answer_item(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    subtable_index: FieldIndex,
    item: JSONObject,
    *,
    location: str,
) -> JSONObject:
    """Normalize one native-style subtable cell item into a native answer payload.

    Nested subtables are rejected; cells must carry ``value`` or ``values``.
    An explicit row id on the item is preserved.
    """
    field = self._resolve_field_from_answer_item(item, subtable_index)
    if field.que_type in SUBTABLE_QUE_TYPES:
        raise RecordInputError(
            message=f"field '{field.que_title}' uses unsupported nested subtable writes",
            error_code="UNSUPPORTED_WRITE_FORMAT",
            fix_hint="Nested subtable writes are not supported in app-user tools.",
            details={"location": location, "field": _field_ref_payload(field), "expected_format": _write_format_for_field(field), "received_value": item},
        )
    self._raise_if_verify_unsupported_write_field(field, item, location=location)
    if "values" in item and isinstance(item["values"], list):
        values = item["values"]
    elif "value" in item:
        values = [item["value"]]
    else:
        raise RecordInputError(
            message=f"subtable field '{field.que_title}' requires value or values",
            error_code="MISSING_VALUE",
            fix_hint="Pass value for scalar subtable fields, or values for multi-value subtable fields.",
            details={"location": location, "field": _field_ref_payload(field), "expected_format": _write_format_for_field(field)},
        )
    payload: JSONObject = {
        "queId": field.que_id,
        "queType": field.que_type or 2,
        "values": self._normalize_field_values(profile, context, field, values),
        "tableValues": [],
    }
    row_id = _coerce_count(item.get("rowId", item.get("row_id")))
    if row_id is not None:
        payload["rowId"] = row_id
    return payload
|
|
2018
|
+
|
|
2019
|
+
def _build_subtable_field_answer(
    self,
    profile: str,
    context,  # type: ignore[no-untyped-def]
    subtable_index: FieldIndex,
    field_selector: str,
    raw_value: JSONValue,
    *,
    location: str,
) -> JSONObject:
    """Translate one subtable cell (selector -> value) into a native answer payload."""
    field = self._resolve_field_selector(field_selector, subtable_index, location=location)
    if field.que_type in SUBTABLE_QUE_TYPES:
        # A subtable column cannot itself be a subtable.
        raise RecordInputError(
            message=f"field '{field.que_title}' uses unsupported nested subtable writes",
            error_code="UNSUPPORTED_WRITE_FORMAT",
            fix_hint="Nested subtable writes are not supported in app-user tools.",
            details={"location": location, "field": _field_ref_payload(field), "expected_format": _write_format_for_field(field), "received_value": raw_value},
        )
    self._raise_if_verify_unsupported_write_field(field, raw_value, location=location)
    if isinstance(raw_value, list) and field.que_type in MULTI_SELECT_QUE_TYPES:
        cell_values = raw_value
    else:
        cell_values = [raw_value]
    return {
        "queId": field.que_id,
        "queType": field.que_type or 2,
        "values": self._normalize_field_values(profile, context, field, cell_values),
        "tableValues": [],
    }
|
|
2045
|
+
|
|
2046
|
+
def _subtable_field_index(self, table_field: FormField) -> FieldIndex:
    """Build a FieldIndex over a subtable's column metadata.

    Raises RecordInputError when the table field exposes no column schema.
    """
    raw = table_field.raw if isinstance(table_field.raw, dict) else {}
    schema: JSONObject = {}
    if isinstance(raw.get("subQuestions"), list):
        # NOTE(review): subQuestions is wrapped in an extra list while
        # innerQuestions is passed through as-is — presumably formQues is a
        # list of question groups and subQuestions is one flat group; confirm
        # against _build_field_index's expected shape.
        schema["formQues"] = [raw["subQuestions"]]
    elif isinstance(raw.get("innerQuestions"), list):
        schema["formQues"] = raw["innerQuestions"]
    index = _build_field_index(schema)
    if not index.by_id:
        raise RecordInputError(
            message=f"field '{table_field.que_title}' does not expose subtable column metadata",
            error_code="SUBTABLE_SCHEMA_UNAVAILABLE",
            fix_hint="Refresh the form schema and ensure the subtable columns are visible before writing.",
            details={"field": _field_ref_payload(table_field), "raw": raw},
        )
    return index
|
|
2062
|
+
|
|
2063
|
+
def _subtable_field_index_optional(self, table_field: FormField | None) -> FieldIndex | None:
    """Best-effort variant of ``_subtable_field_index``.

    Returns None instead of raising when *table_field* is absent or its
    subtable schema cannot be indexed.
    """
    if table_field is None:
        return None
    try:
        column_index = self._subtable_field_index(table_field)
    except RecordInputError:
        return None
    return column_index
|
|
2070
|
+
|
|
2071
|
+
def _run_record_tool(self, profile: str, func, *, require_workspace: bool = True):  # type: ignore[no-untyped-def]
    """Run *func* via the shared tool runner, translating RecordInputError.

    Record-level validation errors are re-raised as tool errors with category
    "config" so the MCP layer reports them uniformly; the original error_code,
    fix_hint, and full payload are preserved under details.
    """
    try:
        return self._run(profile, func, require_workspace=require_workspace)
    except RecordInputError as error:
        raise_tool_error(
            QingflowApiError(
                category="config",
                message=error.message,
                backend_code=error.error_code,
                details={
                    "error_code": error.error_code,
                    "fix_hint": error.fix_hint,
                    "record_input": error.to_dict(),
                },
            )
        )
|
|
2087
|
+
|
|
2088
|
+
def _request_route_payload(self, context) -> JSONObject:  # type: ignore[no-untyped-def]
    """Describe the backend route for *context*.

    Prefers the backend's own ``describe_route`` when it yields a dict;
    otherwise falls back to the attributes carried by the context object.
    """
    describe = getattr(self.backend, "describe_route", None)
    if callable(describe):
        described = describe(context)
        if isinstance(described, dict):
            return described
    qf_version = getattr(context, "qf_version", None)
    version_source = getattr(context, "qf_version_source", None)
    if not version_source:
        version_source = "context" if qf_version else "unknown"
    return {
        "base_url": getattr(context, "base_url", None),
        "qf_version": qf_version,
        "qf_version_source": version_source,
    }
|
|
2099
|
+
|
|
2100
|
+
def _resolve_field_selector(self, selector: str | int | None, index: FieldIndex, *, location: str) -> FormField:
    """Resolve *selector* (numeric queId, exact title, or alias) to a FormField.

    Lookup cascade: numeric queId -> normalized title -> alias. Every failure
    raises RecordInputError with a machine-readable error_code plus a fix_hint;
    the final not-found error also carries fuzzy suggestions.

    Args:
        selector: queId (int or digit string) or a field title/alias.
        index: field index for the target form.
        location: caller-supplied label (e.g. "filters") used in messages.
    """
    if selector is None:
        raise RecordInputError(
            message=f"{location} requires a field selector",
            error_code="MISSING_FIELD_SELECTOR",
            fix_hint="Pass an exact field title or queId.",
        )
    requested = str(selector).strip()
    requested_key = _normalize_field_lookup_key(requested)
    if not requested:
        raise RecordInputError(
            message=f"{location} contains an empty field selector",
            error_code="EMPTY_FIELD_SELECTOR",
            fix_hint="Pass an exact field title or queId.",
        )
    # All-digit selectors are treated as queIds; int() round-trip normalizes
    # away leading zeros before the by_id lookup.
    if requested.isdigit():
        field = index.by_id.get(str(int(requested)))
        if field is not None:
            return field
        raise RecordInputError(
            message=f"{location} references unknown queId '{requested}'",
            error_code="FIELD_NOT_FOUND",
            fix_hint="Use record_field_resolve to confirm the exact field id.",
            details={"location": location, "requested": requested, "requested_key": requested_key},
        )
    matches = index.by_title.get(requested_key, [])
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        raise RecordInputError(
            message=f"{location} field '{requested}' is ambiguous",
            error_code="AMBIGUOUS_FIELD",
            fix_hint="Use numeric queId, or resolve the field first with record_field_resolve.",
            details={
                "location": location,
                "requested": requested,
                "requested_key": requested_key,
                "matched_via": "title",
                "candidates": [_field_ref_payload(item) for item in matches],
            },
        )
    # No title match at all: fall back to alias lookup.
    alias_matches = index.by_alias.get(requested_key, [])
    if len(alias_matches) == 1:
        return alias_matches[0]
    if len(alias_matches) > 1:
        raise RecordInputError(
            message=f"{location} field '{requested}' is ambiguous",
            error_code="AMBIGUOUS_FIELD",
            fix_hint="Use a more specific field title, or run record_field_resolve to inspect the alias candidates.",
            details={
                "location": location,
                "requested": requested,
                "requested_key": requested_key,
                "matched_via": "alias",
                "candidates": [_field_ref_payload(item) for item in alias_matches],
            },
        )
    raise RecordInputError(
        message=f"{location} cannot resolve field '{requested}'",
        error_code="FIELD_NOT_FOUND",
        fix_hint="Use record_field_resolve to confirm the exact field title.",
        details={
            "location": location,
            "requested": requested,
            "requested_key": requested_key,
            "suggestions": self._score_field_matches(requested, index, fuzzy=True, top_k=5),
        },
    )
|
|
2168
|
+
|
|
2169
|
+
def _resolve_field_from_answer_item(self, item: JSONObject, index: FieldIndex) -> FormField:
    """Resolve the form field targeted by one answer item.

    Accepts any of the selector keys queId/que_id/queTitle/que_title, in that
    order of preference; raises RecordInputError when none is present.
    """
    selector_keys = ("queId", "que_id", "queTitle", "que_title")
    for selector_key in selector_keys:
        if selector_key not in item:
            continue
        return self._resolve_field_selector(cast(str | int, item[selector_key]), index, location="answers")
    raise RecordInputError(
        message="answer item requires queId/que_id or queTitle/que_title",
        error_code="MISSING_FIELD_SELECTOR",
        fix_hint="Provide a field selector in each answer item.",
    )
|
|
2178
|
+
|
|
2179
|
+
def _resolve_select_columns(
    self,
    selectors: list[str | int],
    index: FieldIndex,
    *,
    max_columns: int | None,
    default_limit: int,
) -> list[FormField]:
    """Resolve select_columns selectors to deduplicated fields.

    Duplicate queIds are kept once (first occurrence wins) and resolution
    stops as soon as the bounded column limit is reached.
    """
    if not selectors:
        raise_tool_error(QingflowApiError.config_error("select_columns is required"))
    column_limit = _bounded_column_limit(max_columns, default_limit=default_limit, hard_limit=default_limit)
    resolved: list[FormField] = []
    resolved_ids: set[int] = set()
    for raw_selector in selectors:
        candidate = self._resolve_field_selector(raw_selector, index, location="select_columns")
        if candidate.que_id in resolved_ids:
            continue
        resolved_ids.add(candidate.que_id)
        resolved.append(candidate)
        if len(resolved) >= column_limit:
            break
    return resolved
|
|
2201
|
+
|
|
2202
|
+
def _resolve_summary_preview_fields(
    self,
    selectors: list[str | int],
    index: FieldIndex,
    amount_field: FormField | None,
    time_field: FormField | None,
    *,
    max_columns: int | None,
) -> list[FormField]:
    """Pick the preview columns for a summary-mode query.

    Explicit selectors win; otherwise a heuristic default of (title field,
    amount field, time field) is used, deduplicated by queId, falling back to
    the first indexed fields when none of those exist. The result is always
    capped at the summary-preview column limit.
    """
    if selectors:
        return self._resolve_select_columns(
            selectors,
            index,
            max_columns=max_columns,
            default_limit=MAX_SUMMARY_PREVIEW_COLUMN_LIMIT,
        )
    limit = _bounded_column_limit(
        max_columns,
        default_limit=MAX_SUMMARY_PREVIEW_COLUMN_LIMIT,
        hard_limit=MAX_SUMMARY_PREVIEW_COLUMN_LIMIT,
    )
    candidates: list[FormField] = []
    title_candidate = _pick_title_field(index)
    for field in (title_candidate, amount_field, time_field):
        # Skip absent candidates and queIds already chosen.
        if field is None or any(existing.que_id == field.que_id for existing in candidates):
            continue
        candidates.append(field)
    if not candidates:
        candidates = list(index.by_id.values())[:limit]
    return candidates[:limit]
|
|
2232
|
+
|
|
2233
|
+
def _resolve_match_rules(self, context, filters: list[JSONObject], index: FieldIndex) -> list[JSONObject]:  # type: ignore[no-untyped-def]
    """Translate caller-facing filter specs into backend match rules.

    Each dict filter is resolved to a field (when a selector is present),
    department fields get a dedicated rule, explicit snake/camel search keys
    are passed through, and an operator + value pair is translated to
    min/max/search constraints as a fallback.
    """
    rules: list[JSONObject] = []
    for item in filters:
        if not isinstance(item, dict):
            continue
        selector = _extract_filter_selector(item)
        field = self._resolve_field_selector(cast(str | int | None, selector), index, location="filters") if selector is not None else None
        # Department fields need id-based rules resolved via the contact API.
        if field is not None and field.que_type in DEPARTMENT_QUE_TYPES:
            department_rule = self._build_department_filter_rule(context, field, item)
            if department_rule:
                rules.append(department_rule)
                continue
        rule: JSONObject = {}
        if field is not None:
            rule["queId"] = field.que_id
            if field.que_type is not None:
                rule["queType"] = field.que_type
        # Pass through explicit constraints, accepting both snake_case and
        # camelCase spellings.
        for source, target in (
            ("search_key", "searchKey"),
            ("searchKey", "searchKey"),
            ("search_keys", "searchKeys"),
            ("searchKeys", "searchKeys"),
            ("min_value", "minValue"),
            ("minValue", "minValue"),
            ("max_value", "maxValue"),
            ("maxValue", "maxValue"),
            ("scope", "scope"),
            ("search_options", "searchOptions"),
            ("searchOptions", "searchOptions"),
            ("search_user_ids", "searchUserIds"),
            ("searchUserIds", "searchUserIds"),
        ):
            if source in item:
                rule[target] = item[source]
        operator = _stringify_json(item.get("operator", item.get("op"))).strip().lower()
        # Only derive constraints from operator/value when no explicit
        # constraint key was passed through above.
        if "searchKey" not in rule and "searchKeys" not in rule and "searchOptions" not in rule and "searchUserIds" not in rule and "minValue" not in rule and "maxValue" not in rule:
            value = item.get("value", item.get("values"))
            if operator in {"gte", "gt"} and value is not None:
                rule["minValue"] = _stringify_json(value)
            elif operator in {"lte", "lt"} and value is not None:
                rule["maxValue"] = _stringify_json(value)
            elif operator == "between":
                lower, upper = _coerce_filter_range(value)
                if lower is not None:
                    rule["minValue"] = lower
                if upper is not None:
                    rule["maxValue"] = upper
            elif value is not None:
                if field is not None and field.que_type in SINGLE_SELECT_QUE_TYPES | MULTI_SELECT_QUE_TYPES:
                    option_ids = _normalize_option_filter_ids(value)
                    if option_ids:
                        rule["searchOptions"] = option_ids
                    elif isinstance(value, list):
                        rule["searchKeys"] = [_stringify_json(entry) for entry in value]
                    else:
                        rule["searchKey"] = _stringify_json(value)
                elif field is not None and field.que_type in MEMBER_QUE_TYPES:
                    member_ids = _normalize_member_filter_ids(value)
                    if member_ids:
                        # NOTE(review): this branch emits "searchUids" while the
                        # pass-through table above maps search_user_ids to
                        # "searchUserIds" — confirm which key the backend
                        # actually expects for member filters.
                        rule["searchUids"] = member_ids
                    elif isinstance(value, list):
                        rule["searchKeys"] = [_stringify_json(entry) for entry in value]
                    else:
                        rule["searchKey"] = _stringify_json(value)
                # NOTE(review): fields that are neither select nor member fall
                # through with no search constraint here; the rule is still
                # appended below with only queId/queType — confirm intended.
        if rule:
            rules.append(rule)
    return rules
|
|
2300
|
+
|
|
2301
|
+
def _build_department_filter_rule(self, context, field: FormField, item: JSONObject) -> JSONObject | None:  # type: ignore[no-untyped-def]
    """Build a department match rule from a filter item.

    Returns None when the item carries no recognizable department value or
    when the value resolves to no departments.
    """
    value_keys = (
        "search_options",
        "searchOptions",
        "search_keys",
        "searchKeys",
        "search_key",
        "searchKey",
        "value",
        "values",
    )
    present_key = next((key for key in value_keys if key in item), None)
    if present_key is None:
        return None
    details = self._resolve_department_filter_details(context, cast(JSONValue, item[present_key]))
    if not details:
        return None
    judge = JUDGE_EQUAL_ANY if len(details) > 1 else JUDGE_EQUAL
    return _department_filter_rule(field.que_id, field.que_type, details, judge_type=judge)
|
|
2314
|
+
|
|
2315
|
+
def _resolve_department_filter_details(self, context, raw_value: JSONValue) -> list[JSONObject]:  # type: ignore[no-untyped-def]
    """Resolve a raw department filter value into unique detail payloads.

    Expands list/scalar inputs, resolves each entry to {'id', 'value'}, and
    deduplicates by department id while preserving first-seen order.
    """
    candidates = raw_value if isinstance(raw_value, list) else [raw_value]
    resolved: list[JSONObject] = []
    resolved_ids: set[int] = set()
    for candidate in _expand_values(candidates):
        detail = self._resolve_department_filter_detail(context, candidate)
        detail_id = _coerce_count(detail.get("id"))
        if detail_id is None or detail_id in resolved_ids:
            continue
        resolved_ids.add(detail_id)
        resolved.append(detail)
    return resolved
|
|
2327
|
+
|
|
2328
|
+
def _resolve_department_filter_detail(self, context, raw_value: JSONValue) -> JSONObject:  # type: ignore[no-untyped-def]
    """Resolve one department filter entry to an {'id', 'value'} payload.

    Accepts a dict with id/deptId (plus optional display name), a bare numeric
    id, or a department-name string that is looked up via the contact API.

    Raises:
        RecordInputError: INVALID_DEPARTMENT_FILTER when the value carries
            neither an id nor a usable name.
    """
    if isinstance(raw_value, dict):
        dept_id = _coerce_count(raw_value.get("id", raw_value.get("deptId")))
        if dept_id is not None:
            return {
                "id": dept_id,
                # Display value falls back through value -> name -> deptName -> id.
                "value": _stringify_json(raw_value.get("value", raw_value.get("name", raw_value.get("deptName", dept_id)))),
            }
        keyword = _normalize_optional_text(raw_value.get("value", raw_value.get("name", raw_value.get("deptName"))))
        if keyword:
            return self._lookup_department_filter_detail(context, keyword)
        raise RecordInputError(
            message="department filters require id/deptId or a department name",
            error_code="INVALID_DEPARTMENT_FILTER",
            fix_hint="Pass department filters like {'field': '所在部门', 'value': {'id': 11, 'value': '示例部门'}} or use an exact department name.",
            details={"received_value": raw_value},
        )
    # Scalar input: try a numeric id first, then treat it as a name keyword.
    dept_id = _coerce_count(raw_value)
    if dept_id is not None:
        return {"id": dept_id, "value": str(dept_id)}
    keyword = _normalize_optional_text(raw_value)
    if keyword is None:
        raise RecordInputError(
            message="department filters require a non-empty department selector",
            error_code="INVALID_DEPARTMENT_FILTER",
            fix_hint="Pass a numeric dept id, a department object, or an exact department name.",
            details={"received_value": raw_value},
        )
    return self._lookup_department_filter_detail(context, keyword)
|
|
2357
|
+
|
|
2358
|
+
def _lookup_department_filter_detail(self, context, keyword: str) -> JSONObject:  # type: ignore[no-untyped-def]
    """Resolve a department name keyword for use inside a filter rule."""
    return self._lookup_department_detail(context, keyword, purpose="filter")
|
|
2360
|
+
|
|
2361
|
+
def _lookup_department_detail(self, context, keyword: str, *, purpose: str) -> JSONObject:  # type: ignore[no-untyped-def]
    """Look up a department by name via the contact API.

    Exact (normalized) name matches take priority over the raw page of
    results; exactly one match must remain, otherwise DEPARTMENT_NOT_RESOLVED
    is raised with up to ten candidates for the caller to disambiguate.

    Args:
        keyword: department name to search for.
        purpose: label ("filter"/"value") used only in error messages.
    """
    payload = self.backend.request(
        "GET",
        context,
        "/contact/deptByPage",
        params={"keyword": keyword, "pageNum": 1, "pageSize": 20},
    )
    rows = payload.get("list") if isinstance(payload, dict) else None
    items = [item for item in rows if isinstance(item, dict)] if isinstance(rows, list) else []
    normalized_keyword = keyword.strip()
    exact = [
        item for item in items
        if _normalize_optional_text(item.get("deptName", item.get("value", item.get("name")))) == normalized_keyword
    ]
    # Prefer exact-name matches; fall back to the whole result page.
    matches = exact or items
    if len(matches) != 1:
        raise RecordInputError(
            message=f"department {purpose} '{keyword}' is ambiguous or not found",
            error_code="DEPARTMENT_NOT_RESOLVED",
            fix_hint="Pass an exact department name or a numeric dept id.",
            details={"keyword": keyword, "matches": [{"id": item.get('deptId', item.get('id')), "value": item.get('deptName', item.get('value', item.get('name')))} for item in items[:10]]},
        )
    matched = matches[0]
    dept_id = _coerce_count(matched.get("deptId", matched.get("id")))
    if dept_id is None:
        raise RecordInputError(
            message=f"department {purpose} '{keyword}' resolved to an invalid department payload",
            error_code="DEPARTMENT_NOT_RESOLVED",
            fix_hint="Pass a numeric dept id directly.",
            details={"keyword": keyword, "matched": matched},
        )
    return {"id": dept_id, "value": _stringify_json(matched.get("deptName", matched.get("value", matched.get("name", dept_id))))}
|
|
2393
|
+
|
|
2394
|
+
def _department_value_from_selector(self, context, value: JSONValue) -> JSONObject:  # type: ignore[no-untyped-def]
    """Resolve a department write value to an {'id', 'value'} payload.

    Mirrors ``_resolve_department_filter_detail`` but for record writes:
    accepts a dict with id/deptId, a bare numeric id, or a department name
    that is looked up via the contact API.

    Raises:
        RecordInputError: INVALID_DEPARTMENT_VALUE when no id or usable name
            can be extracted.
    """
    if isinstance(value, dict):
        dept_id = _coerce_count(value.get("id", value.get("deptId")))
        if dept_id is not None:
            return {
                "id": dept_id,
                # Display value falls back through value -> name -> deptName -> id.
                "value": _stringify_json(value.get("value", value.get("name", value.get("deptName", dept_id)))),
            }
        keyword = _normalize_optional_text(value.get("value", value.get("name", value.get("deptName"))))
        if keyword:
            return self._lookup_department_detail(context, keyword, purpose="value")
        raise RecordInputError(
            message="department values require id/deptId or a department name",
            error_code="INVALID_DEPARTMENT_VALUE",
            fix_hint="Pass department values like {'id': 11, 'value': '示例部门'}, {'deptId': 11, 'deptName': '示例部门'}, or an exact department name.",
            details={"received_value": value},
        )
    # Scalar input: numeric id first, then treat it as a name keyword.
    dept_id = _coerce_count(value)
    if dept_id is not None:
        return {"id": dept_id, "value": str(dept_id)}
    keyword = _normalize_optional_text(value)
    if keyword is None:
        raise RecordInputError(
            message="department values require a numeric id, department object, or department name",
            error_code="INVALID_DEPARTMENT_VALUE",
            fix_hint="Pass department ids like 11, exact department names like '示例部门', or objects like {'id': 11, 'value': '示例部门'}.",
            details={"received_value": value},
        )
    return self._lookup_department_detail(context, keyword, purpose="value")
|
|
2423
|
+
|
|
2424
|
+
def _resolve_filter_field_entries(self, filters: list[JSONObject], index: FieldIndex) -> list[JSONObject]:
    """Resolve each filter's field selector.

    Non-dict filters and filters with no selector are skipped; every entry in
    the result pairs the requested selector string with its resolved field.
    """
    resolved_entries: list[JSONObject] = []
    for filter_item in filters:
        if not isinstance(filter_item, dict):
            continue
        raw_selector = _extract_filter_selector(filter_item)
        if raw_selector is None:
            continue
        resolved_field = self._resolve_field_selector(cast(str | int | None, raw_selector), index, location="filters")
        resolved_entries.append({"requested": str(raw_selector), "field": resolved_field})
    return resolved_entries
|
|
2435
|
+
|
|
2436
|
+
def _resolve_sorts(self, sorts: list[JSONObject], index: FieldIndex) -> list[JSONObject]:
    """Translate sort specs into backend payloads of {'queId', 'isAscend'}.

    Non-dict entries and entries without a selector are silently dropped.
    """
    payloads: list[JSONObject] = []
    for sort_item in sorts:
        if not isinstance(sort_item, dict):
            continue
        raw_selector = _extract_sort_selector(sort_item)
        if raw_selector is None:
            continue
        sort_field = self._resolve_field_selector(cast(str | int | None, raw_selector), index, location="sorts")
        payloads.append({"queId": sort_field.que_id, "isAscend": _resolve_sort_ascend(sort_item)})
    return payloads
|
|
2448
|
+
|
|
2449
|
+
def _resolve_time_range_column(self, time_range: JSONObject, index: FieldIndex) -> FormField | None:
    """Resolve ``time_range['column']`` to a field.

    Returns None when time_range is empty/not a dict or specifies no column.
    """
    if not isinstance(time_range, dict) or not time_range:
        return None
    column_selector = time_range.get("column")
    if column_selector is None:
        return None
    return self._resolve_field_selector(cast(str | int, column_selector), index, location="time_range.column")
|
|
2456
|
+
|
|
2457
|
+
def _build_time_range_filter(self, time_range: JSONObject, field: FormField) -> JSONObject | None:
    """Build a min/max match rule for *field* from a time_range spec.

    Returns None when time_range is empty or carries neither bound.
    """
    if not time_range:
        return None
    bounds: JSONObject = {}
    lower = time_range.get("from")
    if lower is not None:
        bounds["minValue"] = _stringify_json(lower)
    upper = time_range.get("to")
    if upper is not None:
        bounds["maxValue"] = _stringify_json(upper)
    if not bounds:
        return None
    rule: JSONObject = {"queId": field.que_id}
    if field.que_type is not None:
        rule["queType"] = field.que_type
    rule.update(bounds)
    return rule
|
|
2472
|
+
|
|
2473
|
+
def _append_time_range_filter(
    self,
    match_rules: list[JSONObject],
    time_range: JSONObject,
    field: FormField | None,
) -> list[JSONObject]:
    """Append a time-range rule unless an equivalent one already exists.

    Returns the original list untouched when there is no field, no derivable
    time rule, or an existing rule on the same queId already carries a
    min/max bound; otherwise returns a new list with the rule appended.
    """
    if field is None:
        return match_rules
    time_rule = self._build_time_range_filter(time_range, field)
    if time_rule is None:
        return match_rules
    for item in match_rules:
        if not isinstance(item, dict):
            continue
        if _coerce_count(item.get("queId")) != field.que_id:
            continue
        # Caller already constrained this field by min/max; don't double up.
        if item.get("minValue") is not None or item.get("maxValue") is not None:
            return match_rules
    return [*match_rules, time_rule]
|
|
2492
|
+
|
|
2493
|
+
def _collect_write_plan_field_refs(self, *, fields: JSONObject, answers: list[JSONObject], index: FieldIndex) -> list[JSONObject]:
    """Collect resolution payloads for every field a write plan references.

    Keys of the ``fields`` map come first, followed by the selector found in
    each dict entry of ``answers`` (queId/que_id/queTitle/que_title).
    """
    refs: list[JSONObject] = [
        self._resolve_write_plan_field_ref("fields", field_key, index) for field_key in fields
    ]
    for answer_item in answers:
        if not isinstance(answer_item, dict):
            continue
        selector = answer_item.get("queId", answer_item.get("que_id", answer_item.get("queTitle", answer_item.get("que_title"))))
        if selector is not None:
            refs.append(self._resolve_write_plan_field_ref("answers", str(selector), index))
    return refs
|
|
2505
|
+
|
|
2506
|
+
def _resolve_write_plan_field_ref(self, source: str, requested: str, index: FieldIndex) -> JSONObject:
    """Resolve one write-plan field reference to a diagnostic payload.

    Never raises: resolution failures are reported in-band with
    ``resolved=False`` and the error message under ``reason`` so the whole
    plan can be inspected at once.
    """
    try:
        field = self._resolve_field_selector(requested, index, location=source)
        return {
            "source": source,
            "requested": requested,
            "resolved": True,
            "que_id": field.que_id,
            "que_title": field.que_title,
            "que_type": field.que_type,
            "required": field.required,
            "readonly": field.readonly,
            "system": field.system,
            "write_format": _write_format_for_field(field),
            "reason": None,
        }
    except RecordInputError as error:
        return {
            "source": source,
            "requested": requested,
            "resolved": False,
            "que_id": None,
            "que_title": None,
            "que_type": None,
            "required": None,
            "readonly": None,
            "system": None,
            "write_format": {"kind": "unknown"},
            "reason": error.message,
        }
|
|
2536
|
+
|
|
2537
|
+
def _resolve_plan_candidate(self, candidate: JSONObject, index: FieldIndex) -> JSONObject:
    """Resolve one plan candidate ({'requested', 'role'}) to a diagnostic payload.

    Like ``_resolve_write_plan_field_ref``, failures are reported in-band
    (``resolved=False`` plus ``reason``) instead of raising.
    """
    requested = str(candidate.get("requested", "")).strip()
    role = str(candidate.get("role", "field"))
    try:
        field = self._resolve_field_selector(requested, index, location=role)
        return {
            "role": role,
            "requested": requested,
            "resolved": True,
            "que_id": field.que_id,
            "que_title": field.que_title,
            "que_type": field.que_type,
            "reason": None,
        }
    except RecordInputError as error:
        return {
            "role": role,
            "requested": requested,
            "resolved": False,
            "que_id": None,
            "que_title": None,
            "que_type": None,
            "reason": error.message,
        }
|
|
2561
|
+
|
|
2562
|
+
def _validate_plan_arguments(self, tool: str, arguments: JSONObject) -> JSONObject:
    """Check planned tool arguments for missing required keys.

    Returns {'valid', 'missing_required', 'warnings'}; never raises. For
    record_query the effective query mode is resolved first, since list and
    record modes require select_columns while summary mode does not.
    """
    missing_required: list[str] = []
    warnings: list[str] = []
    if tool in {"record_query", "record_aggregate"} and not arguments.get("app_key"):
        missing_required.append("app_key")
    if tool == "record_get":
        if not arguments.get("app_key"):
            missing_required.append("app_key")
        if not arguments.get("apply_id"):
            missing_required.append("apply_id")
        if not arguments.get("select_columns"):
            missing_required.append("select_columns")
    query_mode = _resolve_query_mode(
        str(arguments.get("query_mode", "auto")),
        apply_id=_coerce_count(arguments.get("apply_id")),
        amount_column=arguments.get("amount_column"),
        time_range=cast(JSONObject, arguments.get("time_range") if isinstance(arguments.get("time_range"), dict) else {}),
        stat_policy=cast(JSONObject, arguments.get("stat_policy") if isinstance(arguments.get("stat_policy"), dict) else {}),
    ) if tool == "record_query" else None
    if tool == "record_query" and query_mode in {"list", "record"} and not arguments.get("select_columns"):
        missing_required.append("select_columns")
    if tool == "record_query" and query_mode == "summary" and not arguments.get("amount_column") and not arguments.get("time_range"):
        warnings.append("summary mode without amount_column or time_range only returns row counts")
    return {"valid": not missing_required, "missing_required": missing_required, "warnings": warnings}
|
|
2586
|
+
|
|
2587
|
+
def _validate_app_and_record(self, app_key: str, apply_id: int | str) -> int:
    """Validate an (app_key, apply_id) pair; returns apply_id as a positive int.

    Raises a config tool error when app_key is empty or apply_id is not a
    positive number.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    coerced_apply_id = _coerce_count(apply_id)
    if coerced_apply_id is None or coerced_apply_id <= 0:
        raise_tool_error(QingflowApiError.config_error("apply_id must be positive"))
    return coerced_apply_id
|
|
2594
|
+
|
|
2595
|
+
def _validate_record_write(self, app_key: str, answers: list[JSONObject], apply_id: int | None = None) -> None:
    """Validate common preconditions for record create/update calls.

    Requires a non-empty app_key, a positive apply_id when one is given, and
    a non-empty answers list; raises a config tool error otherwise.
    """
    if not app_key:
        raise_tool_error(QingflowApiError.config_error("app_key is required"))
    if apply_id is not None and apply_id <= 0:
        raise_tool_error(QingflowApiError.config_error("apply_id must be positive"))
    if not (isinstance(answers, list) and answers):
        raise_tool_error(QingflowApiError.config_error("answers must be a non-empty array"))
|
|
2602
|
+
|
|
2603
|
+
def _verify_record_write_result(
    self,
    context, # type: ignore[no-untyped-def]
    *,
    app_key: str,
    apply_id: int | None,
    normalized_answers: list[JSONObject],
    index: FieldIndex,
) -> JSONObject:
    """Read the record back after a write and verify each answer landed.

    For every normalized answer, checks that the stored record has the field
    (``missing_fields``), that its values are non-empty (``empty_fields``),
    and that at least as many values as were written are present
    (``count_mismatches``). Subtable answers are delegated to
    ``_verify_subtable_write_result``.

    Returns a dict with ``verified`` plus the three discrepancy lists.
    """
    if apply_id is None:
        # Without a record id there is nothing to read back.
        return {
            "verified": False,
            "error": "missing_apply_id",
            "missing_fields": [],
            "empty_fields": [],
            "count_mismatches": [],
        }
    record = self.backend.request(
        "GET",
        context,
        f"/app/{app_key}/apply/{apply_id}",
        params={"role": 1, "listType": DEFAULT_RECORD_LIST_TYPE},
    )
    answers = record.get("answers") if isinstance(record, dict) else None
    answer_list = answers if isinstance(answers, list) else []
    # Map the stored answers by queId for O(1) lookups below.
    actual_by_id = {
        que_id: item
        for item in answer_list
        if isinstance(item, dict) and (que_id := _coerce_count(item.get("queId"))) is not None
    }
    missing_fields: list[JSONObject] = []
    empty_fields: list[JSONObject] = []
    count_mismatches: list[JSONObject] = []
    for answer in normalized_answers:
        que_id = _coerce_count(answer.get("queId"))
        if que_id is None or que_id <= 0:
            continue
        actual = actual_by_id.get(que_id)
        field = index.by_id.get(str(que_id))
        field_payload = _field_ref_payload(field) if field is not None else {"que_id": que_id}
        if actual is None:
            missing_fields.append(field_payload)
            continue
        expected_rows = answer.get("tableValues") if isinstance(answer.get("tableValues"), list) else []
        if expected_rows:
            # Subtable answers are verified row-by-row and cell-by-cell.
            actual_rows = actual.get("tableValues") if isinstance(actual.get("tableValues"), list) else []
            self._verify_subtable_write_result(
                field=field,
                expected_rows=expected_rows,
                actual_rows=actual_rows,
                missing_fields=missing_fields,
                empty_fields=empty_fields,
                count_mismatches=count_mismatches,
            )
            continue
        actual_values = actual.get("values") if isinstance(actual.get("values"), list) else []
        if not actual_values:
            empty_fields.append(field_payload)
            continue
        expected_values = answer.get("values") if isinstance(answer.get("values"), list) else []
        # Fewer stored values than written values indicates a partial write.
        if expected_values and len(actual_values) < len(expected_values):
            count_mismatches.append(
                {
                    **field_payload,
                    "expected_count": len(expected_values),
                    "actual_count": len(actual_values),
                }
            )
    return {
        "verified": not missing_fields and not empty_fields and not count_mismatches,
        "missing_fields": missing_fields,
        "empty_fields": empty_fields,
        "count_mismatches": count_mismatches,
    }
|
|
2677
|
+
|
|
2678
|
+
def _verify_subtable_write_result(
    self,
    *,
    field: FormField | None,
    expected_rows: list[JSONValue],
    actual_rows: list[JSONValue],
    missing_fields: list[JSONObject],
    empty_fields: list[JSONObject],
    count_mismatches: list[JSONObject],
) -> None:
    """Verify a subtable write by comparing expected rows against stored rows.

    Rows are matched by row id when available, falling back to ordinal
    position. Discrepancies are appended to the caller-owned
    ``missing_fields`` / ``empty_fields`` / ``count_mismatches`` lists
    (this method mutates them in place and returns nothing).
    """
    field_payload = _field_ref_payload(field) if field is not None else {"que_id": None}
    if not actual_rows:
        empty_fields.append(field_payload)
        return
    if len(actual_rows) < len(expected_rows):
        count_mismatches.append(
            {
                **field_payload,
                "expected_count": len(expected_rows),
                "actual_count": len(actual_rows),
                "unit": "rows",
            }
        )
    subtable_index = self._subtable_field_index_optional(field)
    actual_rows_by_row_id = _index_subtable_rows_by_row_id(actual_rows)
    for row_ordinal, raw_expected_row in enumerate(expected_rows):
        expected_row = [item for item in raw_expected_row if isinstance(item, dict)] if isinstance(raw_expected_row, list) else []
        expected_row_id = _subtable_row_id(expected_row)
        # Prefer matching rows by explicit row id; fall back to position.
        actual_row = None
        if expected_row_id is not None:
            actual_row = actual_rows_by_row_id.get(expected_row_id)
        if actual_row is None and row_ordinal < len(actual_rows):
            candidate = actual_rows[row_ordinal]
            actual_row = candidate if isinstance(candidate, list) else None
        if actual_row is None:
            missing_fields.append({**field_payload, "row_ordinal": row_ordinal})
            continue
        actual_cells_by_id = {
            que_id: item
            for item in actual_row
            if isinstance(item, dict) and (que_id := _coerce_count(item.get("queId"))) is not None
        }
        for expected_cell in expected_row:
            que_id = _coerce_count(expected_cell.get("queId"))
            if que_id is None or que_id <= 0:
                continue
            subfield = subtable_index.by_id.get(str(que_id)) if subtable_index is not None else None
            cell_payload = _subtable_cell_ref_payload(field, subfield, que_id=que_id, row_ordinal=row_ordinal, row_id=expected_row_id)
            actual_cell = actual_cells_by_id.get(que_id)
            if actual_cell is None:
                missing_fields.append(cell_payload)
                continue
            actual_values = actual_cell.get("values") if isinstance(actual_cell.get("values"), list) else []
            if not actual_values:
                empty_fields.append(cell_payload)
                continue
            expected_values = expected_cell.get("values") if isinstance(expected_cell.get("values"), list) else []
            if expected_values and len(actual_values) < len(expected_values):
                count_mismatches.append(
                    {
                        **cell_payload,
                        "expected_count": len(expected_values),
                        "actual_count": len(actual_values),
                        "unit": "values",
                    }
                )
|
|
2744
|
+
|
|
2745
|
+
def _score_field_matches(self, requested: str, index: FieldIndex, *, fuzzy: bool, top_k: int) -> list[JSONObject]:
    """Rank every indexed form field against a user-supplied selector.

    A purely numeric selector equal to a field's que_id scores a perfect
    1.0 ("id_exact"); otherwise the best of the title score and each
    alias score is kept. Zero-scored fields are dropped. Results are
    sorted by descending score and truncated to max(top_k, 1) entries.
    """
    requested_text = requested.strip()
    requested_key = _normalize_field_lookup_key(requested_text)
    if not requested_key:
        return []
    matches: list[JSONObject] = []
    for field in index.by_id.values():
        best_score = 0.0
        match_type = "none"
        matched_alias: str | None = None
        # Numeric input matching the field id is an unambiguous hit.
        if requested_text.isdigit() and field.que_id == int(requested_text):
            best_score = 1.0
            match_type = "id_exact"
        else:
            best_score, match_type = _score_candidate_text(requested_key, field.que_title, fuzzy=fuzzy, label="title")
            # An alias only replaces the title score on a strict improvement.
            for alias in field.aliases:
                alias_score, alias_match_type = _score_candidate_text(requested_key, alias, fuzzy=fuzzy, label="alias")
                if alias_score > best_score:
                    best_score = alias_score
                    match_type = alias_match_type
                    matched_alias = alias
        if best_score <= 0:
            continue
        payload = {
            "que_id": field.que_id,
            "que_title": field.que_title,
            "que_type": field.que_type,
            "score": round(best_score, 4),
            "match_type": match_type,
        }
        if matched_alias is not None:
            payload["matched_alias"] = matched_alias
        matches.append(payload)
    matches.sort(key=lambda item: float(item["score"]), reverse=True)
    return matches[: max(top_k, 1)]
|
|
2780
|
+
|
|
2781
|
+
def _raise_need_more_data(self, completeness: JSONObject, evidence: JSONObject, message: str) -> None:
|
|
2782
|
+
raise RuntimeError(
|
|
2783
|
+
json.dumps(
|
|
2784
|
+
{
|
|
2785
|
+
"ok": False,
|
|
2786
|
+
"code": "NEED_MORE_DATA",
|
|
2787
|
+
"status": "need_more_data",
|
|
2788
|
+
"message": message,
|
|
2789
|
+
"details": {"completeness": completeness, "evidence": evidence},
|
|
2790
|
+
},
|
|
2791
|
+
ensure_ascii=False,
|
|
2792
|
+
)
|
|
2793
|
+
)
|
|
2794
|
+
|
|
2795
|
+
|
|
2796
|
+
def _normalize_form_schema(payload: JSONValue) -> JSONObject:
|
|
2797
|
+
if isinstance(payload, dict) and isinstance(payload.get("formQues"), list):
|
|
2798
|
+
return payload
|
|
2799
|
+
if isinstance(payload, list):
|
|
2800
|
+
for item in payload:
|
|
2801
|
+
if isinstance(item, dict) and isinstance(item.get("formQues"), list):
|
|
2802
|
+
return item
|
|
2803
|
+
return {}
|
|
2804
|
+
|
|
2805
|
+
|
|
2806
|
+
def _normalize_view_list(payload: JSONValue) -> list[JSONObject]:
|
|
2807
|
+
if not isinstance(payload, list):
|
|
2808
|
+
return []
|
|
2809
|
+
flattened: list[JSONObject] = []
|
|
2810
|
+
for group in payload:
|
|
2811
|
+
if not isinstance(group, dict):
|
|
2812
|
+
continue
|
|
2813
|
+
view_list = group.get("viewList")
|
|
2814
|
+
if not isinstance(view_list, list):
|
|
2815
|
+
continue
|
|
2816
|
+
for item in view_list:
|
|
2817
|
+
if isinstance(item, dict) and item.get("viewKey"):
|
|
2818
|
+
flattened.append(item)
|
|
2819
|
+
return flattened
|
|
2820
|
+
|
|
2821
|
+
|
|
2822
|
+
def _compile_view_conditions(config: JSONObject) -> list[list[ViewFilterCondition]]:
    """Compile a view's raw ``viewgraphLimit`` config into condition groups.

    Returns a list of groups, each a list of ViewFilterCondition.
    A bare condition dict counts as a single-item group; non-dict
    entries are skipped and empty groups are dropped.
    """
    raw_limit = config.get("viewgraphLimit")
    if not isinstance(raw_limit, list):
        return []
    compiled: list[list[ViewFilterCondition]] = []
    for raw_group in raw_limit:
        # Tolerate a single condition where a group list was expected.
        group_items = raw_group if isinstance(raw_group, list) else [raw_group]
        group: list[ViewFilterCondition] = []
        for raw_condition in group_items:
            if not isinstance(raw_condition, dict):
                continue
            group.append(
                ViewFilterCondition(
                    que_id=_coerce_count(raw_condition.get("queId")),
                    que_title=_stringify_json(raw_condition.get("queTitle")).strip(),
                    que_type=_coerce_count(raw_condition.get("queType")),
                    judge_type=_coerce_count(raw_condition.get("judgeType")),
                    # Keep only non-blank stringified judge values.
                    judge_values=[_stringify_json(item).strip() for item in cast(list[JSONValue], raw_condition.get("judgeValues") or []) if _stringify_json(item).strip()],
                    judge_value_details=[item for item in cast(list[JSONValue], raw_condition.get("judgeValueDetails") or []) if isinstance(item, dict)],
                    raw=raw_condition,
                )
            )
        if group:
            compiled.append(group)
    return compiled
|
|
2847
|
+
|
|
2848
|
+
|
|
2849
|
+
def _build_field_index(schema: JSONObject) -> FieldIndex:
    """Index a form schema's questions by id, normalized title, and alias.

    Base questions (``baseQues``) are flattened first and flagged so they
    come out readonly/system; regular ``formQues`` follow. The first
    field seen for a given que_id wins — later duplicates are skipped.
    """
    by_id: dict[str, FormField] = {}
    by_title: dict[str, list[FormField]] = {}
    by_alias: dict[str, list[FormField]] = {}
    all_questions = [
        *[(question, True) for question in _flatten_questions(schema.get("baseQues"))],
        *[(question, False) for question in _flatten_questions(schema.get("formQues"))],
    ]
    for question, is_base_question in all_questions:
        que_id = _coerce_count(question.get("queId"))
        title = _stringify_json(question.get("queTitle")).strip()
        # Questions without a usable id and title cannot be addressed later.
        if que_id is None or que_id < 0 or not title:
            continue
        field = FormField(
            que_id=que_id,
            que_title=title,
            que_type=_coerce_count(question.get("queType")),
            required=bool(question.get("required") or question.get("beingRequired")),
            # Base questions are always treated as readonly/system.
            readonly=bool(question.get("readonly") or question.get("beingReadonly") or is_base_question),
            system=bool(question.get("system") or question.get("beingSystem") or is_base_question),
            options=_extract_question_options(question),
            aliases=[],
            raw=question,
        )
        if str(que_id) in by_id:
            continue  # first occurrence wins
        field.aliases = sorted(_field_alias_candidates(field))
        by_id[str(que_id)] = field
        by_title.setdefault(_normalize_field_lookup_key(title), []).append(field)
        for alias in field.aliases:
            by_alias.setdefault(_normalize_field_lookup_key(alias), []).append(field)
    return FieldIndex(by_id=by_id, by_title=by_title, by_alias=by_alias)
|
|
2881
|
+
|
|
2882
|
+
|
|
2883
|
+
def _flatten_questions(payload: JSONValue) -> list[JSONObject]:
|
|
2884
|
+
flattened: list[JSONObject] = []
|
|
2885
|
+
if isinstance(payload, dict):
|
|
2886
|
+
if "queId" in payload or "queTitle" in payload:
|
|
2887
|
+
flattened.append(payload)
|
|
2888
|
+
for value in payload.values():
|
|
2889
|
+
flattened.extend(_flatten_questions(value))
|
|
2890
|
+
elif isinstance(payload, list):
|
|
2891
|
+
for item in payload:
|
|
2892
|
+
flattened.extend(_flatten_questions(item))
|
|
2893
|
+
return flattened
|
|
2894
|
+
|
|
2895
|
+
|
|
2896
|
+
def _extract_question_options(question: JSONObject) -> list[str]:
    """Collect a question's option labels as strings.

    Dict options contribute their ``optValue`` (falling back to
    ``value``); scalar options are used directly. None entries are
    dropped. Returns [] when there is no options list.
    """
    options = question.get("options")
    if not isinstance(options, list):
        return []
    labels: list[str] = []
    for entry in options:
        if isinstance(entry, dict):
            # Prefer optValue, fall back to value.
            entry = entry.get("optValue", entry.get("value"))
        if entry is not None:
            labels.append(_stringify_json(entry))
    return labels
|
|
2909
|
+
|
|
2910
|
+
|
|
2911
|
+
def _normalize_plan_arguments(tool: str, arguments: JSONObject) -> JSONObject:
    """Normalize tool arguments: parse JSON-ish input, map camelCase aliases.

    Canonical snake_case keys win — an alias is only copied when the
    canonical key is absent (alias keys are left in place).
    ``record_get`` defaults ``query_mode`` to "record".
    """
    normalized = cast(JSONObject, _parse_json_like(arguments))
    # camelCase spellings accepted from callers -> canonical snake_case.
    alias_map = {
        "appKey": "app_key",
        "applyId": "apply_id",
        "queryMode": "query_mode",
        "pageNum": "page_num",
        "pageSize": "page_size",
        "requestedPages": "requested_pages",
        "scanMaxPages": "scan_max_pages",
        "queryKey": "query_key",
        "maxRows": "max_rows",
        "maxColumns": "max_columns",
        "selectColumns": "select_columns",
        "amountColumn": "amount_column",
        "timeRange": "time_range",
        "strictFull": "strict_full",
        "outputProfile": "output_profile",
        "listType": "list_type",
        "viewKey": "view_key",
        "viewName": "view_name",
        "groupBy": "group_by",
        "timeBucket": "time_bucket",
        "maxGroups": "max_groups",
        "forceRefreshForm": "force_refresh_form",
    }
    result = dict(normalized)
    for alias, canonical in alias_map.items():
        if alias in result and canonical not in result:
            result[canonical] = result[alias]
    if tool == "record_get" and "query_mode" not in result:
        result["query_mode"] = "record"
    return result
|
|
2944
|
+
|
|
2945
|
+
|
|
2946
|
+
def _collect_plan_field_candidates(tool: str, arguments: JSONObject) -> list[JSONObject]:
    """List every field selector a plan references, tagged with its role.

    Covers select/group/amount/time-range columns per tool, plus filter
    and sort selectors for all tools, so the planner can verify that each
    selector resolves to a real form field before execution.
    """
    candidates: list[JSONObject] = []
    if tool in {"record_query", "record_get"}:
        for item in _as_selector_list(arguments.get("select_columns")):
            candidates.append({"role": "select_columns", "requested": str(item)})
        amount = arguments.get("amount_column")
        if amount is not None:
            candidates.append({"role": "amount_column", "requested": str(amount)})
        time_range = arguments.get("time_range")
        if isinstance(time_range, dict) and time_range.get("column") is not None:
            candidates.append({"role": "time_range.column", "requested": str(time_range["column"])})
    if tool == "record_aggregate":
        for item in _as_selector_list(arguments.get("group_by")):
            candidates.append({"role": "group_by", "requested": str(item)})
        amount = arguments.get("amount_column")
        if amount is not None:
            candidates.append({"role": "amount_column", "requested": str(amount)})
        time_range = arguments.get("time_range")
        if isinstance(time_range, dict) and time_range.get("column") is not None:
            candidates.append({"role": "time_range.column", "requested": str(time_range["column"])})
    # Filters and sorts apply to every tool; "sorts" takes precedence over "sort".
    for item in _as_object_list(arguments.get("filters")):
        selector = _extract_filter_selector(item)
        if selector is not None:
            candidates.append({"role": "filter", "requested": str(selector)})
    for item in _as_object_list(arguments.get("sorts", arguments.get("sort"))):
        selector = _extract_sort_selector(item)
        if selector is not None:
            candidates.append({"role": "sort", "requested": str(selector)})
    return candidates
|
|
2975
|
+
|
|
2976
|
+
|
|
2977
|
+
def _build_plan_estimate(tool: str, arguments: JSONObject) -> JSONObject:
    """Estimate scan cost (pages, items) for a planned query and flag limits.

    ``may_hit_limits`` is set when the effective scan exceeds the default
    analysis budget. For record_query the routed mode is also checked:
    list mode is flagged because it is not a safe final-analysis endpoint.
    """
    page_size = _coerce_count(arguments.get("page_size")) or DEFAULT_QUERY_PAGE_SIZE
    requested_pages = _coerce_count(arguments.get("requested_pages")) or 1
    scan_max_pages = _coerce_count(arguments.get("scan_max_pages")) or requested_pages
    # The actual scan is capped by whichever bound is smaller.
    estimated_scan_pages = min(requested_pages, scan_max_pages)
    may_hit_limits = estimated_scan_pages > DEFAULT_SCAN_MAX_PAGES
    reasons = []
    if may_hit_limits:
        reasons.append("requested scan pages exceed the default analysis budget")
    if tool == "record_query":
        routed_mode = _resolve_query_mode(
            str(arguments.get("query_mode", "auto")),
            apply_id=_coerce_count(arguments.get("apply_id")),
            amount_column=arguments.get("amount_column"),
            time_range=cast(JSONObject, arguments.get("time_range") if isinstance(arguments.get("time_range"), dict) else {}),
            stat_policy=cast(JSONObject, arguments.get("stat_policy") if isinstance(arguments.get("stat_policy"), dict) else {}),
        )
        if routed_mode == "list":
            reasons.append("list mode is not a safe final-analysis endpoint")
    return {
        "page_size": page_size,
        "requested_pages": requested_pages,
        "scan_max_pages": scan_max_pages,
        "estimated_scan_pages": estimated_scan_pages,
        # Upper bound: every scanned page could come back full.
        "estimated_items_upper_bound": page_size * estimated_scan_pages,
        "may_hit_limits": may_hit_limits,
        "reasons": reasons,
        "probe": None,
    }
|
|
3006
|
+
|
|
3007
|
+
|
|
3008
|
+
def _assess_plan_readiness(
    tool: str,
    arguments: JSONObject,
    validation: JSONObject,
    field_mapping: list[JSONObject],
    estimate: JSONObject,
) -> JSONObject:
    """Collect blockers/actions that decide whether a plan may conclude.

    ``estimate`` is currently unused but kept in the signature for
    interface stability. A plan is ready only when no blockers remain:
    valid arguments, all fields resolved, a safe routed mode, and
    strict_full set for final statistics.
    """
    blockers: list[str] = []
    actions: list[str] = []
    if not bool(validation.get("valid")):
        blockers.append("arguments are not valid")
        actions.append("Fix missing_required before execution.")
    unresolved = [item for item in field_mapping if not bool(item.get("resolved"))]
    if unresolved:
        blockers.append("one or more fields are unresolved")
        actions.append("Use record_field_resolve to resolve field ids before execution.")
    if tool == "record_query":
        routed_mode = _resolve_query_mode(
            str(arguments.get("query_mode", "auto")),
            apply_id=_coerce_count(arguments.get("apply_id")),
            amount_column=arguments.get("amount_column"),
            time_range=cast(JSONObject, arguments.get("time_range") if isinstance(arguments.get("time_range"), dict) else {}),
            stat_policy=cast(JSONObject, arguments.get("stat_policy") if isinstance(arguments.get("stat_policy"), dict) else {}),
        )
        if routed_mode == "list":
            blockers.append("list mode is not a safe final-analysis endpoint")
            actions.append("Use record_query(summary) or record_aggregate for final statistics.")
        if routed_mode == "record":
            blockers.append("record mode is a detail endpoint, not a final-analysis endpoint")
    if tool in {"record_query", "record_aggregate"} and not bool(arguments.get("strict_full")):
        blockers.append("strict_full should be true for final conclusions")
        actions.append("Set strict_full=true so incomplete scans block final conclusions.")
    # Always remind callers to re-check completeness after execution.
    actions.append("After execution, verify completeness before using the result as a final conclusion.")
    return {
        "ready_for_final_conclusion": not blockers,
        "final_conclusion_blockers": blockers,
        "recommended_next_actions": actions,
    }
|
|
3046
|
+
|
|
3047
|
+
|
|
3048
|
+
def _resolve_query_mode(
|
|
3049
|
+
query_mode: str,
|
|
3050
|
+
*,
|
|
3051
|
+
apply_id: int | None,
|
|
3052
|
+
amount_column: JSONValue,
|
|
3053
|
+
time_range: JSONObject,
|
|
3054
|
+
stat_policy: JSONObject,
|
|
3055
|
+
) -> str:
|
|
3056
|
+
if query_mode in {"list", "record", "summary"}:
|
|
3057
|
+
return query_mode
|
|
3058
|
+
if apply_id is not None and apply_id > 0:
|
|
3059
|
+
return "record"
|
|
3060
|
+
if amount_column is not None or time_range or stat_policy:
|
|
3061
|
+
return "summary"
|
|
3062
|
+
return "list"
|
|
3063
|
+
|
|
3064
|
+
|
|
3065
|
+
def _bounded_column_limit(max_columns: int | None, *, default_limit: int, hard_limit: int) -> int:
|
|
3066
|
+
if max_columns is None:
|
|
3067
|
+
return default_limit
|
|
3068
|
+
return max(1, min(max_columns, hard_limit))
|
|
3069
|
+
|
|
3070
|
+
|
|
3071
|
+
def _chunk_fields(fields: list[FormField], chunk_size: int) -> list[list[FormField]]:
|
|
3072
|
+
if chunk_size <= 0:
|
|
3073
|
+
raise ValueError("chunk_size must be positive")
|
|
3074
|
+
if not fields:
|
|
3075
|
+
return []
|
|
3076
|
+
return [fields[index : index + chunk_size] for index in range(0, len(fields), chunk_size)]
|
|
3077
|
+
|
|
3078
|
+
|
|
3079
|
+
def _view_selection_supported_by_search_ids(view_selection: ViewSelection, search_que_ids: list[int] | None) -> bool:
|
|
3080
|
+
if not view_selection.conditions:
|
|
3081
|
+
return True
|
|
3082
|
+
if not search_que_ids:
|
|
3083
|
+
return False
|
|
3084
|
+
allowed_ids = set(search_que_ids)
|
|
3085
|
+
for group in view_selection.conditions:
|
|
3086
|
+
for condition in group:
|
|
3087
|
+
if condition.que_id is None or condition.que_id not in allowed_ids:
|
|
3088
|
+
return False
|
|
3089
|
+
return True
|
|
3090
|
+
|
|
3091
|
+
|
|
3092
|
+
def _build_flat_row(answer_list: list[JSONValue], fields: list[FormField], *, apply_id: int | None) -> JSONObject:
    """Flatten one record's answers into a {title: value} row keyed by apply_id."""
    row: JSONObject = {"apply_id": apply_id}
    row.update(
        (field.que_title, _extract_field_value(answer_list, field))
        for field in fields
    )
    return row
|
|
3097
|
+
|
|
3098
|
+
|
|
3099
|
+
def _extract_field_value(answer_list: list[JSONValue], field: FormField | None) -> JSONValue:
    """Pull the answer value(s) for ``field`` out of a record's answer list.

    Matches the first answer dict by que_id, or by non-empty exact title.
    A single value is unwrapped; multiple values come back as a list;
    missing or empty values yield None.
    """
    if field is None:
        return None
    for entry in answer_list:
        if not isinstance(entry, dict):
            continue
        entry_id = _coerce_count(entry.get("queId"))
        entry_title = _stringify_json(entry.get("queTitle")).strip()
        matches = entry_id == field.que_id or (bool(entry_title) and entry_title == field.que_title)
        if not matches:
            continue
        values = entry.get("values")
        if not isinstance(values, list) or not values:
            return None
        unpacked = [_extract_value_item(item) for item in values]
        return unpacked[0] if len(unpacked) == 1 else unpacked
    return None
|
|
3114
|
+
|
|
3115
|
+
|
|
3116
|
+
def _extract_value_item(value: JSONValue) -> JSONValue:
|
|
3117
|
+
if isinstance(value, dict):
|
|
3118
|
+
if "value" in value:
|
|
3119
|
+
return value["value"]
|
|
3120
|
+
if "name" in value:
|
|
3121
|
+
return value["name"]
|
|
3122
|
+
if "email" in value:
|
|
3123
|
+
return value["email"]
|
|
3124
|
+
if "id" in value:
|
|
3125
|
+
return value["id"]
|
|
3126
|
+
return value
|
|
3127
|
+
|
|
3128
|
+
|
|
3129
|
+
def _field_mapping_entry(role: str, field: FormField | None, *, requested: str) -> JSONObject:
|
|
3130
|
+
return {
|
|
3131
|
+
"role": role,
|
|
3132
|
+
"requested": requested,
|
|
3133
|
+
"resolved": field is not None,
|
|
3134
|
+
"que_id": field.que_id if field is not None else None,
|
|
3135
|
+
"que_title": field.que_title if field is not None else None,
|
|
3136
|
+
"que_type": field.que_type if field is not None else None,
|
|
3137
|
+
}
|
|
3138
|
+
|
|
3139
|
+
|
|
3140
|
+
def _query_id() -> str:
|
|
3141
|
+
return datetime.now(UTC).isoformat()
|
|
3142
|
+
|
|
3143
|
+
|
|
3144
|
+
def _view_selection_payload(view_selection: ViewSelection | None) -> JSONObject | None:
|
|
3145
|
+
if view_selection is None:
|
|
3146
|
+
return None
|
|
3147
|
+
return {
|
|
3148
|
+
"view_key": view_selection.view_key,
|
|
3149
|
+
"view_name": view_selection.view_name,
|
|
3150
|
+
"local_filtering": bool(view_selection.conditions),
|
|
3151
|
+
"condition_group_count": len(view_selection.conditions),
|
|
3152
|
+
}
|
|
3153
|
+
|
|
3154
|
+
|
|
3155
|
+
def _build_completeness(
|
|
3156
|
+
*,
|
|
3157
|
+
result_amount: int,
|
|
3158
|
+
returned_items: int,
|
|
3159
|
+
fetched_pages: int,
|
|
3160
|
+
requested_pages: int,
|
|
3161
|
+
has_more: bool,
|
|
3162
|
+
next_page_token: str | None,
|
|
3163
|
+
is_complete: bool,
|
|
3164
|
+
omitted_items: int,
|
|
3165
|
+
extra: JSONObject,
|
|
3166
|
+
) -> JSONObject:
|
|
3167
|
+
payload: JSONObject = {
|
|
3168
|
+
"result_amount": result_amount,
|
|
3169
|
+
"returned_items": returned_items,
|
|
3170
|
+
"fetched_pages": fetched_pages,
|
|
3171
|
+
"requested_pages": requested_pages,
|
|
3172
|
+
"actual_scanned_pages": fetched_pages,
|
|
3173
|
+
"has_more": has_more,
|
|
3174
|
+
"next_page_token": next_page_token,
|
|
3175
|
+
"is_complete": is_complete,
|
|
3176
|
+
"partial": not is_complete,
|
|
3177
|
+
"omitted_items": omitted_items,
|
|
3178
|
+
"omitted_chars": 0,
|
|
3179
|
+
}
|
|
3180
|
+
payload.update(extra)
|
|
3181
|
+
return payload
|
|
3182
|
+
|
|
3183
|
+
|
|
3184
|
+
def _page_has_more(page: JSONObject, current_page: int, page_size: int, returned_rows: int) -> bool:
    """Whether more pages follow the current one.

    Uses the reported ``pageAmount`` when present; otherwise assumes
    more data exists whenever the current page came back full.
    """
    total_pages = _coerce_count(page.get("pageAmount"))
    if total_pages is None:
        return returned_rows >= page_size
    return current_page < total_pages
|
|
3189
|
+
|
|
3190
|
+
|
|
3191
|
+
def _effective_total(page: JSONObject, page_size: int) -> int:
    """Best available total row count for a paged response.

    Prefers the reported ``total`` (falling back to ``count``), never
    less than the rows actually returned; otherwise estimates from
    ``pageAmount`` * page_size; otherwise the returned row count.
    """
    rows = page.get("list")
    returned = len(rows) if isinstance(rows, list) else 0
    reported = _coerce_count(page.get("total"))
    if reported is None:
        reported = _coerce_count(page.get("count"))
    if reported is not None:
        return max(reported, returned)
    total_pages = _coerce_count(page.get("pageAmount"))
    if total_pages is None:
        return returned
    return total_pages * page_size
|
|
3203
|
+
|
|
3204
|
+
|
|
3205
|
+
def _pick_title_field(index: FieldIndex) -> FormField | None:
|
|
3206
|
+
priority_keywords = ("名称", "标题", "主题", "name", "title", "subject")
|
|
3207
|
+
for field in index.by_id.values():
|
|
3208
|
+
lowered = field.que_title.lower()
|
|
3209
|
+
if any(keyword in lowered for keyword in priority_keywords):
|
|
3210
|
+
return field
|
|
3211
|
+
return next(iter(index.by_id.values()), None)
|
|
3212
|
+
|
|
3213
|
+
|
|
3214
|
+
def _normalize_metrics(metrics: list[str], *, include_sum: bool) -> list[str]:
|
|
3215
|
+
normalized = [item for item in metrics if item in {"count", "sum", "avg", "min", "max"}]
|
|
3216
|
+
if not normalized:
|
|
3217
|
+
return ["count", "sum"] if include_sum else ["count"]
|
|
3218
|
+
if not include_sum:
|
|
3219
|
+
return [item for item in normalized if item == "count"] or ["count"]
|
|
3220
|
+
return normalized
|
|
3221
|
+
|
|
3222
|
+
|
|
3223
|
+
def _coerce_count(value: JSONValue) -> int | None:
|
|
3224
|
+
if isinstance(value, bool) or value is None:
|
|
3225
|
+
return None
|
|
3226
|
+
if isinstance(value, int):
|
|
3227
|
+
return value
|
|
3228
|
+
if isinstance(value, float):
|
|
3229
|
+
return int(value)
|
|
3230
|
+
if isinstance(value, str):
|
|
3231
|
+
text = value.strip()
|
|
3232
|
+
if not text:
|
|
3233
|
+
return None
|
|
3234
|
+
try:
|
|
3235
|
+
return int(text)
|
|
3236
|
+
except ValueError:
|
|
3237
|
+
return None
|
|
3238
|
+
return None
|
|
3239
|
+
|
|
3240
|
+
|
|
3241
|
+
def _coerce_amount(value: JSONValue) -> float | None:
|
|
3242
|
+
if isinstance(value, bool) or value is None:
|
|
3243
|
+
return None
|
|
3244
|
+
if isinstance(value, (int, float)):
|
|
3245
|
+
return float(value)
|
|
3246
|
+
if isinstance(value, str):
|
|
3247
|
+
text = value.strip()
|
|
3248
|
+
if not text:
|
|
3249
|
+
return None
|
|
3250
|
+
negative = False
|
|
3251
|
+
if text.startswith("(") and text.endswith(")"):
|
|
3252
|
+
negative = True
|
|
3253
|
+
text = text[1:-1].strip()
|
|
3254
|
+
for symbol in ("¥", "¥", "$"):
|
|
3255
|
+
text = text.replace(symbol, "")
|
|
3256
|
+
text = text.replace(",", "").replace(" ", "")
|
|
3257
|
+
if negative and text and not text.startswith("-"):
|
|
3258
|
+
text = f"-{text}"
|
|
3259
|
+
try:
|
|
3260
|
+
return float(text)
|
|
3261
|
+
except ValueError:
|
|
3262
|
+
return None
|
|
3263
|
+
return None
|
|
3264
|
+
|
|
3265
|
+
|
|
3266
|
+
def _to_time_bucket(value: JSONValue, bucket: str) -> str:
    """Bucket a date-like value into a day/week/month string key.

    Blank input yields "unknown". Unparseable text keeps the first ten
    characters for day bucketing when long enough, otherwise the raw
    text. Weeks use ISO year/week numbering.
    """
    text = _stringify_json(value).strip()
    if not text:
        return "unknown"
    parsed = _parse_datetime_like(text)
    if parsed is None:
        # Best effort: a date-like prefix for day buckets, else raw text.
        if bucket == "day" and len(text) >= 10:
            return text[:10]
        return text
    if bucket == "month":
        return parsed.strftime("%Y-%m")
    if bucket == "week":
        year, week, _weekday = parsed.isocalendar()
        return f"{year}-W{week:02d}"
    return parsed.strftime("%Y-%m-%d")
|
|
3279
|
+
|
|
3280
|
+
|
|
3281
|
+
def _parse_datetime_like(text: str) -> datetime | None:
|
|
3282
|
+
cleaned = text.strip().replace("/", "-")
|
|
3283
|
+
for candidate in (cleaned, cleaned.replace(" ", "T")):
|
|
3284
|
+
try:
|
|
3285
|
+
return datetime.fromisoformat(candidate)
|
|
3286
|
+
except ValueError:
|
|
3287
|
+
continue
|
|
3288
|
+
return None
|
|
3289
|
+
|
|
3290
|
+
|
|
3291
|
+
def _echo_filters(filters: list[JSONObject]) -> list[JSONObject]:
|
|
3292
|
+
return [dict(item) for item in filters]
|
|
3293
|
+
|
|
3294
|
+
|
|
3295
|
+
def _extract_filter_selector(item: JSONObject) -> JSONValue:
|
|
3296
|
+
for key in ("que_id", "queId", "field", "field_id", "fieldId", "column", "queTitle", "que_title"):
|
|
3297
|
+
if key in item:
|
|
3298
|
+
return item[key]
|
|
3299
|
+
return None
|
|
3300
|
+
|
|
3301
|
+
|
|
3302
|
+
def _extract_sort_selector(item: JSONObject) -> JSONValue:
|
|
3303
|
+
for key in ("que_id", "queId", "field", "field_id", "fieldId", "column", "queTitle", "que_title"):
|
|
3304
|
+
if key in item:
|
|
3305
|
+
return item[key]
|
|
3306
|
+
return None
|
|
3307
|
+
|
|
3308
|
+
|
|
3309
|
+
def _resolve_sort_ascend(item: JSONObject) -> bool:
    """Resolve whether a sort item requests ascending order.

    Boolean-style keys ("isAscend", "ascend") win when present;
    otherwise a textual direction/order value is checked, with anything
    other than desc/descending/-1 treated as ascending.
    """
    for flag_key in ("isAscend", "ascend"):
        if flag_key in item:
            return bool(item[flag_key])
    direction = _stringify_json(item.get("direction", item.get("order"))).strip().lower()
    return direction not in {"desc", "descending", "-1"}
|
|
3316
|
+
|
|
3317
|
+
|
|
3318
|
+
def _coerce_filter_range(value: JSONValue) -> tuple[str | None, str | None]:
    """Extract (lower, upper) bounds from a range-ish filter value.

    Dicts use from/min/start and to/max/end key fallbacks; non-empty
    lists use positions 0 and 1. Missing bounds and other shapes yield
    None components.
    """
    if isinstance(value, dict):
        lower = value.get("from", value.get("min", value.get("start")))
        upper = value.get("to", value.get("max", value.get("end")))
    elif isinstance(value, list) and value:
        lower = value[0]
        upper = value[1] if len(value) >= 2 else None
    else:
        return None, None
    return (
        _stringify_json(lower) if lower is not None else None,
        _stringify_json(upper) if upper is not None else None,
    )
|
|
3328
|
+
|
|
3329
|
+
|
|
3330
|
+
def _normalize_filter_values(value: JSONValue) -> list[str]:
    """Stringify a filter value (or each element of a list value)."""
    items = value if isinstance(value, list) else [value]
    return [_stringify_json(item) for item in items]
|
|
3334
|
+
|
|
3335
|
+
|
|
3336
|
+
def _normalize_optional_text(value: JSONValue) -> str | None:
|
|
3337
|
+
if isinstance(value, str):
|
|
3338
|
+
text = value.strip()
|
|
3339
|
+
return text or None
|
|
3340
|
+
if isinstance(value, (int, float)):
|
|
3341
|
+
text = str(value).strip()
|
|
3342
|
+
return text or None
|
|
3343
|
+
return None
|
|
3344
|
+
|
|
3345
|
+
|
|
3346
|
+
def _match_view_condition(
    answer_list: list[JSONValue],
    condition: ViewFilterCondition,
    *,
    dept_member_cache: dict[int, set[int]],
    dept_member_resolver,
) -> bool:  # type: ignore[no-untyped-def]
    """Decide whether one record's answers satisfy a single view condition.

    Missing or empty answers never match. Member-type fields are matched
    by department membership (resolved lazily per department and cached
    in ``dept_member_cache``, which is mutated here) or by explicit
    member ids; other fields fall back to text comparison against the
    condition's judge values.
    """
    answer = _find_answer_for_condition(answer_list, condition)
    if answer is None:
        return False
    values = answer.get("values")
    answer_values = values if isinstance(values, list) else []
    if not answer_values:
        return False
    if condition.que_type in MEMBER_QUE_TYPES:
        dept_ids = _extract_condition_dept_ids(condition)
        if dept_ids:
            # Department conditions: match when any answered member id
            # belongs to one of the referenced departments.
            allowed_ids: set[int] = set()
            for dept_id in dept_ids:
                if dept_id not in dept_member_cache:
                    # Resolve member lists once per department; later
                    # conditions reuse the cached set.
                    dept_member_cache[dept_id] = set(dept_member_resolver(dept_id))
                allowed_ids.update(dept_member_cache[dept_id])
            return any(
                member_id in allowed_ids
                for member_id in (_coerce_count(item.get("id")) if isinstance(item, dict) else None for item in answer_values)
                if member_id is not None
            )
        condition_ids = _extract_condition_ids(condition)
        if condition_ids:
            answer_ids = [
                member_id
                for member_id in (_coerce_count(item.get("id")) if isinstance(item, dict) else None for item in answer_values)
                if member_id is not None
            ]
            if any(answer_id in condition_ids for answer_id in answer_ids):
                return True
    # Fallback: compare normalized answer texts against condition texts.
    condition_texts = _extract_condition_texts(condition)
    if not condition_texts:
        return False
    answer_texts = [_normalize_answer_value_text(item) for item in answer_values]
    return any(answer_text in condition_texts for answer_text in answer_texts if answer_text)
|
|
3387
|
+
|
|
3388
|
+
|
|
3389
|
+
def _find_answer_for_condition(answer_list: list[JSONValue], condition: ViewFilterCondition) -> JSONObject | None:
    """Find the answer dict a condition targets, by que_id then by title.

    Each answer is checked for an id match first, then a non-empty exact
    title match; the first hit wins. Non-dict entries are skipped.
    """
    wanted_title = condition.que_title.strip()
    for entry in answer_list:
        if not isinstance(entry, dict):
            continue
        entry_id = _coerce_count(entry.get("queId"))
        if condition.que_id is not None and entry_id == condition.que_id:
            return entry
        entry_title = _stringify_json(entry.get("queTitle")).strip()
        if wanted_title and entry_title == wanted_title:
            return entry
    return None
|
|
3401
|
+
|
|
3402
|
+
|
|
3403
|
+
def _extract_condition_dept_ids(condition: ViewFilterCondition) -> list[int]:
    """Department ids encoded into judge values with the member prefix."""
    prefix = DEPARTMENT_MEMBER_JUDGE_PREFIX
    parsed_ids = (
        _coerce_count(raw[len(prefix):])
        for raw in condition.judge_values
        if raw.startswith(prefix)
    )
    return [dept_id for dept_id in parsed_ids if dept_id is not None]
|
|
3411
|
+
|
|
3412
|
+
|
|
3413
|
+
def _extract_condition_ids(condition: ViewFilterCondition) -> set[int]:
    """Numeric ids referenced by a condition.

    Combines numeric judge values (department-prefixed ones excluded —
    those are handled as department filters) with ids from the judge
    value detail dicts (``id`` falling back to ``uid``).
    """
    ids: set[int] = set()
    plain_values = (
        raw for raw in condition.judge_values
        if not raw.startswith(DEPARTMENT_MEMBER_JUDGE_PREFIX)
    )
    for raw in plain_values:
        parsed = _coerce_count(raw)
        if parsed is not None:
            ids.add(parsed)
    for detail in condition.judge_value_details:
        parsed = _coerce_count(detail.get("id", detail.get("uid")))
        if parsed is not None:
            ids.add(parsed)
    return ids
|
|
3426
|
+
|
|
3427
|
+
|
|
3428
|
+
def _extract_condition_texts(condition: ViewFilterCondition) -> set[str]:
    """Text values a condition can match against.

    Collects trimmed non-blank judge values (department-prefixed ones
    excluded) plus value/dataValue/name texts from the detail dicts.
    """
    texts: set[str] = set()
    for raw in condition.judge_values:
        if raw.startswith(DEPARTMENT_MEMBER_JUDGE_PREFIX):
            continue
        trimmed = raw.strip()
        if trimmed:
            texts.add(trimmed)
    for detail in condition.judge_value_details:
        for key in ("value", "dataValue", "name"):
            text = _normalize_optional_text(detail.get(key))
            if text:
                texts.add(text)
    return texts
|
|
3442
|
+
|
|
3443
|
+
|
|
3444
|
+
def _normalize_answer_value_text(value: JSONValue) -> str | None:
    """Best-effort display text for an answer value.

    Dicts try the common text keys first and fall back to a numeric
    id/uid rendered as a string; scalars are normalized directly.
    """
    if not isinstance(value, dict):
        return _normalize_optional_text(value)
    for key in ("value", "dataValue", "name", "email"):
        text = _normalize_optional_text(value.get(key))
        if text:
            return text
    numeric = _coerce_count(value.get("id", value.get("uid")))
    return None if numeric is None else str(numeric)
|
|
3453
|
+
|
|
3454
|
+
|
|
3455
|
+
def _normalize_list_query_rules(item: JSONObject) -> list[JSONObject]:
    """Normalize a loose filter item into the camelCase query-rule shape.

    Backend-style judge rules are converted via _match_rule_to_query_rule.
    Department filters that resolve to concrete details are dropped here
    (they are expressed on the match-rule path instead).  Otherwise both
    camelCase and snake_case search keys are copied onto camelCase targets.
    Returns a single-element list, or [] when nothing usable was found.
    """
    if _looks_like_backend_match_rule(item):
        rule = _match_rule_to_query_rule(item)
        return [rule] if rule else []
    que_id = item.get("queId", item.get("que_id"))
    que_type = _coerce_count(item.get("queType", item.get("que_type")))
    # Department filters with resolvable details are handled elsewhere;
    # emitting a query rule too would duplicate the constraint.
    if que_type in DEPARTMENT_QUE_TYPES and _department_filter_details_from_item(item):
        return []
    rule: JSONObject = {}
    if que_id is not None:
        rule["queId"] = que_id
    if que_type is not None:
        rule["queType"] = que_type
    # Copy recognized keys, accepting either spelling for each target key.
    for source, target in (
        ("searchKey", "searchKey"),
        ("search_key", "searchKey"),
        ("searchKeys", "searchKeys"),
        ("search_keys", "searchKeys"),
        ("minValue", "minValue"),
        ("min_value", "minValue"),
        ("maxValue", "maxValue"),
        ("max_value", "maxValue"),
        ("scope", "scope"),
        ("searchOptions", "searchOptions"),
        ("search_options", "searchOptions"),
        ("searchUserIds", "searchUserIds"),
        ("search_user_ids", "searchUserIds"),
        ("searchUids", "searchUids"),
        ("search_uids", "searchUids"),
    ):
        if source in item:
            rule[target] = item[source]
    return [rule] if rule else []
|
|
3488
|
+
|
|
3489
|
+
|
|
3490
|
+
def _normalize_list_match_rules(item: JSONObject) -> list[JSONObject]:
    """Translate a loose query-style filter item into backend match rules.

    Backend-style judge rules pass through _normalize_backend_match_rule.
    Department filters with resolvable details become a dedicated
    department rule.  Date min/max ranges are intentionally skipped here
    (they are expressed through control-time values elsewhere).  All other
    recognized keys (scope, searchKey(s), min/max, searchOptions,
    searchUids/searchUserIds) each emit one rule.  Returns [] when the
    item has no usable queId.
    """
    if _looks_like_backend_match_rule(item):
        normalized = _normalize_backend_match_rule(item)
        return [normalized] if normalized else []
    que_id = _coerce_count(item.get("queId", item.get("que_id")))
    que_type = _coerce_count(item.get("queType", item.get("que_type")))
    if que_id is None:
        return []
    if que_type in DEPARTMENT_QUE_TYPES:
        details = _department_filter_details_from_item(item)
        if details:
            judge_type = JUDGE_EQUAL if len(details) == 1 else JUDGE_EQUAL_ANY
            return [_department_filter_rule(que_id, que_type, details, judge_type=judge_type)]
    min_value = item.get("minValue", item.get("min_value"))
    max_value = item.get("maxValue", item.get("max_value"))
    if que_type in DATE_QUE_TYPES and (min_value is not None or max_value is not None):
        # Date ranges are not expressed as match rules; skip this item.
        return []

    match_rules: list[JSONObject] = []

    def append_rule(judge_type: int, judge_values: list[str]) -> None:
        # Shared shape for every emitted rule; queType added only when known.
        rule: JSONObject = {
            "queId": que_id,
            "judgeType": judge_type,
            "judgeValues": judge_values,
            "matchType": MATCH_TYPE_ACCURACY,
        }
        if que_type is not None:
            rule["queType"] = que_type
        match_rules.append(rule)

    scope = _coerce_count(item.get("scope"))
    if scope is not None and scope != SCOPE_ALL:
        append_rule(JUDGE_UNEQUAL if scope == SCOPE_NOT_EMPTY else JUDGE_EQUAL, [])
    search_key = _normalize_optional_text(item.get("searchKey", item.get("search_key")))
    if search_key is not None:
        append_rule(JUDGE_FUZZY_MATCH, [search_key])
    search_keys = item.get("searchKeys", item.get("search_keys"))
    normalized_search_keys = [_stringify_json(entry) for entry in search_keys] if isinstance(search_keys, list) else []
    if normalized_search_keys:
        judge_type = JUDGE_INCLUDE_ANY if que_type in MULTI_SELECT_QUE_TYPES | MEMBER_QUE_TYPES else JUDGE_EQUAL_ANY
        append_rule(judge_type, normalized_search_keys)
    if min_value is not None:
        append_rule(JUDGE_GREATER_OR_EQUAL, [_stringify_json(min_value)])
    if max_value is not None:
        append_rule(JUDGE_LESS_OR_EQUAL, [_stringify_json(max_value)])
    search_options = item.get("searchOptions", item.get("search_options"))
    normalized_search_options = [_stringify_json(entry) for entry in search_options] if isinstance(search_options, list) else []
    if normalized_search_options:
        judge_type = JUDGE_INCLUDE_ANY if que_type in MULTI_SELECT_QUE_TYPES else JUDGE_EQUAL_ANY
        append_rule(judge_type, normalized_search_options)
    search_uids = item.get("searchUids", item.get("search_uids", item.get("searchUserIds", item.get("search_user_ids"))))
    normalized_search_uids = [str(member_id) for member_id in _normalize_member_filter_ids(search_uids)] if search_uids is not None else []
    if normalized_search_uids:
        append_rule(JUDGE_INCLUDE_ANY, normalized_search_uids)
    return match_rules
|
|
3589
|
+
|
|
3590
|
+
|
|
3591
|
+
def _looks_like_backend_match_rule(item: JSONObject) -> bool:
    """True when the item already carries backend judge-rule keys (either spelling)."""
    backend_keys = ("judgeType", "judge_type", "judgeValues", "judge_values", "judgeValueDetails", "judge_value_details")
    return any(key in item for key in backend_keys)
|
|
3593
|
+
|
|
3594
|
+
|
|
3595
|
+
def _normalize_backend_match_rule(item: JSONObject) -> JSONObject:
    """Re-emit a backend-style judge rule with canonical camelCase keys.

    Accepts both camelCase and snake_case spellings for every field and
    returns {} when the item lacks a usable queId.  matchType defaults to
    MATCH_TYPE_ACCURACY when absent.
    """
    que_id = _coerce_count(item.get("queId", item.get("que_id")))
    if que_id is None:
        return {}
    rule: JSONObject = {"queId": que_id}
    que_title = _normalize_optional_text(item.get("queTitle", item.get("que_title")))
    if que_title is not None:
        rule["queTitle"] = que_title
    que_type = _coerce_count(item.get("queType", item.get("que_type")))
    if que_type is not None:
        rule["queType"] = que_type
    judge_type = _coerce_count(item.get("judgeType", item.get("judge_type")))
    if judge_type is not None:
        rule["judgeType"] = judge_type
    judge_values = item.get("judgeValues", item.get("judge_values"))
    if isinstance(judge_values, list):
        # Backend expects stringified judge values.
        rule["judgeValues"] = [_stringify_json(value) for value in judge_values]
    judge_value_details = item.get("judgeValueDetails", item.get("judge_value_details"))
    if isinstance(judge_value_details, list):
        # Keep only well-formed (dict) detail entries.
        rule["judgeValueDetails"] = [value for value in judge_value_details if isinstance(value, dict)]
    control_value = _coerce_count(item.get("controlValue", item.get("control_value")))
    if control_value is not None:
        rule["controlValue"] = control_value
    control_time_start_value = item.get("controlTimeStartValue", item.get("control_time_start_value"))
    if control_time_start_value is not None:
        rule["controlTimeStartValue"] = _stringify_json(control_time_start_value)
    control_time_end_value = item.get("controlTimeEndValue", item.get("control_time_end_value"))
    if control_time_end_value is not None:
        rule["controlTimeEndValue"] = _stringify_json(control_time_end_value)
    match_type = _coerce_count(item.get("matchType", item.get("match_type")))
    rule["matchType"] = match_type if match_type is not None else MATCH_TYPE_ACCURACY
    return rule
|
|
3627
|
+
|
|
3628
|
+
|
|
3629
|
+
def _department_filter_rule(
    que_id: int,
    que_type: int | None,
    details: list[JSONObject],
    *,
    judge_type: int,
) -> JSONObject:
    """Build a backend match rule for a department field from resolved details.

    Details without an id are dropped from both judgeValues and
    judgeValueDetails.
    """
    usable = [detail for detail in details if detail.get("id") is not None]
    rule: JSONObject = {
        "queId": que_id,
        "judgeType": judge_type,
        "judgeValues": [str(detail["id"]) for detail in usable],
        "judgeValueDetails": [
            {"id": detail["id"], "value": _stringify_json(detail.get("value", detail["id"]))}
            for detail in usable
        ],
        "matchType": MATCH_TYPE_ACCURACY,
    }
    if que_type is not None:
        rule["queType"] = que_type
    return rule
|
|
3650
|
+
|
|
3651
|
+
|
|
3652
|
+
def _department_filter_details_from_item(item: JSONObject) -> list[JSONObject]:
    """Extract de-duplicated department details from the first recognized value key."""
    raw_values: list[JSONValue] = []
    # First matching key wins; later keys are ignored.
    for key in ("searchOptions", "search_options", "searchKeys", "search_keys", "searchKey", "search_key", "value", "values"):
        if key in item:
            candidate = item[key]
            raw_values = candidate if isinstance(candidate, list) else [candidate]
            break
    details: list[JSONObject] = []
    seen_ids: set[int] = set()
    for value in _expand_values(raw_values):
        detail = _department_filter_detail_from_value(value)
        if detail is None:
            continue
        dept_id = _coerce_count(detail.get("id"))
        if dept_id is None or dept_id in seen_ids:
            continue
        seen_ids.add(dept_id)
        details.append(detail)
    return details
|
|
3671
|
+
|
|
3672
|
+
|
|
3673
|
+
def _department_filter_detail_from_value(value: JSONValue) -> JSONObject | None:
    """Coerce a raw value into an {'id', 'value'} department detail, or None."""
    if isinstance(value, dict):
        dept_id = _coerce_count(value.get("id", value.get("deptId")))
        if dept_id is None:
            return None
        label = value.get("value", value.get("name", value.get("deptName", dept_id)))
        return {"id": dept_id, "value": _stringify_json(label)}
    dept_id = _coerce_count(value)
    return None if dept_id is None else {"id": dept_id, "value": str(dept_id)}
|
|
3683
|
+
|
|
3684
|
+
|
|
3685
|
+
def _match_rule_to_query_rule(item: JSONObject) -> JSONObject:
    """Convert a backend judge rule back into the query-rule shape.

    The judgeType determines which query key receives the values
    (searchKey, minValue/maxValue, searchOptions/searchKeys,
    searchUids/searchUserIds, or scope).  Returns {} when the item has no
    usable queId or no recognizable query key could be produced.
    """
    que_id = _coerce_count(item.get("queId", item.get("que_id")))
    if que_id is None:
        return {}
    que_type = _coerce_count(item.get("queType", item.get("que_type")))
    judge_type = _coerce_count(item.get("judgeType", item.get("judge_type")))
    judge_values = item.get("judgeValues", item.get("judge_values"))
    values = [_stringify_json(value) for value in judge_values] if isinstance(judge_values, list) else []
    rule: JSONObject = {"queId": que_id}
    if que_type is not None:
        rule["queType"] = que_type
    if judge_type == JUDGE_FUZZY_MATCH and values:
        # Fuzzy match maps to a single free-text search key.
        rule["searchKey"] = values[0]
    elif judge_type == JUDGE_GREATER_OR_EQUAL and values:
        rule["minValue"] = values[0]
    elif judge_type == JUDGE_LESS_OR_EQUAL and values:
        rule["maxValue"] = values[0]
    elif judge_type == JUDGE_EQUAL_ANY and values:
        if que_type in SINGLE_SELECT_QUE_TYPES | MULTI_SELECT_QUE_TYPES | DEPARTMENT_QUE_TYPES:
            rule["searchOptions"] = values
        else:
            rule["searchKeys"] = values
    elif judge_type == JUDGE_INCLUDE_ANY and values:
        if que_type in MEMBER_QUE_TYPES:
            # Prefer numeric uids; fall back to string user ids when none parse.
            numeric_ids = [member_id for member_id in (_coerce_count(value) for value in values) if member_id is not None]
            if numeric_ids:
                rule["searchUids"] = numeric_ids
            else:
                rule["searchUserIds"] = values
        elif que_type in MULTI_SELECT_QUE_TYPES:
            rule["searchOptions"] = values
        else:
            rule["searchKeys"] = values
    elif judge_type == JUDGE_UNEQUAL and not values:
        # "not equal to nothing" encodes a non-empty scope.
        rule["scope"] = SCOPE_NOT_EMPTY
    elif judge_type == JUDGE_EQUAL and not values:
        rule["scope"] = SCOPE_EMPTY
    query_keys = {"searchKey", "searchKeys", "minValue", "maxValue", "searchOptions", "searchUids", "searchUserIds", "scope"}
    return rule if any(key in rule for key in query_keys) else {}
|
|
3724
|
+
|
|
3725
|
+
|
|
3726
|
+
def _normalize_list_sort_rule(item: JSONObject) -> JSONObject:
    """Normalize a loose sort descriptor into {'queId', 'isAscend'} form."""
    normalized: JSONObject = {}
    que_id = item.get("queId", item.get("que_id"))
    if que_id is not None:
        normalized["queId"] = que_id
    if "isAscend" in item:
        normalized["isAscend"] = bool(item["isAscend"])
    elif any(key in item for key in ("ascend", "direction", "order")):
        normalized["isAscend"] = _resolve_sort_ascend(item)
    return normalized
|
|
3736
|
+
|
|
3737
|
+
|
|
3738
|
+
def _normalize_member_filter_ids(value: JSONValue) -> list[int]:
    """Coerce member references (scalars or member objects) into numeric ids."""
    entries = value if isinstance(value, list) else [value]
    member_ids: list[int] = []
    for entry in entries:
        if isinstance(entry, dict):
            raw = entry.get("id", entry.get("uid", entry.get("userId")))
        else:
            raw = entry
        parsed = _coerce_count(raw)
        if parsed is not None:
            member_ids.append(parsed)
    return member_ids
|
|
3750
|
+
|
|
3751
|
+
|
|
3752
|
+
def _normalize_option_filter_ids(value: JSONValue) -> list[int]:
    """Coerce option references (scalars or option objects) into numeric ids."""
    entries = value if isinstance(value, list) else [value]
    option_ids: list[int] = []
    for entry in entries:
        if isinstance(entry, dict):
            raw: JSONValue = entry.get("optionId", entry.get("optId", entry.get("id")))
        else:
            raw = entry
        parsed = _coerce_count(raw)
        if parsed is not None:
            option_ids.append(parsed)
    return option_ids
|
|
3765
|
+
|
|
3766
|
+
|
|
3767
|
+
def _collect_question_relations(schema: JSONObject) -> list[JSONObject]:
    """Return the schema's questionRelations entries that are dicts, else []."""
    relations = schema.get("questionRelations")
    if isinstance(relations, list):
        return [entry for entry in relations if isinstance(entry, dict)]
    return []
|
|
3772
|
+
|
|
3773
|
+
|
|
3774
|
+
def _collect_option_links(resolved_fields: list[JSONObject]) -> list[JSONObject]:
    """Flatten the option_links dicts out of every field's write_format."""
    collected: list[JSONObject] = []
    for field_entry in resolved_fields:
        write_format = field_entry.get("write_format")
        if not isinstance(write_format, dict):
            continue
        option_links = write_format.get("option_links")
        if not isinstance(option_links, list):
            continue
        collected.extend(link for link in option_links if isinstance(link, dict))
    return collected
|
|
3787
|
+
|
|
3788
|
+
|
|
3789
|
+
def _answers_need_resolution(answers: list[JSONObject]) -> bool:
    """True when any answer is not yet in the native backend answer shape."""
    for answer in answers:
        if not isinstance(answer, dict):
            return True
        if "queId" not in answer or "queType" not in answer:
            return True
        if "values" not in answer and "value" in answer:
            return True
        if any(key in answer for key in ("queTitle", "que_title", "que_id")):
            return True
        if _subtable_answer_needs_resolution(answer):
            return True
    return False
|
|
3802
|
+
|
|
3803
|
+
|
|
3804
|
+
def _subtable_answer_needs_resolution(item: JSONObject) -> bool:
    """True when a subtable answer is not yet in native tableValues shape.

    Native shape is a list of rows, each row a list of dict cells carrying
    queId/queType and a values key.  Anything else (rows/fields aliases,
    dict rows, scalar cells, title-keyed cells) needs resolution.
    """
    # "rows"/"fields" are alias shapes that always need resolution.
    if any(key in item for key in ("rows", "fields")):
        return True
    que_type = _coerce_count(item.get("queType"))
    table_values = item.get("tableValues")
    if que_type not in SUBTABLE_QUE_TYPES:
        return False
    if table_values is None:
        return True
    if not isinstance(table_values, list):
        return True
    for row in table_values:
        # A dict row is an aliased (title-keyed) row, not a native cell list.
        if isinstance(row, dict):
            return True
        if not isinstance(row, list):
            return True
        for cell in row:
            if not isinstance(cell, dict):
                return True
            if "queId" not in cell or "queType" not in cell:
                return True
            if "values" not in cell and "value" in cell:
                return True
            if any(key in cell for key in ("queTitle", "que_title", "que_id", "fields", "answers", "rows")):
                return True
    return False
|
|
3830
|
+
|
|
3831
|
+
|
|
3832
|
+
def _expand_values(values: list[JSONValue]) -> list[JSONValue]:
    """Unwrap a singly-nested list ([[a, b]] -> [a, b]); otherwise return as-is.

    Returns a fresh list in the unwrap case so callers cannot mutate the
    nested original through the result.
    """
    if len(values) == 1 and isinstance(values[0], list):
        # list() replaces the original's redundant identity comprehension.
        return list(values[0])
    return values
|
|
3837
|
+
|
|
3838
|
+
|
|
3839
|
+
def _option_value(raw_value: JSONValue, field: FormField) -> JSONObject:
    """Normalize an option answer into {'value', 'optionId'?}.

    Dicts pass through with their value/optValue text and optional
    optionId.  Scalar values are validated against the field's known
    options; an unknown option raises RecordInputError.
    """
    if isinstance(raw_value, dict):
        value = raw_value.get("value", raw_value.get("optValue"))
        payload: JSONObject = {"value": _stringify_json(value)}
        if "optionId" in raw_value:
            payload["optionId"] = raw_value["optionId"]
        return payload
    text = _stringify_json(raw_value)
    # Only validate when the field declares a closed option set.
    if field.options and text not in field.options:
        raise RecordInputError(
            message=f"field '{field.que_title}' uses unknown option '{text}'",
            error_code="OPTION_NOT_FOUND",
            fix_hint="Use record_field_resolve or inspect the form to confirm allowed option values.",
            details={"field": _field_ref_payload(field), "expected_format": _write_format_for_field(field), "received_value": raw_value},
        )
    return {"value": text}
|
|
3855
|
+
|
|
3856
|
+
|
|
3857
|
+
def _boolean_display(value: JSONValue) -> str:
    """Render a boolean-ish value as the Chinese label 是/否 when recognizable.

    Unrecognized values are stringified unchanged.
    """
    if isinstance(value, bool):
        return "是" if value else "否"
    normalized = _stringify_json(value).strip().lower()
    if normalized in {"true", "1", "yes", "y", "是"}:
        return "是"
    if normalized in {"false", "0", "no", "n", "否"}:
        return "否"
    return _stringify_json(value)
|
|
3866
|
+
|
|
3867
|
+
|
|
3868
|
+
def _member_value(profile: str, value: JSONValue) -> JSONObject:
    """Normalize a member reference into a backend member payload.

    Dicts must carry id/uid or a non-empty userId; scalars must be numeric
    ids or non-empty userId strings.  Raises RecordInputError otherwise.
    *profile* is only used as the error location label.
    """
    if isinstance(value, dict):
        member_id = _coerce_count(value.get("id", value.get("uid", value.get("userId"))))
        user_id = value.get("userId")
        if member_id is None and not (isinstance(user_id, str) and user_id.strip()):
            raise RecordInputError(
                message="member values require id, uid, or userId",
                error_code="INVALID_MEMBER_VALUE",
                fix_hint="Pass member values like {'id': 2, 'value': '张三'} or {'userId': 'u_123', 'userName': '张三'}.",
                details={"received_value": value, "location": profile},
            )
        # Display text falls back through value -> name -> userName -> id/userId.
        payload: JSONObject = {
            "value": _stringify_json(value.get("value", value.get("name", value.get("userName", member_id if member_id is not None else user_id))))
        }
        # Prefer the numeric id; only use userId when no numeric id parsed.
        if member_id is not None:
            payload["id"] = member_id
        elif isinstance(user_id, str) and user_id.strip():
            payload["userId"] = user_id.strip()
        if value.get("email") is not None:
            payload["email"] = value["email"]
        if value.get("otherInfo") is not None:
            payload["otherInfo"] = value["otherInfo"]
        return payload
    member_id = _coerce_count(value)
    if member_id is None and not (isinstance(value, str) and value.strip()):
        raise RecordInputError(
            message="member values require numeric ids, userIds, or member objects",
            error_code="INVALID_MEMBER_VALUE",
            fix_hint="Pass member ids like 2, userIds like 'u_123', or objects like {'id': 2, 'value': '张三'}.",
            details={"received_value": value, "location": profile},
        )
    if member_id is not None:
        return {"id": member_id, "value": str(member_id)}
    return {"userId": str(value).strip(), "value": str(value).strip()}
|
|
3902
|
+
|
|
3903
|
+
|
|
3904
|
+
def _department_value(value: JSONValue) -> JSONObject:
    """Normalize a department reference into {'id', 'value'}.

    Dicts must carry id/deptId; scalars must be numeric ids.  Raises
    RecordInputError otherwise.
    """
    if isinstance(value, dict):
        dept_id = _coerce_count(value.get("id", value.get("deptId")))
        if dept_id is None:
            raise RecordInputError(
                message="department values require id or deptId",
                error_code="INVALID_DEPARTMENT_VALUE",
                fix_hint="Pass department values like {'id': 11, 'value': '示例部门'} or {'deptId': 11, 'deptName': '示例部门'}.",
                details={"received_value": value},
            )
        # Display text falls back through value -> name -> deptName -> id.
        return {"id": dept_id, "value": _stringify_json(value.get("value", value.get("name", value.get("deptName", dept_id))))}
    dept_id = _coerce_count(value)
    if dept_id is None:
        raise RecordInputError(
            message="department values require numeric ids or department objects",
            error_code="INVALID_DEPARTMENT_VALUE",
            fix_hint="Pass department ids like 11 or objects like {'id': 11, 'value': '示例部门'}.",
            details={"received_value": value},
        )
    return {"id": dept_id, "value": str(dept_id)}
|
|
3924
|
+
|
|
3925
|
+
|
|
3926
|
+
def _attachment_value(value: JSONValue) -> JSONObject:
    """Normalize an attachment reference into {'value', 'name'?}.

    Dicts must carry value or url; anything else is stringified directly.
    """
    if not isinstance(value, dict):
        return {"value": _stringify_json(value)}
    if "value" not in value and "url" not in value:
        raise RecordInputError(
            message="attachment values require value/url",
            error_code="INVALID_ATTACHMENT_VALUE",
            fix_hint="Pass attachments like {'value': 'https://.../a.pdf', 'name': 'a.pdf'}.",
            details={"received_value": value},
        )
    payload: JSONObject = {"value": value.get("value", value.get("url"))}
    if value.get("name") is not None:
        payload["name"] = value["name"]
    return payload
|
|
3940
|
+
|
|
3941
|
+
|
|
3942
|
+
def _relation_value(value: JSONValue) -> JSONObject:
    """Normalize a relation reference into {'value': apply_id_text}."""
    if not isinstance(value, dict):
        return {"value": _stringify_json(value)}
    apply_id = value.get("apply_id", value.get("applyId", value.get("value", value.get("id"))))
    if apply_id is None:
        raise RecordInputError(
            message="relation values require apply_id/applyId/value/id",
            error_code="INVALID_RELATION_VALUE",
            fix_hint="Pass relation values like {'apply_id': 5001} or numeric apply ids.",
            details={"received_value": value},
        )
    return {"value": _stringify_json(apply_id)}
|
|
3954
|
+
|
|
3955
|
+
|
|
3956
|
+
def _field_ref_payload(field: FormField) -> JSONObject:
    """Compact field reference for error payloads; includes up to 8 aliases."""
    ref = {"que_id": field.que_id, "que_title": field.que_title, "que_type": field.que_type}
    if field.aliases:
        ref["aliases"] = field.aliases[:8]
    return ref
|
|
3961
|
+
|
|
3962
|
+
|
|
3963
|
+
def _normalize_field_lookup_key(value: str | int | None) -> str:
    """Lowercase, trim, and strip lookup-noise characters from a field key."""
    text = _stringify_json(value).strip().lower()
    return FIELD_LOOKUP_STRIP_RE.sub("", text) if text else ""
|
|
3968
|
+
|
|
3969
|
+
|
|
3970
|
+
def _field_alias_candidates(field: FormField) -> set[str]:
    """Derive lookup aliases for a field from its title and question type.

    Aliases come from explicit overrides, stripped generic prefixes, bare
    title suffixes (e.g. "类型", "金额"), and type-driven defaults for
    member/department/date fields.  Aliases that normalize to the title
    itself (or to nothing) are filtered out.
    """
    title = field.que_title.strip()
    aliases = set(GENERIC_FIELD_ALIAS_OVERRIDES.get(title, []))
    for prefix in GENERIC_FIELD_PREFIX_ALIASES:
        # Only strip when something meaningful (>1 char) remains.
        if title.startswith(prefix) and len(title) > len(prefix) + 1:
            aliases.add(title[len(prefix):])
    if title.endswith("所在部门"):
        aliases.add("部门")
        aliases.add(f"{title.removesuffix('所在部门')}部门")
    # Generic suffixes where the bare suffix itself works as an alias;
    # replaces the original's seven copy-pasted endswith branches.
    for suffix in ("阶段", "来源", "类型", "等级", "名称", "编号", "金额"):
        if title.endswith(suffix):
            aliases.add(suffix)
    if field.que_type in MEMBER_QUE_TYPES and "负责人" in title:
        aliases.add("负责人")
    if field.que_type in DEPARTMENT_QUE_TYPES:
        aliases.add("部门")
    if field.que_type in DATE_QUE_TYPES:
        for suffix in ("时间", "日期"):
            if title.endswith(suffix):
                aliases.add(suffix)
    normalized_title = _normalize_field_lookup_key(title)
    return {
        alias.strip()
        for alias in aliases
        if alias and _normalize_field_lookup_key(alias) and _normalize_field_lookup_key(alias) != normalized_title
    }
|
|
4009
|
+
|
|
4010
|
+
|
|
4011
|
+
def _subtable_columns_for_field(field: FormField) -> list[JSONObject]:
    """List a subtable field's column descriptors (que_id/que_title/que_type).

    subQuestions takes precedence; innerQuestions is the fallback source.
    Columns without a usable id and title are skipped.
    """
    raw = field.raw if isinstance(field.raw, dict) else {}
    sub_questions = raw.get("subQuestions")
    if isinstance(sub_questions, list):
        questions = _flatten_questions(sub_questions)
    else:
        inner_questions = raw.get("innerQuestions")
        questions = _flatten_questions(inner_questions) if isinstance(inner_questions, list) else []
    columns: list[JSONObject] = []
    for question in questions:
        que_id = _coerce_count(question.get("queId"))
        que_title = _normalize_optional_text(question.get("queTitle"))
        if que_id is None or que_title is None:
            continue
        columns.append(
            {
                "que_id": que_id,
                "que_title": que_title,
                "que_type": _coerce_count(question.get("queType")),
            }
        )
    return columns
|
|
4033
|
+
|
|
4034
|
+
|
|
4035
|
+
def _write_support_payload(
    *,
    support_level: str,
    kind: str,
    examples: JSONValue | None = None,
    accepted_aliases: list[str] | None = None,
    options: list[str] | None = None,
    reason: str | None = None,
    fix_hint: str | None = None,
    required_presteps: list[str] | None = None,
    subfields: list[JSONObject] | None = None,
    row_shape: str | None = None,
) -> JSONObject:
    """Build a write-format descriptor, including only the supplied optionals.

    support_level and kind are always present.  Note that `examples` and
    `options` are included when not None (an empty list passes through),
    whereas `accepted_aliases`, `required_presteps`, and `subfields` are
    included only when truthy.
    """
    payload: JSONObject = {
        "support_level": support_level,
        "kind": kind,
    }
    if examples is not None:
        payload["examples"] = examples
    if accepted_aliases:
        payload["accepted_aliases"] = accepted_aliases
    if options is not None:
        payload["options"] = options
    if reason is not None:
        payload["reason"] = reason
    if fix_hint is not None:
        payload["fix_hint"] = fix_hint
    if required_presteps:
        payload["required_presteps"] = required_presteps
    if subfields:
        payload["subfields"] = subfields
    if row_shape is not None:
        payload["row_shape"] = row_shape
    return payload
|
|
4069
|
+
|
|
4070
|
+
|
|
4071
|
+
def _score_candidate_text(requested_key: str, candidate_text: str, *, fuzzy: bool, label: str) -> tuple[float, str]:
    """Score how well a candidate matches the requested key.

    Returns (score, match_kind); title matches score slightly higher than
    other labels at each tier.  Fuzzy similarity applies only when
    requested and only above the 0.3 floor.
    """
    candidate_key = _normalize_field_lookup_key(candidate_text)
    if not candidate_key:
        return 0.0, "none"
    is_title = label == "title"
    if candidate_key == requested_key:
        return (0.98 if is_title else 0.95), f"{label}_exact"
    if candidate_key.startswith(requested_key):
        return (0.9 if is_title else 0.87), f"{label}_prefix"
    if requested_key in candidate_key:
        return (0.84 if is_title else 0.8), f"{label}_contains"
    if fuzzy:
        similarity = _normalized_text_similarity(requested_key, candidate_key)
        if similarity >= 0.3:
            return similarity, f"{label}_fuzzy"
    return 0.0, "none"
|
|
4086
|
+
|
|
4087
|
+
|
|
4088
|
+
def _write_format_for_field(field: FormField) -> JSONObject:
    """Describe how a field may be written, keyed off its question type.

    Returns a payload from _write_support_payload: unsupported for
    non-writable types, restricted (with presteps/examples) for structured
    types (subtable, member, department, attachment, relation), and full
    for plain scalar/select/boolean/date types.
    """
    if field.que_type in VERIFY_UNSUPPORTED_WRITE_QUE_TYPES:
        return _write_support_payload(
            support_level="unsupported",
            kind="unsupported_direct_write",
            reason=_unsupported_write_reason(field.que_type),
            fix_hint=_unsupported_write_fix_hint(field.que_type),
        )
    if field.que_type in SUBTABLE_QUE_TYPES:
        return _write_support_payload(
            support_level="restricted",
            kind="subtable_rows",
            accepted_aliases=["rows", "tableValues", "rowId", "row_id", "__row_id__"],
            row_shape="list of row objects keyed by subfield title, or native tableValues rows",
            required_presteps=[
                "Use the current subfield titles from the form schema.",
                "When updating existing rows, include rowId/row_id only if the source record already exposes it.",
            ],
            subfields=_subtable_columns_for_field(field),
            examples=[{"rows": [{"子字段1": "值", "子字段2": 1}]}],
        )
    if field.que_type in MEMBER_QUE_TYPES:
        return _write_support_payload(
            support_level="restricted",
            kind="member_list",
            examples=[{"id": 2, "value": "张三"}],
            accepted_aliases=["uid", "userId", "userName"],
            required_presteps=["Pass member ids/userIds or member objects; display names alone are not resolved automatically."],
        )
    if field.que_type in DEPARTMENT_QUE_TYPES:
        return _write_support_payload(
            support_level="restricted",
            kind="department_list",
            examples=[{"id": 11, "value": "示例部门"}],
            accepted_aliases=["deptId", "deptName"],
            required_presteps=["Pass a dept id/object, or an exact department name that resolves uniquely."],
        )
    if field.que_type in ATTACHMENT_QUE_TYPES:
        return _write_support_payload(
            support_level="restricted",
            kind="attachment_list",
            examples=[{"value": "https://files.example.com/a.pdf", "name": "a.pdf"}],
            required_presteps=["Upload the file first with file_upload_local, then write the returned attachment value/url."],
        )
    if field.que_type in RELATION_QUE_TYPES:
        return _write_support_payload(
            support_level="restricted",
            kind="relation_record",
            examples=[{"value": "5001"}],
            required_presteps=["Query the target app first and use the referenced apply_id."],
        )
    if field.que_type in SINGLE_SELECT_QUE_TYPES:
        return _write_support_payload(support_level="full", kind="single_select", options=field.options)
    if field.que_type in MULTI_SELECT_QUE_TYPES:
        return _write_support_payload(support_level="full", kind="multi_select", options=field.options)
    if field.que_type in BOOLEAN_QUE_TYPES:
        return _write_support_payload(support_level="full", kind="boolean_label", examples=["是", "否"])
    if field.que_type in DATE_QUE_TYPES:
        return _write_support_payload(support_level="full", kind="date_string", examples=["2026-03-13 10:00:00"])
    # Anything unclassified is treated as freely writable text.
    return _write_support_payload(support_level="full", kind="scalar_text")
|
|
4148
|
+
|
|
4149
|
+
|
|
4150
|
+
def _summarize_write_support(resolved_fields: list[JSONObject]) -> JSONObject:
    """Bucket resolved fields by their declared write-support level.

    Returns a mapping with "full", "restricted", and "unsupported" lists, each
    holding a compact descriptor of a matching resolved field. Entries that are
    unresolved, lack a dict ``write_format``, or carry an unknown support level
    are silently skipped.
    """
    buckets: JSONObject = {"full": [], "restricted": [], "unsupported": []}
    for field_entry in resolved_fields:
        if not bool(field_entry.get("resolved")):
            continue
        fmt = field_entry.get("write_format")
        if not isinstance(fmt, dict):
            continue
        # Unknown levels fall through: buckets.get returns None, not a list.
        target = buckets.get(str(fmt.get("support_level", "full")))
        if not isinstance(target, list):
            continue
        descriptor = {
            "source": field_entry.get("source"),
            "requested": field_entry.get("requested"),
            "que_id": field_entry.get("que_id"),
            "que_title": field_entry.get("que_title"),
            "que_type": field_entry.get("que_type"),
            "kind": fmt.get("kind"),
        }
        target.append(descriptor)
    return buckets
|
|
4177
|
+
|
|
4178
|
+
|
|
4179
|
+
def _subtable_row_id(row: list[JSONObject]) -> int | None:
    """Return the first usable rowId/row_id among the row's dict cells, or None.

    Non-dict cells are ignored; "rowId" takes precedence over "row_id" within
    a cell, and values are normalized through _coerce_count.
    """
    candidates = (
        _coerce_count(cell.get("rowId", cell.get("row_id")))
        for cell in row
        if isinstance(cell, dict)
    )
    return next((rid for rid in candidates if rid is not None), None)
|
|
4187
|
+
|
|
4188
|
+
|
|
4189
|
+
def _index_subtable_rows_by_row_id(rows: list[JSONValue]) -> dict[int, list[JSONObject]]:
    """Index subtable rows by their row id.

    Each row is reduced to its dict-shaped cells; rows that are not lists or
    that expose no row id are dropped. Later rows with a duplicate id replace
    earlier ones.
    """
    by_row_id: dict[int, list[JSONObject]] = {}
    for raw_row in rows:
        if not isinstance(raw_row, list):
            continue
        cells = [cell for cell in raw_row if isinstance(cell, dict)]
        key = _subtable_row_id(cells)
        if key is None:
            continue
        by_row_id[key] = cells
    return by_row_id
|
|
4199
|
+
|
|
4200
|
+
|
|
4201
|
+
def _subtable_cell_ref_payload(
    table_field: FormField | None,
    subfield: FormField | None,
    *,
    que_id: int,
    row_ordinal: int,
    row_id: int | None,
) -> JSONObject:
    """Describe one subtable cell (parent table, row position, subfield).

    Missing parent/subfield metadata is rendered as None rather than omitted,
    so the payload shape stays stable for consumers.
    """
    ref: JSONObject = {
        "parent_que_id": None if table_field is None else table_field.que_id,
        "parent_que_title": None if table_field is None else table_field.que_title,
        "row_ordinal": row_ordinal,
        "que_id": que_id,
        "que_title": None if subfield is None else subfield.que_title,
        "que_type": None if subfield is None else subfield.que_type,
    }
    # row_id is optional context: only surfaced when the row actually has one.
    if row_id is not None:
        ref["row_id"] = row_id
    return ref
|
|
4220
|
+
|
|
4221
|
+
|
|
4222
|
+
def _unsupported_write_reason(que_type: int | None) -> str:
|
|
4223
|
+
if que_type == 14:
|
|
4224
|
+
return "time range fields require a backend-specific native payload shape that app-user tools do not build yet"
|
|
4225
|
+
if que_type == 34:
|
|
4226
|
+
return "image recognition fields are runtime AI fields and direct writes are not reliably persisted"
|
|
4227
|
+
if que_type == 35:
|
|
4228
|
+
return "image generation fields are runtime AI fields and direct writes are not reliably persisted"
|
|
4229
|
+
if que_type == 36:
|
|
4230
|
+
return "document parsing fields are runtime AI fields and direct writes are not reliably persisted"
|
|
4231
|
+
return "direct writes are not supported for this field type"
|
|
4232
|
+
|
|
4233
|
+
|
|
4234
|
+
def _unsupported_write_fix_hint(que_type: int | None) -> str:
|
|
4235
|
+
if que_type == 14:
|
|
4236
|
+
return "Avoid direct writes for time range fields until native payload support is added."
|
|
4237
|
+
if que_type in {34, 35, 36}:
|
|
4238
|
+
return "Avoid direct writes for AI/runtime fields, or submit through the product UI/workflow path that populates them."
|
|
4239
|
+
return "Avoid direct writes for this field type."
|
|
4240
|
+
|
|
4241
|
+
|
|
4242
|
+
def _parse_json_like(value: JSONValue) -> JSONValue:
    """Recursively coerce JSON-ish strings into structured values.

    Dicts and lists are walked recursively. A string is converted when it
    looks like a JSON object/array (brace/bracket delimited), a boolean
    literal, or a non-negative decimal integer; anything else — including
    unparseable JSON — is returned unchanged. Non-string scalars pass through.
    """
    if isinstance(value, dict):
        return {key: _parse_json_like(item) for key, item in value.items()}
    if isinstance(value, list):
        return [_parse_json_like(item) for item in value]
    if isinstance(value, str):
        text = value.strip()
        if not text:
            return value
        if (text.startswith("{") and text.endswith("}")) or (text.startswith("[") and text.endswith("]")):
            try:
                return _parse_json_like(cast(JSONValue, json.loads(text)))
            except json.JSONDecodeError:
                # Looked like JSON but wasn't; keep the original string.
                return value
        lowered = text.lower()
        if lowered in {"true", "false"}:
            return lowered == "true"
        # BUGFIX: use isdecimal(), not isdigit(). isdigit() also accepts
        # characters such as superscripts ("²") that int() rejects with
        # ValueError; isdecimal() is exactly the subset int() can parse.
        if text.isdecimal():
            return int(text)
    return value
|
|
4261
|
+
|
|
4262
|
+
|
|
4263
|
+
def _as_selector_list(value: JSONValue) -> list[str | int]:
    """Keep only the str/int members of a list; any other input yields []."""
    if not isinstance(value, list):
        return []
    return [cast(str | int, entry) for entry in value if isinstance(entry, (str, int))]
|
|
4267
|
+
|
|
4268
|
+
|
|
4269
|
+
def _as_object_list(value: JSONValue) -> list[JSONObject]:
    """Keep only the dict members of a list; any other input yields []."""
    if not isinstance(value, list):
        return []
    return [cast(JSONObject, entry) for entry in value if isinstance(entry, dict)]
|
|
4273
|
+
|
|
4274
|
+
|
|
4275
|
+
def _stringify_json(value: JSONValue) -> str:
|
|
4276
|
+
if value is None:
|
|
4277
|
+
return ""
|
|
4278
|
+
if isinstance(value, str):
|
|
4279
|
+
return value
|
|
4280
|
+
if isinstance(value, bool):
|
|
4281
|
+
return "true" if value else "false"
|
|
4282
|
+
if isinstance(value, (int, float)):
|
|
4283
|
+
return str(value)
|
|
4284
|
+
return json.dumps(value, ensure_ascii=False)
|
|
4285
|
+
|
|
4286
|
+
|
|
4287
|
+
def _normalized_text_similarity(left: str, right: str) -> float:
    """Jaccard similarity over character bigrams, in [0.0, 1.0].

    Empty operands score 0.0; byte-identical strings short-circuit to 1.0
    without computing bigrams.
    """
    if not left or not right:
        return 0.0
    if left == right:
        return 1.0
    lhs, rhs = _bigrams(left), _bigrams(right)
    if not lhs or not rhs:
        return 0.0
    shared = len(lhs & rhs)
    total = len(lhs | rhs)
    return shared / total if total else 0.0
|
|
4299
|
+
|
|
4300
|
+
|
|
4301
|
+
def _bigrams(text: str) -> set[str]:
|
|
4302
|
+
normalized = text.replace(" ", "")
|
|
4303
|
+
if len(normalized) <= 1:
|
|
4304
|
+
return {normalized}
|
|
4305
|
+
return {normalized[index : index + 2] for index in range(len(normalized) - 1)}
|