@midscene/core 1.2.2-beta-20260116114131.0 → 1.2.2-beta-20260119114334.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/agent/agent.mjs +4 -4
- package/dist/es/agent/agent.mjs.map +1 -1
- package/dist/es/agent/tasks.mjs +3 -4
- package/dist/es/agent/tasks.mjs.map +1 -1
- package/dist/es/agent/utils.mjs +1 -1
- package/dist/es/ai-model/auto-glm/index.mjs +2 -2
- package/dist/es/ai-model/auto-glm/planning.mjs +1 -1
- package/dist/es/ai-model/auto-glm/planning.mjs.map +1 -1
- package/dist/es/ai-model/auto-glm/prompt.mjs +8 -8
- package/dist/es/ai-model/auto-glm/prompt.mjs.map +1 -1
- package/dist/es/ai-model/auto-glm/util.mjs +6 -3
- package/dist/es/ai-model/auto-glm/util.mjs.map +1 -1
- package/dist/es/ai-model/index.mjs +2 -2
- package/dist/es/ai-model/inspect.mjs +12 -12
- package/dist/es/ai-model/inspect.mjs.map +1 -1
- package/dist/es/ai-model/llm-planning.mjs +51 -7
- package/dist/es/ai-model/llm-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/common.mjs +2 -2
- package/dist/es/ai-model/prompt/common.mjs.map +1 -1
- package/dist/es/ai-model/prompt/extraction.mjs +3 -1
- package/dist/es/ai-model/prompt/extraction.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-locator.mjs +5 -3
- package/dist/es/ai-model/prompt/llm-locator.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-planning.mjs +48 -52
- package/dist/es/ai-model/prompt/llm-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-section-locator.mjs +5 -3
- package/dist/es/ai-model/prompt/llm-section-locator.mjs.map +1 -1
- package/dist/es/ai-model/service-caller/index.mjs +12 -13
- package/dist/es/ai-model/service-caller/index.mjs.map +1 -1
- package/dist/es/ai-model/ui-tars-planning.mjs +2 -24
- package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -1
- package/dist/es/common.mjs +10 -9
- package/dist/es/common.mjs.map +1 -1
- package/dist/es/service/index.mjs +6 -6
- package/dist/es/service/index.mjs.map +1 -1
- package/dist/es/utils.mjs +2 -2
- package/dist/lib/agent/agent.js +3 -3
- package/dist/lib/agent/agent.js.map +1 -1
- package/dist/lib/agent/tasks.js +2 -3
- package/dist/lib/agent/tasks.js.map +1 -1
- package/dist/lib/agent/utils.js +1 -1
- package/dist/lib/ai-model/auto-glm/index.js +3 -0
- package/dist/lib/ai-model/auto-glm/planning.js +1 -1
- package/dist/lib/ai-model/auto-glm/planning.js.map +1 -1
- package/dist/lib/ai-model/auto-glm/prompt.js +8 -8
- package/dist/lib/ai-model/auto-glm/prompt.js.map +1 -1
- package/dist/lib/ai-model/auto-glm/util.js +10 -4
- package/dist/lib/ai-model/auto-glm/util.js.map +1 -1
- package/dist/lib/ai-model/index.js +0 -3
- package/dist/lib/ai-model/inspect.js +12 -12
- package/dist/lib/ai-model/inspect.js.map +1 -1
- package/dist/lib/ai-model/llm-planning.js +52 -5
- package/dist/lib/ai-model/llm-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/common.js +2 -2
- package/dist/lib/ai-model/prompt/common.js.map +1 -1
- package/dist/lib/ai-model/prompt/extraction.js +5 -3
- package/dist/lib/ai-model/prompt/extraction.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-locator.js +5 -3
- package/dist/lib/ai-model/prompt/llm-locator.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-planning.js +48 -52
- package/dist/lib/ai-model/prompt/llm-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-section-locator.js +5 -3
- package/dist/lib/ai-model/prompt/llm-section-locator.js.map +1 -1
- package/dist/lib/ai-model/service-caller/index.js +11 -12
- package/dist/lib/ai-model/service-caller/index.js.map +1 -1
- package/dist/lib/ai-model/ui-tars-planning.js +1 -26
- package/dist/lib/ai-model/ui-tars-planning.js.map +1 -1
- package/dist/lib/common.js +10 -9
- package/dist/lib/common.js.map +1 -1
- package/dist/lib/service/index.js +6 -6
- package/dist/lib/service/index.js.map +1 -1
- package/dist/lib/utils.js +2 -2
- package/dist/types/ai-model/auto-glm/index.d.ts +1 -1
- package/dist/types/ai-model/auto-glm/prompt.d.ts +3 -3
- package/dist/types/ai-model/auto-glm/util.d.ts +11 -5
- package/dist/types/ai-model/index.d.ts +1 -1
- package/dist/types/ai-model/llm-planning.d.ts +5 -1
- package/dist/types/ai-model/prompt/common.d.ts +2 -2
- package/dist/types/ai-model/prompt/llm-locator.d.ts +2 -2
- package/dist/types/ai-model/prompt/llm-planning.d.ts +3 -3
- package/dist/types/ai-model/prompt/llm-section-locator.d.ts +2 -2
- package/dist/types/ai-model/service-caller/index.d.ts +2 -2
- package/dist/types/ai-model/ui-tars-planning.d.ts +2 -3
- package/dist/types/common.d.ts +5 -5
- package/package.json +2 -2
- package/dist/es/ai-model/prompt/assertion.mjs +0 -31
- package/dist/es/ai-model/prompt/assertion.mjs.map +0 -1
- package/dist/lib/ai-model/prompt/assertion.js +0 -65
- package/dist/lib/ai-model/prompt/assertion.js.map +0 -1
- package/dist/types/ai-model/prompt/assertion.d.ts +0 -2
package/dist/lib/ai-model/llm-planning.js.map
@@ -1 +1 @@
- (previous source map; the embedded llm-planning.ts source requested a structured reply via callAIWithObjectResponse and keyed the screenshot padding and bbox filling on modelConfig.vlMode)
+ (updated source map; the embedded llm-planning.ts source now imports callAI and safeParseJson from the service caller, adds an exported parseXMLPlanningResponse helper that reads the <thought>, <note>, <log>, <error>, <action-type> and <action-param-json> tags out of the raw reply, and keys the same screenshot handling on modelConfig.modelFamily)
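For reference, the planner no longer asks the model for a JSON object: the embedded source above shows plan() calling callAI and converting the raw text reply itself. A condensed TypeScript sketch of the new parseXMLPlanningResponse helper, reconstructed from that embedded source (types simplified; safeParseJson is the package-internal helper it imports; the original also wraps the JSON parse in a try/catch that rethrows with context):

    import { safeParseJson } from './service-caller/index';

    // Pulls the planning fields out of the model's XML-style reply.
    export function parseXMLPlanningResponse(xmlString: string) {
      // Extract the inner text of a single tag, e.g. <log>...</log>
      const extractTag = (tagName: string): string | undefined => {
        const regex = new RegExp(`<${tagName}>([\\s\\S]*?)</${tagName}>`, 'i');
        const match = xmlString.match(regex);
        return match ? match[1].trim() : undefined;
      };

      const thought = extractTag('thought');
      const note = extractTag('note');
      const log = extractTag('log');
      const error = extractTag('error');
      const actionType = extractTag('action-type');
      const actionParamStr = extractTag('action-param-json');

      // `log` is the only mandatory field in the new format.
      if (!log) throw new Error('Missing required field: log');

      // <action-type>null</action-type> means "no action planned".
      let action: { type: string; param?: unknown } | null = null;
      if (actionType && actionType.toLowerCase() !== 'null') {
        const param = actionParamStr ? safeParseJson(actionParamStr, undefined) : undefined;
        action = { type: actionType.trim(), ...(param !== undefined ? { param } : {}) };
      }

      return {
        ...(thought ? { thought } : {}),
        ...(note ? { note } : {}),
        log,
        ...(error ? { error } : {}),
        action,
      };
    }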
package/dist/lib/ai-model/prompt/common.js
@@ -26,8 +26,8 @@ __webpack_require__.r(__webpack_exports__);
 __webpack_require__.d(__webpack_exports__, {
 bboxDescription: ()=>bboxDescription
 });
-function bboxDescription(
-if ('gemini' ===
+function bboxDescription(modelFamily) {
+if ('gemini' === modelFamily) return 'box_2d bounding box for the target element, should be [ymin, xmin, ymax, xmax] normalized to 0-1000.';
 return '2d bounding box as [xmin, ymin, xmax, ymax]';
 }
 exports.bboxDescription = __webpack_exports__.bboxDescription;
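A minimal usage sketch of the renamed helper; both return strings are taken from the hunk above, and passing undefined simply falls through to the default branch:

    // bboxDescription now keys off the model family instead of a VL-mode flag.
    const geminiHint = bboxDescription('gemini');
    // -> 'box_2d bounding box for the target element, should be [ymin, xmin, ymax, xmax] normalized to 0-1000.'
    const defaultHint = bboxDescription(undefined);
    // -> '2d bounding box as [xmin, ymin, xmax, ymax]'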
package/dist/lib/ai-model/prompt/common.js.map
@@ -1 +1 @@
- (previous source map)
+ (updated source map; the embedded common.ts now imports TModelFamily from '@midscene/shared/env' and types the bboxDescription parameter as TModelFamily | undefined, keeping the same 'gemini' and default return strings)
package/dist/lib/ai-model/prompt/extraction.js
@@ -24,11 +24,13 @@ var __webpack_require__ = {};
 var __webpack_exports__ = {};
 __webpack_require__.r(__webpack_exports__);
 __webpack_require__.d(__webpack_exports__, {
-extractDataQueryPrompt: ()=>extractDataQueryPrompt,
 extractDataSchema: ()=>extractDataSchema,
-systemPromptToExtract: ()=>systemPromptToExtract
+systemPromptToExtract: ()=>systemPromptToExtract,
+extractDataQueryPrompt: ()=>extractDataQueryPrompt
 });
+const env_namespaceObject = require("@midscene/shared/env");
 function systemPromptToExtract() {
+const preferredLanguage = (0, env_namespaceObject.getPreferredLanguage)();
 return `
 You are a versatile professional in software UI design and testing. Your outstanding contributions will impact the user experience of billions of users.
 
@@ -41,7 +43,7 @@ If the user provides multiple reference images, please carefully review the refe
 
 Return in the following JSON format:
 {
-thought: string, // the thinking process of the extraction, less then 300 words
+thought: string, // the thinking process of the extraction, less then 300 words. Use ${preferredLanguage} in this field.
 data: any, // the extracted data. Make sure both the value and scheme meet the DATA_DEMAND. If you want to write some description in this field, use the same language as the DATA_DEMAND.
 errors: [], // string[], error message if any
 }
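The practical effect is that the thought-field instruction of the extraction prompt is now rendered per configuration. A small illustration; the 'Chinese' value is only an assumed example of what getPreferredLanguage() might return, the diff itself does not show its possible values:

    import { getPreferredLanguage } from '@midscene/shared/env';

    // If getPreferredLanguage() returned, say, 'Chinese', the rendered prompt line would read:
    // "thought: string, // the thinking process of the extraction, less then 300 words. Use Chinese in this field."
    const thoughtLine = `thought: string, // the thinking process of the extraction, less then 300 words. Use ${getPreferredLanguage()} in this field.`;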
package/dist/lib/ai-model/prompt/extraction.js.map
@@ -1 +1 @@
- (previous source map)
+ (updated source map; the embedded extraction.ts now imports getPreferredLanguage from '@midscene/shared/env' and interpolates it into the thought-field instruction, while the worked examples and extractDataSchema are carried over)
package/dist/lib/ai-model/prompt/llm-locator.js
@@ -27,9 +27,11 @@ __webpack_require__.d(__webpack_exports__, {
 systemPromptToLocateElement: ()=>systemPromptToLocateElement,
 findElementPrompt: ()=>findElementPrompt
 });
+const env_namespaceObject = require("@midscene/shared/env");
 const external_common_js_namespaceObject = require("./common.js");
-function systemPromptToLocateElement(
-const
+function systemPromptToLocateElement(modelFamily) {
+const preferredLanguage = (0, env_namespaceObject.getPreferredLanguage)();
+const bboxComment = (0, external_common_js_namespaceObject.bboxDescription)(modelFamily);
 return `
 ## Role:
 You are an AI assistant that helps identify UI elements.
@@ -62,7 +64,7 @@ When no element is found:
 ```json
 {
 "bbox": [],
-"errors": ["I can see ..., but {some element} is not found"]
+"errors": ["I can see ..., but {some element} is not found. Use ${preferredLanguage}."]
 }
 ```
 `;
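Hypothetical call sites showing how the renamed parameter flows through the locator prompt; 'gemini' is the only family special-cased by bboxDescription in the common.js hunk above, any other value (or undefined) falls back to the generic bbox wording:

    // The locator prompt now varies along two axes:
    //  * bbox wording            -> bboxDescription(modelFamily)  ('gemini' gets the box_2d / 0-1000 form)
    //  * error-message language  -> getPreferredLanguage()
    const geminiPrompt = systemPromptToLocateElement('gemini');
    const genericPrompt = systemPromptToLocateElement(undefined);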
package/dist/lib/ai-model/prompt/llm-locator.js.map
@@ -1 +1 @@
- (previous source map)
+ (updated source map; the embedded llm-locator.ts now takes modelFamily: TModelFamily | undefined, reads getPreferredLanguage(), and feeds bboxDescription(modelFamily) and the preferred language into the locator prompt)
package/dist/lib/ai-model/prompt/llm-planning.js
@@ -27,19 +27,11 @@ __webpack_require__.d(__webpack_exports__, {
 descriptionForAction: ()=>descriptionForAction,
 systemPromptToTaskPlanning: ()=>systemPromptToTaskPlanning
 });
+const env_namespaceObject = require("@midscene/shared/env");
 const zod_schema_utils_namespaceObject = require("@midscene/shared/zod-schema-utils");
 const external_common_js_namespaceObject = require("./common.js");
-const
-
-'"note"?: string, // some important notes to finish the follow-up action should be written here, and the agent executing the subsequent steps will focus on this information. For example, the data extracted from the current screenshot which will be used in the follow-up action.',
-`"log": string, // a brief preamble to the user explaining what you’re about to do`,
-'"error"?: string, // Error messages about unexpected situations, if any. Only think it is an error when the situation is not foreseeable according to the instruction. Use the same language as the user\'s instruction.'
-];
-if (includeThought) fields.unshift('"thought": string, // your thought process about the next action');
-return fields.join('\n ');
-};
-const vlLocateParam = (vlMode)=>{
-if (vlMode) return `{bbox: [number, number, number, number], prompt: string } // ${(0, external_common_js_namespaceObject.bboxDescription)(vlMode)}`;
+const vlLocateParam = (modelFamily)=>{
+if (modelFamily) return `{bbox: [number, number, number, number], prompt: string } // ${(0, external_common_js_namespaceObject.bboxDescription)(modelFamily)}`;
 return "{ prompt: string /* description of the target element */ }";
 };
 const findDefaultValue = (field)=>{
@@ -100,18 +92,33 @@ const descriptionForAction = (action, locatorSchemaTypeDescription)=>{
 ${tab}${fields.join(`\n${tab}`)}
 `.trim();
 };
-async function systemPromptToTaskPlanning({ actionSpace,
-
-
+async function systemPromptToTaskPlanning({ actionSpace, modelFamily, includeBbox, includeThought }) {
+const preferredLanguage = (0, env_namespaceObject.getPreferredLanguage)();
+if (includeBbox && !modelFamily) throw new Error('modelFamily cannot be undefined when includeBbox is true. A valid modelFamily is required for bbox-based location.');
+const actionDescriptionList = actionSpace.map((action)=>descriptionForAction(action, vlLocateParam(includeBbox ? modelFamily : void 0)));
 const actionList = actionDescriptionList.join('\n');
+const thoughtFieldInstruction = `
+## About the \`thought\` field (reasoning process)
+
+The \`thought\` field captures your structured reasoning about the next action. It should include:
+
+1. **Overall task**: What is the overall instruction/goal?
+2. **Current observation**: What do I see on the current screen?
+3. **Progress**: What steps have been completed so far?
+4. **Next step**: What should the next step be and why?
+5. **Action selection**: Which Action Type should be used to accomplish this step?
+
+**Example thought structure:**
+"The overall task is to [task]. On the current screen, I can see [observations]. We have completed [steps]. The next step should be [next action] because [reason]. I will use [Action Type] to accomplish this."
+`;
 const logFieldInstruction = `
 ## About the \`log\` field (preamble message)
 
-The \`log\` field is a brief preamble message to the user explaining what you
+The \`log\` field is a brief preamble message to the user explaining what you're about to do. It should follow these principles and examples:
 
-- **Use
+- **Use ${preferredLanguage}**
 - **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words or Chinese characters for quick updates).
-- **Build on prior context**: if this is not the first action to be done, use the preamble message to connect the dots with what
+- **Build on prior context**: if this is not the first action to be done, use the preamble message to connect the dots with what's been done so far and create a sense of momentum and clarity for the user to understand your next actions.
 - **Keep your tone light, friendly and curious**: add small touches of personality in preambles feel collaborative and engaging.
 
 **Examples:**
@@ -121,11 +128,6 @@ The \`log\` field is a brief preamble message to the user explaining what you’
 - "Go back to find the login button"
 `;
 const shouldIncludeThought = includeThought ?? true;
-const commonOutputFields = buildCommonOutputFields(shouldIncludeThought);
-const exampleThoughtLine = shouldIncludeThought ? ` "thought": "The form has already been filled, I need to click the login button to login",
-` : '';
-const exampleThoughtLineWithNote = shouldIncludeThought ? ` "thought": "I need to note the titles in the current screenshot for further processing and scroll to find more titles",
-` : '';
 return `
 Target: User will give you an instruction, some screenshots and previous logs indicating what have been done. Your task is to plan the next one action according to current situation to accomplish the instruction.
 
@@ -143,51 +145,45 @@ Please tell what the next one action is (or null if no action should be done) to
 ## Supporting actions
 ${actionList}
 
+${shouldIncludeThought ? thoughtFieldInstruction : ''}
+
 ${logFieldInstruction}
 
 ## Return format
 
-Return in
-{
-
-
-
-
-
-// k-v style parameter fields
-},
-} | null
-}
+Return in XML format with the following structure:
+${shouldIncludeThought ? '<thought>Structured reasoning: overall task, current observation, progress, next step, action selection</thought>' : ''}
+<note>CRITICAL: If any information from the current screenshot will be needed in follow-up actions, you MUST record it here completely. The current screenshot will NOT be available in subsequent steps, so this note is your only way to preserve essential information for later use. Examples: extracted data, element states, content that needs to be referenced. Leave empty if no follow-up information is needed.</note>
+<log>a brief preamble to the user</log>
+<error>error messages (optional)</error>
+<action-type>the type of the action, or null if no action</action-type>
+<action-param-json>JSON object containing the action parameters</action-param-json>
 
 For example, if the instruction is to login and the form has already been filled, this is a valid return value:
 
+${shouldIncludeThought ? '<thought>The overall task is to login. On the current screen, I can see a login form with username and password fields already filled, and a login button. We have completed filling in the credentials. The next step should be to submit the login form by clicking the login button. I will use Tap action to click it.</thought>' : ''}<log>Click the login button</log>
+<action-type>Tap</action-type>
+<action-param-json>
 {
-
-
-"type": "Tap",
-"param": {
-"locate": {
-"prompt": "The login button"${vlMode && includeBbox ? ', "bbox": [100, 200, 300, 400]' : ''}
-}
-}
+"locate": {
+"prompt": "The login button"${modelFamily && includeBbox ? ', "bbox": [100, 200, 300, 400]' : ''}
 }
 }
+</action-param-json>
 
 For example, if the instruction is to find out every title in the screenshot, the return value should be:
 
+${shouldIncludeThought ? '<thought>The overall task is to find out every title in the screenshot. On the current screen, I can see several article titles: \'Hello, world!\', \'Midscene 101\', and \'Model strategy\'. We have just started and observed the visible titles. The next step should be to scroll down to find more titles that may not be currently visible. I will use Scroll action to reveal more content.</thought>' : ''}<note>The titles in the current screenshot are: 'Hello, world!', 'Midscene 101', 'Model strategy'</note>
+<log>Scroll to find more titles</log>
+<action-type>Scroll</action-type>
+<action-param-json>
 {
-
-
-
-
-"param": {
-"locate": {
-"prompt": "The page content area"
-},
-"direction": "down"
-}
-}
+"locate": {
+"prompt": "The page content area"
+},
+"direction": "down"
 }
+</action-param-json>
 `;
 }
 exports.descriptionForAction = __webpack_exports__.descriptionForAction;
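Taken together, this hunk replaces the planner's JSON return contract with an XML envelope whose only JSON fragment is the action parameters. A sample reply in the new format, modelled on the login example above, together with the object that the parseXMLPlanningResponse helper sketched earlier would derive from it (the bbox entry appears only when includeBbox is set for a bbox-capable model family; the wording of the sample thought and log is illustrative):

    <thought>The overall task is to login. The form is already filled, so the next step is to click the login button. I will use Tap to do this.</thought>
    <log>Click the login button</log>
    <action-type>Tap</action-type>
    <action-param-json>
    { "locate": { "prompt": "The login button", "bbox": [100, 200, 300, 400] } }
    </action-param-json>

    // parsed result handed back to the agent:
    {
      "thought": "The overall task is to login. The form is already filled, so the next step is to click the login button. I will use Tap to do this.",
      "log": "Click the login button",
      "action": { "type": "Tap", "param": { "locate": { "prompt": "The login button", "bbox": [100, 200, 300, 400] } } }
    }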
package/dist/lib/ai-model/prompt/llm-planning.js.map
@@ -1 +1 @@
- (previous source map; its embedded llm-planning.ts source still built the JSON output fields via buildCommonOutputFields and keyed the locate parameter description on vlMode)
Your task is to plan the next one action according to current situation to accomplish the instruction.\n\nPlease tell what the next one action is (or null if no action should be done) to do the tasks the instruction requires. \n\n## Rules\n\n- Don't give extra actions or plans beyond the instruction. For example, don't try to submit the form if the instruction is only to fill something.\n- Give just the next ONE action you should do\n- Consider the current screenshot and give the action that is most likely to accomplish the instruction. For example, if the next step is to click a button but it's not visible in the screenshot, you should try to find it first instead of give a click action.\n- Make sure the previous actions are completed successfully before performing the next step\n- If there are some error messages reported by the previous actions, don't give up, try parse a new action to recover. If the error persists for more than 5 times, you should think this is an error and set the \"error\" field to the error message.\n- Assertions are also important steps. When getting the assertion instruction, a solid conclusion is required. You should explicitly state your conclusion by calling the \"Print_Assert_Result\" action.\n\n## Supporting actions\n${actionList}\n\n${logFieldInstruction}\n\n## Return format\n\nReturn in JSON format:\n{\n ${commonOutputFields}\n \"action\": \n {\n \"type\": string, // the type of the action\n \"param\"?: { // The parameter of the action, if any\n // k-v style parameter fields\n }, \n } | null\n}\n\nFor example, if the instruction is to login and the form has already been filled, this is a valid return value:\n\n{\n${exampleThoughtLine} \"log\": \"Click the login button\",\n \"action\": {\n \"type\": \"Tap\",\n \"param\": {\n \"locate\": { \n \"prompt\": \"The login button\"${vlMode && includeBbox ? 
`, \"bbox\": [100, 200, 300, 400]` : ''}\n }\n }\n }\n}\n\nFor example, if the instruction is to find out every title in the screenshot, the return value should be:\n\n{\n${exampleThoughtLineWithNote} \"note\": \"The titles in the current screenshot are: 'Hello, world!', 'Midscene 101', 'Model strategy'\",\n \"log\": \"Scroll to find more titles\",\n \"action\": {\n \"type\": \"Scroll\",\n \"param\": {\n \"locate\": {\n \"prompt\": \"The page content area\"\n },\n \"direction\": \"down\"\n }\n }\n}\n`;\n}\n"],"names":["__webpack_require__","definition","key","Object","obj","prop","Symbol","buildCommonOutputFields","includeThought","fields","vlLocateParam","vlMode","bboxDescription","findDefaultValue","field","current","visited","Set","currentWithDef","descriptionForAction","action","locatorSchemaTypeDescription","tab","paramLines","schema","isZodObject","shape","isOptional","keyWithOptional","typeName","getZodTypeName","description","getZodDescription","defaultValue","hasDefault","undefined","paramLine","comments","defaultStr","JSON","line","paramDescription","systemPromptToTaskPlanning","actionSpace","includeBbox","Error","actionDescriptionList","actionList","logFieldInstruction","shouldIncludeThought","commonOutputFields","exampleThoughtLine","exampleThoughtLineWithNote"],"mappings":";;;IAAAA,oBAAoB,CAAC,GAAG,CAAC,UAASC;QACjC,IAAI,IAAIC,OAAOD,WACR,IAAGD,oBAAoB,CAAC,CAACC,YAAYC,QAAQ,CAACF,oBAAoB,CAAC,CAAC,UAASE,MACzEC,OAAO,cAAc,CAAC,UAASD,KAAK;YAAE,YAAY;YAAM,KAAKD,UAAU,CAACC,IAAI;QAAC;IAGzF;;;ICNAF,oBAAoB,CAAC,GAAG,CAACI,KAAKC,OAAUF,OAAO,SAAS,CAAC,cAAc,CAAC,IAAI,CAACC,KAAKC;;;ICClFL,oBAAoB,CAAC,GAAG,CAAC;QACxB,IAAG,AAAkB,eAAlB,OAAOM,UAA0BA,OAAO,WAAW,EACrDH,OAAO,cAAc,CAAC,UAASG,OAAO,WAAW,EAAE;YAAE,OAAO;QAAS;QAEtEH,OAAO,cAAc,CAAC,UAAS,cAAc;YAAE,OAAO;QAAK;IAC5D;;;;;;;;;;ACMA,MAAMI,0BAA0B,CAACC;IAC/B,MAAMC,SAAS;QACb;QACA,CAAC,iFAAiF,CAAC;QACnF;KACD;IAED,IAAID,gBACFC,OAAO,OAAO,CACZ;IAIJ,OAAOA,OAAO,IAAI,CAAC;AACrB;AAEA,MAAMC,gBAAgB,CAACC;IACrB,IAAIA,QACF,OAAO,CAAC,6DAA6D,EAAEC,AAAAA,IAAAA,mCAAAA,eAAAA,AAAAA,EAAgBD,SAAS;IAElG,OAAO;AACT;AAKA,MAAME,mBAAmB,CAACC;IACxB,IAAIC,UAAUD;IACd,MAAME,UAAU,IAAIC;IAEpB,MAAOF,WAAW,CAACC,QAAQ,GAAG,CAACD,SAAU;QACvCC,QAAQ,GAAG,CAACD;QACZ,MAAMG,iBAAiBH;QAQvB,IAAI,CAACG,eAAe,IAAI,EAAE,UAAU;QAEpC,IAAIA,AAAiC,iBAAjCA,eAAe,IAAI,CAAC,QAAQ,EAC9B,OAAOA,eAAe,IAAI,CAAC,YAAY;QAIzC,IACEA,AAAiC,kBAAjCA,eAAe,IAAI,CAAC,QAAQ,IAC5BA,AAAiC,kBAAjCA,eAAe,IAAI,CAAC,QAAQ,EAE5BH,UAAUG,eAAe,IAAI,CAAC,SAAS;aAEvC;IAEJ;AAGF;AAEO,MAAMC,uBAAuB,CAClCC,QACAC;IAEA,MAAMC,MAAM;IACZ,MAAMb,SAAmB,EAAE;IAG3BA,OAAO,IAAI,CAAC,CAAC,SAAS,EAAEW,OAAO,IAAI,CAAC,CAAC,CAAC;IAGtC,IAAIA,OAAO,WAAW,EAAE;QACtB,MAAMG,aAAuB,EAAE;QAG/B,MAAMC,SAASJ,OAAO,WAAW;QAIjC,MAAMK,cAAcD,OAAO,IAAI,EAAE,aAAa;QAE9C,IAAIC,eAAeD,OAAO,KAAK,EAAE;YAE/B,MAAME,QAAQF,OAAO,KAAK;YAE1B,KAAK,MAAM,CAACtB,KAAKY,MAAM,IAAIX,OAAO,OAAO,CAACuB,OACxC,IAAIZ,SAAS,AAAiB,YAAjB,OAAOA,OAAoB;gBAEtC,MAAMa,aACJ,AACE,cADF,OAAQb,MAAyC,UAAU,IAE1DA,MAAwC,UAAU;gBACrD,MAAMc,kBAAkBD,aAAa,GAAGzB,IAAI,CAAC,CAAC,GAAGA;gBAGjD,MAAM2B,WAAWC,AAAAA,IAAAA,iCAAAA,cAAAA,AAAAA,EAAehB,OAAOO;gBAGvC,MAAMU,cAAcC,AAAAA,IAAAA,iCAAAA,iBAAAA,AAAAA,EAAkBlB;gBAGtC,MAAMmB,eAAepB,iBAAiBC;gBACtC,MAAMoB,aAAaD,AAAiBE,WAAjBF;gBAGnB,IAAIG,YAAY,GAAGR,gBAAgB,EAAE,EAAEC,UAAU;gBACjD,MAAMQ,WAAqB,EAAE;gBAC7B,IAAIN,aACFM,SAAS,IAAI,CAACN;gBAEhB,IAAIG,YAAY;oBACd,MAAMI,aACJ,AAAwB,YAAxB,OAAOL,eACH,CAAC,CAAC,EAAEA,aAAa,CAAC,CAAC,GACnBM,KAAK,SAAS,CAACN;oBACrBI,SAAS,IAAI,CAAC,CAAC,SAAS,EAAEC,YAAY;gBACxC;gBACA,IAAID,SAAS,MAAM,GAAG,GACpBD,aAAa,CAAC,IAAI,EAAEC,SAAS,IAAI,CAAC,OAAO;gBAG3Cd,WAAW,IAAI,CAACa;YAClB;YAIF
,IAAIb,WAAW,MAAM,GAAG,GAAG;gBACzBd,OAAO,IAAI,CAAC;gBACZc,WAAW,OAAO,CAAC,CAACiB;oBAClB/B,OAAO,IAAI,CAAC,CAAC,IAAI,EAAE+B,MAAM;gBAC3B;YACF;QACF,OAAO;YAEL,MAAMX,WAAWC,AAAAA,IAAAA,iCAAAA,cAAAA,AAAAA,EAAeN;YAChC,MAAMO,cAAcC,AAAAA,IAAAA,iCAAAA,iBAAAA,AAAAA,EAAkBR;YAGtC,IAAIiB,mBAAmB,CAAC,SAAS,EAAEZ,UAAU;YAC7C,IAAIE,aACFU,oBAAoB,CAAC,IAAI,EAAEV,aAAa;YAE1CU,oBAAoB;YAEpBhC,OAAO,IAAI,CAACgC;QACd;IACF;IAEA,OAAO,CAAC,EAAE,EAAErB,OAAO,IAAI,CAAC,EAAE,EAAEA,OAAO,WAAW,IAAI,0BAA0B;AAC9E,EAAEE,MAAMb,OAAO,IAAI,CAAC,CAAC,EAAE,EAAEa,KAAK,EAAE;AAChC,CAAC,CAAC,IAAI;AACN;AAEO,eAAeoB,2BAA2B,EAC/CC,WAAW,EACXhC,MAAM,EACNiC,WAAW,EACXpC,cAAc,EAMf;IAEC,IAAIoC,eAAe,CAACjC,QAClB,MAAM,IAAIkC,MACR;IAIJ,MAAMC,wBAAwBH,YAAY,GAAG,CAAC,CAACvB,SACtCD,qBACLC,QACAV,cAAckC,cAAcjC,SAASwB;IAGzC,MAAMY,aAAaD,sBAAsB,IAAI,CAAC;IAE9C,MAAME,sBAAsB,CAAC;;;;;;;;;;;;;;;AAe/B,CAAC;IAEC,MAAMC,uBAAuBzC,kBAAkB;IAC/C,MAAM0C,qBAAqB3C,wBAAwB0C;IACnD,MAAME,qBAAqBF,uBACvB,CAAC;AACP,CAAC,GACK;IACJ,MAAMG,6BAA6BH,uBAC/B,CAAC;AACP,CAAC,GACK;IAEJ,OAAO,CAAC;;;;;;;;;;;;;;;AAeV,EAAEF,WAAW;;AAEb,EAAEC,oBAAoB;;;;;;EAMpB,EAAEE,mBAAmB;;;;;;;;;;;;;AAavB,EAAEC,mBAAmB;;;;;oCAKe,EAAExC,UAAUiC,cAAc,mCAAmC,GAAG;;;;;;;;;AASpG,EAAEQ,2BAA2B;;;;;;;;;;;;AAY7B,CAAC;AACD"}
+
{"version":3,"file":"ai-model/prompt/llm-planning.js","sources":["webpack/runtime/define_property_getters","webpack/runtime/has_own_property","webpack/runtime/make_namespace_object","../../../../src/ai-model/prompt/llm-planning.ts"],"sourcesContent":["__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n }\n }\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","import type { DeviceAction } from '@/types';\nimport type { TModelFamily } from '@midscene/shared/env';\nimport { getPreferredLanguage } from '@midscene/shared/env';\nimport {\n getZodDescription,\n getZodTypeName,\n} from '@midscene/shared/zod-schema-utils';\nimport type { ResponseFormatJSONSchema } from 'openai/resources/index';\nimport type { z } from 'zod';\nimport { bboxDescription } from './common';\n\nconst vlLocateParam = (modelFamily: TModelFamily | undefined) => {\n if (modelFamily) {\n return `{bbox: [number, number, number, number], prompt: string } // ${bboxDescription(modelFamily)}`;\n }\n return '{ prompt: string /* description of the target element */ }';\n};\n\n/**\n * Find ZodDefault in the wrapper chain and return its default value\n */\nconst findDefaultValue = (field: unknown): any | undefined => {\n let current = field;\n const visited = new Set<unknown>();\n\n while (current && !visited.has(current)) {\n visited.add(current);\n const currentWithDef = current as {\n _def?: {\n typeName?: string;\n defaultValue?: () => any;\n innerType?: unknown;\n };\n };\n\n if (!currentWithDef._def?.typeName) break;\n\n if (currentWithDef._def.typeName === 'ZodDefault') {\n return currentWithDef._def.defaultValue?.();\n }\n\n // Continue unwrapping if it's a wrapper type\n if (\n currentWithDef._def.typeName === 'ZodOptional' ||\n currentWithDef._def.typeName === 'ZodNullable'\n ) {\n current = currentWithDef._def.innerType;\n } else {\n break;\n }\n }\n\n return undefined;\n};\n\nexport const descriptionForAction = (\n action: DeviceAction<any>,\n locatorSchemaTypeDescription: string,\n) => {\n const tab = ' ';\n const fields: string[] = [];\n\n // Add the action type field\n fields.push(`- type: \"${action.name}\"`);\n\n // Handle paramSchema if it exists\n if (action.paramSchema) {\n const paramLines: string[] = [];\n\n // Check if paramSchema is a ZodObject with shape\n const schema = action.paramSchema as {\n _def?: { typeName?: string };\n shape?: Record<string, unknown>;\n };\n const isZodObject = schema._def?.typeName === 'ZodObject';\n\n if (isZodObject && schema.shape) {\n // Original logic for ZodObject schemas\n const shape = schema.shape;\n\n for (const [key, field] of Object.entries(shape)) {\n if (field && typeof field === 'object') {\n // Check if field is optional\n const isOptional =\n typeof (field as { isOptional?: () => boolean }).isOptional ===\n 'function' &&\n (field as { isOptional: () => boolean }).isOptional();\n const keyWithOptional = isOptional ? 
`${key}?` : key;\n\n // Get the type name using extracted helper\n const typeName = getZodTypeName(field, locatorSchemaTypeDescription);\n\n // Get description using extracted helper\n const description = getZodDescription(field as z.ZodTypeAny);\n\n // Check if field has a default value by searching the wrapper chain\n const defaultValue = findDefaultValue(field);\n const hasDefault = defaultValue !== undefined;\n\n // Build param line for this field\n let paramLine = `${keyWithOptional}: ${typeName}`;\n const comments: string[] = [];\n if (description) {\n comments.push(description);\n }\n if (hasDefault) {\n const defaultStr =\n typeof defaultValue === 'string'\n ? `\"${defaultValue}\"`\n : JSON.stringify(defaultValue);\n comments.push(`default: ${defaultStr}`);\n }\n if (comments.length > 0) {\n paramLine += ` // ${comments.join(', ')}`;\n }\n\n paramLines.push(paramLine);\n }\n }\n\n // Add the param section to fields if there are paramLines\n if (paramLines.length > 0) {\n fields.push('- param:');\n paramLines.forEach((line) => {\n fields.push(` - ${line}`);\n });\n }\n } else {\n // Handle non-object schemas (string, number, etc.)\n const typeName = getZodTypeName(schema);\n const description = getZodDescription(schema as z.ZodTypeAny);\n\n // For simple types, indicate that param should be the direct value, not an object\n let paramDescription = `- param: ${typeName}`;\n if (description) {\n paramDescription += ` // ${description}`;\n }\n paramDescription += ' (pass the value directly, not as an object)';\n\n fields.push(paramDescription);\n }\n }\n\n return `- ${action.name}, ${action.description || 'No description provided'}\n${tab}${fields.join(`\\n${tab}`)}\n`.trim();\n};\n\nexport async function systemPromptToTaskPlanning({\n actionSpace,\n modelFamily,\n includeBbox,\n includeThought,\n}: {\n actionSpace: DeviceAction<any>[];\n modelFamily: TModelFamily | undefined;\n includeBbox: boolean;\n includeThought?: boolean;\n}) {\n const preferredLanguage = getPreferredLanguage();\n\n // Validate parameters: if includeBbox is true, modelFamily must be defined\n if (includeBbox && !modelFamily) {\n throw new Error(\n 'modelFamily cannot be undefined when includeBbox is true. A valid modelFamily is required for bbox-based location.',\n );\n }\n\n const actionDescriptionList = actionSpace.map((action) => {\n return descriptionForAction(\n action,\n vlLocateParam(includeBbox ? modelFamily : undefined),\n );\n });\n const actionList = actionDescriptionList.join('\\n');\n\n const thoughtFieldInstruction = `\n## About the \\`thought\\` field (reasoning process)\n\nThe \\`thought\\` field captures your structured reasoning about the next action. It should include:\n\n1. **Overall task**: What is the overall instruction/goal?\n2. **Current observation**: What do I see on the current screen?\n3. **Progress**: What steps have been completed so far?\n4. **Next step**: What should the next step be and why?\n5. **Action selection**: Which Action Type should be used to accomplish this step?\n\n**Example thought structure:**\n\"The overall task is to [task]. On the current screen, I can see [observations]. We have completed [steps]. The next step should be [next action] because [reason]. I will use [Action Type] to accomplish this.\"\n`;\n\n const logFieldInstruction = `\n## About the \\`log\\` field (preamble message)\n\nThe \\`log\\` field is a brief preamble message to the user explaining what you're about to do. 
It should follow these principles and examples:\n\n- **Use ${preferredLanguage}**\n- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words or Chinese characters for quick updates).\n- **Build on prior context**: if this is not the first action to be done, use the preamble message to connect the dots with what's been done so far and create a sense of momentum and clarity for the user to understand your next actions.\n- **Keep your tone light, friendly and curious**: add small touches of personality in preambles feel collaborative and engaging.\n\n**Examples:**\n- \"Click the login button\"\n- \"Scroll to find the 'Yes' button in popup\"\n- \"Previous actions failed to find the 'Yes' button, i will try again\"\n- \"Go back to find the login button\"\n`;\n\n const shouldIncludeThought = includeThought ?? true;\n\n return `\nTarget: User will give you an instruction, some screenshots and previous logs indicating what have been done. Your task is to plan the next one action according to current situation to accomplish the instruction.\n\nPlease tell what the next one action is (or null if no action should be done) to do the tasks the instruction requires. \n\n## Rules\n\n- Don't give extra actions or plans beyond the instruction. For example, don't try to submit the form if the instruction is only to fill something.\n- Give just the next ONE action you should do\n- Consider the current screenshot and give the action that is most likely to accomplish the instruction. For example, if the next step is to click a button but it's not visible in the screenshot, you should try to find it first instead of give a click action.\n- Make sure the previous actions are completed successfully before performing the next step\n- If there are some error messages reported by the previous actions, don't give up, try parse a new action to recover. If the error persists for more than 5 times, you should think this is an error and set the \"error\" field to the error message.\n- Assertions are also important steps. When getting the assertion instruction, a solid conclusion is required. You should explicitly state your conclusion by calling the \"Print_Assert_Result\" action.\n\n## Supporting actions\n${actionList}\n\n${shouldIncludeThought ? thoughtFieldInstruction : ''}\n\n${logFieldInstruction}\n\n## Return format\n\nReturn in XML format with the following structure:\n${shouldIncludeThought ? '<thought>Structured reasoning: overall task, current observation, progress, next step, action selection</thought>' : ''}\n<note>CRITICAL: If any information from the current screenshot will be needed in follow-up actions, you MUST record it here completely. The current screenshot will NOT be available in subsequent steps, so this note is your only way to preserve essential information for later use. Examples: extracted data, element states, content that needs to be referenced. Leave empty if no follow-up information is needed.</note>\n<log>a brief preamble to the user</log>\n<error>error messages (optional)</error>\n<action-type>the type of the action, or null if no action</action-type>\n<action-param-json>JSON object containing the action parameters</action-param-json>\n\nFor example, if the instruction is to login and the form has already been filled, this is a valid return value:\n\n${shouldIncludeThought ? '<thought>The overall task is to login. On the current screen, I can see a login form with username and password fields already filled, and a login button. 
We have completed filling in the credentials. The next step should be to submit the login form by clicking the login button. I will use Tap action to click it.</thought>' : ''}<log>Click the login button</log>\n<action-type>Tap</action-type>\n<action-param-json>\n{\n \"locate\": {\n \"prompt\": \"The login button\"${modelFamily && includeBbox ? `, \"bbox\": [100, 200, 300, 400]` : ''}\n }\n}\n</action-param-json>\n\nFor example, if the instruction is to find out every title in the screenshot, the return value should be:\n\n${shouldIncludeThought ? '<thought>The overall task is to find out every title in the screenshot. On the current screen, I can see several article titles: \\'Hello, world!\\', \\'Midscene 101\\', and \\'Model strategy\\'. We have just started and observed the visible titles. The next step should be to scroll down to find more titles that may not be currently visible. I will use Scroll action to reveal more content.</thought>' : ''}<note>The titles in the current screenshot are: 'Hello, world!', 'Midscene 101', 'Model strategy'</note>\n<log>Scroll to find more titles</log>\n<action-type>Scroll</action-type>\n<action-param-json>\n{\n \"locate\": {\n \"prompt\": \"The page content area\"\n },\n \"direction\": \"down\"\n}\n</action-param-json>\n`;\n}\n"],"names":["__webpack_require__","definition","key","Object","obj","prop","Symbol","vlLocateParam","modelFamily","bboxDescription","findDefaultValue","field","current","visited","Set","currentWithDef","descriptionForAction","action","locatorSchemaTypeDescription","tab","fields","paramLines","schema","isZodObject","shape","isOptional","keyWithOptional","typeName","getZodTypeName","description","getZodDescription","defaultValue","hasDefault","undefined","paramLine","comments","defaultStr","JSON","line","paramDescription","systemPromptToTaskPlanning","actionSpace","includeBbox","includeThought","preferredLanguage","getPreferredLanguage","Error","actionDescriptionList","actionList","thoughtFieldInstruction","logFieldInstruction","shouldIncludeThought"],"mappings":";;;IAAAA,oBAAoB,CAAC,GAAG,CAAC,UAASC;QACjC,IAAI,IAAIC,OAAOD,WACR,IAAGD,oBAAoB,CAAC,CAACC,YAAYC,QAAQ,CAACF,oBAAoB,CAAC,CAAC,UAASE,MACzEC,OAAO,cAAc,CAAC,UAASD,KAAK;YAAE,YAAY;YAAM,KAAKD,UAAU,CAACC,IAAI;QAAC;IAGzF;;;ICNAF,oBAAoB,CAAC,GAAG,CAACI,KAAKC,OAAUF,OAAO,SAAS,CAAC,cAAc,CAAC,IAAI,CAACC,KAAKC;;;ICClFL,oBAAoB,CAAC,GAAG,CAAC;QACxB,IAAG,AAAkB,eAAlB,OAAOM,UAA0BA,OAAO,WAAW,EACrDH,OAAO,cAAc,CAAC,UAASG,OAAO,WAAW,EAAE;YAAE,OAAO;QAAS;QAEtEH,OAAO,cAAc,CAAC,UAAS,cAAc;YAAE,OAAO;QAAK;IAC5D;;;;;;;;;;;ACKA,MAAMI,gBAAgB,CAACC;IACrB,IAAIA,aACF,OAAO,CAAC,6DAA6D,EAAEC,AAAAA,IAAAA,mCAAAA,eAAAA,AAAAA,EAAgBD,cAAc;IAEvG,OAAO;AACT;AAKA,MAAME,mBAAmB,CAACC;IACxB,IAAIC,UAAUD;IACd,MAAME,UAAU,IAAIC;IAEpB,MAAOF,WAAW,CAACC,QAAQ,GAAG,CAACD,SAAU;QACvCC,QAAQ,GAAG,CAACD;QACZ,MAAMG,iBAAiBH;QAQvB,IAAI,CAACG,eAAe,IAAI,EAAE,UAAU;QAEpC,IAAIA,AAAiC,iBAAjCA,eAAe,IAAI,CAAC,QAAQ,EAC9B,OAAOA,eAAe,IAAI,CAAC,YAAY;QAIzC,IACEA,AAAiC,kBAAjCA,eAAe,IAAI,CAAC,QAAQ,IAC5BA,AAAiC,kBAAjCA,eAAe,IAAI,CAAC,QAAQ,EAE5BH,UAAUG,eAAe,IAAI,CAAC,SAAS;aAEvC;IAEJ;AAGF;AAEO,MAAMC,uBAAuB,CAClCC,QACAC;IAEA,MAAMC,MAAM;IACZ,MAAMC,SAAmB,EAAE;IAG3BA,OAAO,IAAI,CAAC,CAAC,SAAS,EAAEH,OAAO,IAAI,CAAC,CAAC,CAAC;IAGtC,IAAIA,OAAO,WAAW,EAAE;QACtB,MAAMI,aAAuB,EAAE;QAG/B,MAAMC,SAASL,OAAO,WAAW;QAIjC,MAAMM,cAAcD,OAAO,IAAI,EAAE,aAAa;QAE9C,IAAIC,eAAeD,OAAO,KAAK,EAAE;YAE/B,MAAME,QAAQF,OAAO,KAAK;YAE1B,KAAK,MAAM,CAACpB,KAAKS,MAAM,IAAIR,OAAO,OAAO,CAACqB,OACxC,IAAIb,SAAS,AAAiB,YAAjB,OAAOA,OAAoB;gBAEtC,MAAMc,aACJ,AACE,cADF,OAAQd,MAAyC,UAAU,IAE1DA,MAAwC,UAAU;gBACrD,MA
AMe,kBAAkBD,aAAa,GAAGvB,IAAI,CAAC,CAAC,GAAGA;gBAGjD,MAAMyB,WAAWC,AAAAA,IAAAA,iCAAAA,cAAAA,AAAAA,EAAejB,OAAOO;gBAGvC,MAAMW,cAAcC,AAAAA,IAAAA,iCAAAA,iBAAAA,AAAAA,EAAkBnB;gBAGtC,MAAMoB,eAAerB,iBAAiBC;gBACtC,MAAMqB,aAAaD,AAAiBE,WAAjBF;gBAGnB,IAAIG,YAAY,GAAGR,gBAAgB,EAAE,EAAEC,UAAU;gBACjD,MAAMQ,WAAqB,EAAE;gBAC7B,IAAIN,aACFM,SAAS,IAAI,CAACN;gBAEhB,IAAIG,YAAY;oBACd,MAAMI,aACJ,AAAwB,YAAxB,OAAOL,eACH,CAAC,CAAC,EAAEA,aAAa,CAAC,CAAC,GACnBM,KAAK,SAAS,CAACN;oBACrBI,SAAS,IAAI,CAAC,CAAC,SAAS,EAAEC,YAAY;gBACxC;gBACA,IAAID,SAAS,MAAM,GAAG,GACpBD,aAAa,CAAC,IAAI,EAAEC,SAAS,IAAI,CAAC,OAAO;gBAG3Cd,WAAW,IAAI,CAACa;YAClB;YAIF,IAAIb,WAAW,MAAM,GAAG,GAAG;gBACzBD,OAAO,IAAI,CAAC;gBACZC,WAAW,OAAO,CAAC,CAACiB;oBAClBlB,OAAO,IAAI,CAAC,CAAC,IAAI,EAAEkB,MAAM;gBAC3B;YACF;QACF,OAAO;YAEL,MAAMX,WAAWC,AAAAA,IAAAA,iCAAAA,cAAAA,AAAAA,EAAeN;YAChC,MAAMO,cAAcC,AAAAA,IAAAA,iCAAAA,iBAAAA,AAAAA,EAAkBR;YAGtC,IAAIiB,mBAAmB,CAAC,SAAS,EAAEZ,UAAU;YAC7C,IAAIE,aACFU,oBAAoB,CAAC,IAAI,EAAEV,aAAa;YAE1CU,oBAAoB;YAEpBnB,OAAO,IAAI,CAACmB;QACd;IACF;IAEA,OAAO,CAAC,EAAE,EAAEtB,OAAO,IAAI,CAAC,EAAE,EAAEA,OAAO,WAAW,IAAI,0BAA0B;AAC9E,EAAEE,MAAMC,OAAO,IAAI,CAAC,CAAC,EAAE,EAAED,KAAK,EAAE;AAChC,CAAC,CAAC,IAAI;AACN;AAEO,eAAeqB,2BAA2B,EAC/CC,WAAW,EACXjC,WAAW,EACXkC,WAAW,EACXC,cAAc,EAMf;IACC,MAAMC,oBAAoBC,AAAAA,IAAAA,oBAAAA,oBAAAA,AAAAA;IAG1B,IAAIH,eAAe,CAAClC,aAClB,MAAM,IAAIsC,MACR;IAIJ,MAAMC,wBAAwBN,YAAY,GAAG,CAAC,CAACxB,SACtCD,qBACLC,QACAV,cAAcmC,cAAclC,cAAcyB;IAG9C,MAAMe,aAAaD,sBAAsB,IAAI,CAAC;IAE9C,MAAME,0BAA0B,CAAC;;;;;;;;;;;;;AAanC,CAAC;IAEC,MAAMC,sBAAsB,CAAC;;;;;QAKvB,EAAEN,kBAAkB;;;;;;;;;;AAU5B,CAAC;IAEC,MAAMO,uBAAuBR,kBAAkB;IAE/C,OAAO,CAAC;;;;;;;;;;;;;;;AAeV,EAAEK,WAAW;;AAEb,EAAEG,uBAAuBF,0BAA0B,GAAG;;AAEtD,EAAEC,oBAAoB;;;;;AAKtB,EAAEC,uBAAuB,sHAAsH,GAAG;;;;;;;;;AASlJ,EAAEA,uBAAuB,yUAAyU,GAAG;;;;;gCAKrU,EAAE3C,eAAekC,cAAc,mCAAmC,GAAG;;;;;;;AAOrG,EAAES,uBAAuB,iZAAiZ,GAAG;;;;;;;;;;;AAW7a,CAAC;AACD"}
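The change to `llm-planning` above swaps the planner's return contract from a single JSON object to tag-delimited fields (`<thought>`, `<note>`, `<log>`, `<error>`, `<action-type>`, `<action-param-json>`) and renames the `vlMode` parameter of `systemPromptToTaskPlanning` to `modelFamily`. A minimal, stand-alone sketch of what a consumer of the new reply format could look like is below; `parsePlanningResponse` and `PlannedAction` are illustrative names, not exports of this package.

```ts
// Hypothetical stand-alone sketch (not the package's own parser): extracts the
// tag-based planning reply that the updated prompt asks the model to produce,
// replacing the previous JSON return format.
type PlannedAction = {
  thought?: string;
  note?: string;
  log?: string;
  error?: string;
  actionType: string | null;
  actionParam: Record<string, unknown> | null;
};

function readTag(response: string, tag: string): string | undefined {
  // Non-greedy match so several tags in one reply are handled independently.
  const match = response.match(new RegExp(`<${tag}>([\\s\\S]*?)</${tag}>`));
  return match?.[1].trim();
}

function parsePlanningResponse(response: string): PlannedAction {
  const actionType = readTag(response, 'action-type') ?? null;
  const paramJson = readTag(response, 'action-param-json');
  return {
    thought: readTag(response, 'thought'),
    note: readTag(response, 'note'),
    log: readTag(response, 'log'),
    error: readTag(response, 'error'),
    actionType: actionType === 'null' ? null : actionType,
    actionParam: paramJson
      ? (JSON.parse(paramJson) as Record<string, unknown>)
      : null,
  };
}

// Example reply in the new format (taken from the example in the updated prompt):
const sample = [
  '<log>Click the login button</log>',
  '<action-type>Tap</action-type>',
  '<action-param-json>{ "locate": { "prompt": "The login button" } }</action-param-json>',
].join('\n');

console.log(parsePlanningResponse(sample).actionType); // "Tap"
```

The `<note>` tag is the only place where screenshot-derived data survives into later steps, which is why the new prompt marks it as critical.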
@@ -27,9 +27,11 @@ __webpack_require__.d(__webpack_exports__, {
  sectionLocatorInstruction: ()=>sectionLocatorInstruction,
  systemPromptToLocateSection: ()=>systemPromptToLocateSection
  });
+ const env_namespaceObject = require("@midscene/shared/env");
  const external_common_js_namespaceObject = require("./common.js");
- function systemPromptToLocateSection(
- const
+ function systemPromptToLocateSection(modelFamily) {
+ const preferredLanguage = (0, env_namespaceObject.getPreferredLanguage)();
+ const bboxFormat = (0, external_common_js_namespaceObject.bboxDescription)(modelFamily);
  return `
  ## Role:
  You are an AI assistant that helps identify UI elements.
@@ -53,7 +55,7 @@ You are an AI assistant that helps identify UI elements.
  Fields:
  * \`bbox\` - Bounding box of the section containing the target element
  * \`references_bbox\` - Optional array of bounding boxes for reference elements
- * \`error\` - Optional error message if the section cannot be found
+ * \`error\` - Optional error message if the section cannot be found. Use ${preferredLanguage}.

  Example:
  If the description is "delete button on the second row with title 'Peter'", return:
@@ -1 +1 @@
-
{"version":3,"file":"ai-model/prompt/llm-section-locator.js","sources":["webpack/runtime/define_property_getters","webpack/runtime/has_own_property","webpack/runtime/make_namespace_object","../../../../src/ai-model/prompt/llm-section-locator.ts"],"sourcesContent":["__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n }\n }\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","import type {
+
{"version":3,"file":"ai-model/prompt/llm-section-locator.js","sources":["webpack/runtime/define_property_getters","webpack/runtime/has_own_property","webpack/runtime/make_namespace_object","../../../../src/ai-model/prompt/llm-section-locator.ts"],"sourcesContent":["__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n }\n }\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","import type { TModelFamily } from '@midscene/shared/env';\nimport { getPreferredLanguage } from '@midscene/shared/env';\nimport { bboxDescription } from './common';\n\nexport function systemPromptToLocateSection(\n modelFamily: TModelFamily | undefined,\n) {\n const preferredLanguage = getPreferredLanguage();\n const bboxFormat = bboxDescription(modelFamily);\n return `\n## Role:\nYou are an AI assistant that helps identify UI elements.\n\n## Objective:\n- Find a section containing the target element\n- If the description mentions reference elements, also locate sections containing those references\n\n## Output Format:\n\\`\\`\\`json\n{\n \"bbox\": [number, number, number, number], // ${bboxFormat}\n \"references_bbox\"?: [\n [number, number, number, number],\n ...\n ],\n \"error\"?: string\n}\n\\`\\`\\`\n\nFields:\n* \\`bbox\\` - Bounding box of the section containing the target element\n* \\`references_bbox\\` - Optional array of bounding boxes for reference elements\n* \\`error\\` - Optional error message if the section cannot be found. Use ${preferredLanguage}.\n\nExample:\nIf the description is \"delete button on the second row with title 'Peter'\", return:\n\\`\\`\\`json\n{\n \"bbox\": [100, 100, 200, 200],\n \"references_bbox\": [[100, 100, 200, 200]]\n}\n\\`\\`\\`\n`;\n}\n\nexport const sectionLocatorInstruction = (sectionDescription: string) =>\n `Find section containing: ${sectionDescription}`;\n"],"names":["__webpack_require__","definition","key","Object","obj","prop","Symbol","systemPromptToLocateSection","modelFamily","preferredLanguage","getPreferredLanguage","bboxFormat","bboxDescription","sectionLocatorInstruction","sectionDescription"],"mappings":";;;IAAAA,oBAAoB,CAAC,GAAG,CAAC,UAASC;QACjC,IAAI,IAAIC,OAAOD,WACR,IAAGD,oBAAoB,CAAC,CAACC,YAAYC,QAAQ,CAACF,oBAAoB,CAAC,CAAC,UAASE,MACzEC,OAAO,cAAc,CAAC,UAASD,KAAK;YAAE,YAAY;YAAM,KAAKD,UAAU,CAACC,IAAI;QAAC;IAGzF;;;ICNAF,oBAAoB,CAAC,GAAG,CAACI,KAAKC,OAAUF,OAAO,SAAS,CAAC,cAAc,CAAC,IAAI,CAACC,KAAKC;;;ICClFL,oBAAoB,CAAC,GAAG,CAAC;QACxB,IAAG,AAAkB,eAAlB,OAAOM,UAA0BA,OAAO,WAAW,EACrDH,OAAO,cAAc,CAAC,UAASG,OAAO,WAAW,EAAE;YAAE,OAAO;QAAS;QAEtEH,OAAO,cAAc,CAAC,UAAS,cAAc;YAAE,OAAO;QAAK;IAC5D;;;;;;;;;;ACFO,SAASI,4BACdC,WAAqC;IAErC,MAAMC,oBAAoBC,AAAAA,IAAAA,oBAAAA,oBAAAA,AAAAA;IAC1B,MAAMC,aAAaC,AAAAA,IAAAA,mCAAAA,eAAAA,AAAAA,EAAgBJ;IACnC,OAAO,CAAC;;;;;;;;;;;gDAWsC,EAAEG,WAAW;;;;;;;;;;;;yEAYY,EAAEF,kBAAkB;;;;;;;;;;AAU7F,CAAC;AACD;AAEO,MAAMI,4BAA4B,CAACC,qBACxC,CAAC,yBAAyB,EAAEA,oBAAoB"}
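`systemPromptToLocateSection` now takes a `modelFamily` (previously a vlMode value) and folds the preferred language into the `error` field instruction, but the JSON reply shape it asks the model for is otherwise unchanged. A stand-alone sketch of that shape, using illustrative names rather than package exports:

```ts
// Stand-alone sketch of the reply shape the updated section-locator prompt asks
// the model for. `SectionLocatorResult` is an illustrative name, not an export
// of this package; response parsing itself lives in the service-caller.
type Bbox = [number, number, number, number];

interface SectionLocatorResult {
  bbox: Bbox;               // bounding box of the section containing the target element
  references_bbox?: Bbox[]; // optional bounding boxes for reference elements
  error?: string;           // per this diff, now expected in the preferred language
}

// Example mirroring the prompt's own sample reply for
// "delete button on the second row with title 'Peter'":
const reply: SectionLocatorResult = {
  bbox: [100, 100, 200, 200],
  references_bbox: [[100, 100, 200, 200]],
};

console.log(reply.bbox.length); // 4
```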
@@ -49,7 +49,7 @@ const external_openai_namespaceObject = require("openai");
  var external_openai_default = /*#__PURE__*/ __webpack_require__.n(external_openai_namespaceObject);
  const util_js_namespaceObject = require("../auto-glm/util.js");
  async function createChatClient({ modelConfig }) {
- const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion
+ const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion, modelFamily, createOpenAIClient, timeout } = modelConfig;
  let proxyAgent;
  const debugProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy');
  const sanitizeProxyUrl = (url)=>{
@@ -143,13 +143,12 @@ async function createChatClient({ modelConfig }) {
  completion: openai.chat.completions,
  modelName,
  modelDescription,
-
- vlMode,
+ uiTarsModelVersion,
  modelFamily
  };
  }
  async function callAI(messages, modelConfig, options) {
- const { completion, modelName, modelDescription,
+ const { completion, modelName, modelDescription, uiTarsModelVersion, modelFamily } = await createChatClient({
  modelConfig
  });
  const maxTokens = env_namespaceObject.globalConfigManager.getEnvConfigValueAsNumber(env_namespaceObject.MIDSCENE_MODEL_MAX_TOKENS) ?? env_namespaceObject.globalConfigManager.getEnvConfigValueAsNumber(env_namespaceObject.OPENAI_MAX_TOKENS);
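The destructuring line in this hunk enumerates everything `createChatClient` now reads from `modelConfig`, with `modelFamily` taking over the role `vlMode` played in the previous build. A rough sketch of that shape, for orientation only; field types and optionality below are guesses, not the package's published type:

```ts
// Rough sketch of the modelConfig fields consumed by the updated createChatClient
// and callAI in this diff. Types and optionality are assumptions; the
// authoritative type ships with the package itself.
interface ModelConfigSketch {
  modelName: string;
  modelFamily?: string;              // appears to replace the earlier vlMode field
  modelDescription?: string;
  uiTarsModelVersion?: string;
  openaiBaseURL?: string;
  openaiApiKey?: string;
  openaiExtraConfig?: Record<string, unknown>;
  socksProxy?: string;
  httpProxy?: string;
  createOpenAIClient?: (...args: unknown[]) => unknown; // custom client factory, signature assumed
  timeout?: number;
  retryCount?: number;               // callAI falls back to 1
  retryInterval?: number;            // callAI falls back to 2000 (ms)
}

// Illustrative value with placeholder endpoint and key:
const exampleConfig: ModelConfigSketch = {
  modelName: 'your-model-name',
  modelFamily: 'qwen2.5-vl',
  openaiBaseURL: 'https://example.com/v1',
  openaiApiKey: 'sk-...',
};
console.log(exampleConfig.modelFamily); // "qwen2.5-vl"
```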
@@ -182,11 +181,11 @@ async function callAI(messages, modelConfig, options) {
  temperature,
  stream: !!isStreaming,
  max_tokens: maxTokens,
- ...'qwen2.5-vl' ===
+ ...'qwen2.5-vl' === modelFamily ? {
  vl_high_resolution_images: true
  } : {}
  };
- if ((0, util_js_namespaceObject.isAutoGLM)(
+ if ((0, util_js_namespaceObject.isAutoGLM)(modelFamily)) {
  commonConfig.top_p = 0.85;
  commonConfig.frequency_penalty = 0.2;
  }
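This hunk shows the two request-level branches that now key off `modelFamily`: Qwen2.5-VL requests get `vl_high_resolution_images: true`, and AutoGLM-family models get `top_p` and `frequency_penalty` overrides. A stand-alone sketch of that branching follows; the function name and the `'auto-glm'` family string are assumptions, and the package itself uses an internal `isAutoGLM()` helper rather than a string comparison:

```ts
// Stand-alone sketch of the per-family request tweaks visible in this hunk.
// The real code mutates a `commonConfig` object inside callAI.
type ModelFamily = 'qwen2.5-vl' | 'doubao-vision' | 'auto-glm' | (string & {});

interface CommonRequestConfig {
  temperature?: number;
  max_tokens?: number;
  stream?: boolean;
  vl_high_resolution_images?: boolean;
  top_p?: number;
  frequency_penalty?: number;
}

function buildFamilySpecificConfig(
  modelFamily: ModelFamily | undefined,
  base: CommonRequestConfig,
): CommonRequestConfig {
  const config: CommonRequestConfig = {
    ...base,
    // Mirrors: ...('qwen2.5-vl' === modelFamily ? { vl_high_resolution_images: true } : {})
    ...(modelFamily === 'qwen2.5-vl' ? { vl_high_resolution_images: true } : {}),
  };
  // Mirrors the isAutoGLM(modelFamily) branch; 'auto-glm' is an assumed family value.
  if (modelFamily === 'auto-glm') {
    config.top_p = 0.85;
    config.frequency_penalty = 0.2;
  }
  return config;
}

console.log(buildFamilySpecificConfig('qwen2.5-vl', { temperature: 0.1 }));
// -> { temperature: 0.1, vl_high_resolution_images: true }
```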
@@ -248,7 +247,7 @@ async function callAI(messages, modelConfig, options) {
  }
  }
  content = accumulated;
- debugProfileStats(`streaming model, ${modelName}, mode, ${
+ debugProfileStats(`streaming model, ${modelName}, mode, ${modelFamily || 'default'}, cost-ms, ${timeCost}, temperature, ${temperature ?? ''}`);
  } else {
  const retryCount = modelConfig.retryCount ?? 1;
  const retryInterval = modelConfig.retryInterval ?? 2000;
@@ -262,7 +261,7 @@ async function callAI(messages, modelConfig, options) {
  ...deepThinkConfig
  });
  timeCost = Date.now() - startTime;
- debugProfileStats(`model, ${modelName}, mode, ${
+ debugProfileStats(`model, ${modelName}, mode, ${modelFamily || 'default'}, ui-tars-version, ${uiTarsModelVersion}, prompt-tokens, ${result.usage?.prompt_tokens || ''}, completion-tokens, ${result.usage?.completion_tokens || ''}, total-tokens, ${result.usage?.total_tokens || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}, temperature, ${temperature ?? ''}`);
  debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
  if (!result.choices) throw new Error(`invalid response from LLM service: ${JSON.stringify(result)}`);
  content = result.choices[0].message.content;
@@ -308,8 +307,8 @@ async function callAIWithObjectResponse(messages, modelConfig, options) {
  deepThink: options?.deepThink
  });
  (0, utils_namespaceObject.assert)(response, 'empty response');
- const
- const jsonContent = safeParseJson(response.content,
+ const modelFamily = modelConfig.modelFamily;
+ const jsonContent = safeParseJson(response.content, modelFamily);
  (0, utils_namespaceObject.assert)('object' == typeof jsonContent, `failed to parse json response from model (${modelConfig.modelName}): ${response.content}`);
  return {
  content: jsonContent,
@@ -402,7 +401,7 @@ function normalizeJsonObject(obj) {
  if ('string' == typeof obj) return obj.trim();
  return obj;
  }
- function safeParseJson(input,
+ function safeParseJson(input, modelFamily) {
  const cleanJsonString = extractJSONFromCodeBlock(input);
  if (cleanJsonString?.match(/\((\d+),(\d+)\)/)) return cleanJsonString.match(/\((\d+),(\d+)\)/)?.slice(1).map(Number);
  let parsed;
@@ -419,7 +418,7 @@ function safeParseJson(input, vlMode) {
  } catch (error) {
  lastError = error;
  }
- if ('doubao-vision' ===
+ if ('doubao-vision' === modelFamily || (0, util_js_namespaceObject.isUITars)(modelFamily)) {
  const jsonString = preprocessDoubaoBboxJson(cleanJsonString);
  try {
  parsed = JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(jsonString));