@midscene/core 0.28.11 → 0.28.12-beta-20250923080328.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/dist/es/agent/agent.mjs +1 -1
  2. package/dist/es/agent/agent.mjs.map +1 -1
  3. package/dist/es/agent/tasks.mjs +19 -140
  4. package/dist/es/agent/tasks.mjs.map +1 -1
  5. package/dist/es/agent/utils.mjs +1 -1
  6. package/dist/es/ai-model/conversation-history.mjs +58 -0
  7. package/dist/es/ai-model/conversation-history.mjs.map +1 -0
  8. package/dist/es/ai-model/index.mjs +2 -1
  9. package/dist/es/ai-model/llm-planning.mjs +23 -3
  10. package/dist/es/ai-model/llm-planning.mjs.map +1 -1
  11. package/dist/es/ai-model/ui-tars-planning.mjs +26 -6
  12. package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -1
  13. package/dist/es/utils.mjs +2 -2
  14. package/dist/lib/agent/agent.js +1 -1
  15. package/dist/lib/agent/agent.js.map +1 -1
  16. package/dist/lib/agent/tasks.js +18 -139
  17. package/dist/lib/agent/tasks.js.map +1 -1
  18. package/dist/lib/agent/utils.js +1 -1
  19. package/dist/lib/ai-model/conversation-history.js +92 -0
  20. package/dist/lib/ai-model/conversation-history.js.map +1 -0
  21. package/dist/lib/ai-model/index.js +6 -2
  22. package/dist/lib/ai-model/llm-planning.js +23 -3
  23. package/dist/lib/ai-model/llm-planning.js.map +1 -1
  24. package/dist/lib/ai-model/ui-tars-planning.js +26 -6
  25. package/dist/lib/ai-model/ui-tars-planning.js.map +1 -1
  26. package/dist/lib/utils.js +2 -2
  27. package/dist/types/agent/tasks.d.ts +3 -17
  28. package/dist/types/ai-model/conversation-history.d.ts +18 -0
  29. package/dist/types/ai-model/index.d.ts +1 -0
  30. package/dist/types/ai-model/llm-planning.d.ts +2 -0
  31. package/dist/types/ai-model/ui-tars-planning.d.ts +6 -18
  32. package/package.json +3 -3
@@ -1,4 +1,3 @@
1
- import { type ChatCompletionMessageParam } from '../ai-model';
2
1
  import type { AbstractInterface } from '../device';
3
2
  import { type DetailedLocateParam, type ExecutionTaskApply, type ExecutionTaskProgressOptions, Executor, type Insight, type InsightExtractOption, type InsightExtractParam, type MidsceneYamlFlowItem, type PlanningAction, type PlanningActionParamWaitFor, type PlanningLocateParam, type TMultimodalPrompt, type TUserPrompt } from '../index';
4
3
  import { type IModelConfig } from '@midscene/shared/env';
@@ -13,7 +12,7 @@ export declare class TaskExecutor {
13
12
  interface: AbstractInterface;
14
13
  insight: Insight;
15
14
  taskCache?: TaskCache;
16
- conversationHistory: ChatCompletionMessageParam[];
15
+ private conversationHistory;
17
16
  onTaskStartCallback?: ExecutionTaskProgressOptions['onTaskStart'];
18
17
  replanningCycleLimit?: number;
19
18
  get page(): AbstractInterface;
@@ -32,28 +31,15 @@ export declare class TaskExecutor {
32
31
  loadYamlFlowAsPlanning(userInstruction: string, yamlString: string): Promise<{
33
32
  executor: Executor;
34
33
  }>;
35
- private planningTaskFromPrompt;
36
- private planningTaskToGoal;
34
+ private createPlanningTask;
37
35
  runPlans(title: string, plans: PlanningAction[], modelConfig: IModelConfig): Promise<ExecutionResult>;
36
+ private getReplanningCycleLimit;
38
37
  action(userPrompt: string, modelConfig: IModelConfig, actionContext?: string): Promise<ExecutionResult<{
39
38
  yamlFlow?: MidsceneYamlFlowItem[];
40
39
  } | undefined>>;
41
- actionToGoal(userPrompt: string, modelConfig: IModelConfig): Promise<ExecutionResult<{
42
- yamlFlow?: MidsceneYamlFlowItem[];
43
- } | undefined>>;
44
40
  private createTypeQueryTask;
45
41
  createTypeQueryExecution<T>(type: 'Query' | 'Boolean' | 'Number' | 'String' | 'Assert', demand: InsightExtractParam, modelConfig: IModelConfig, opt?: InsightExtractOption, multimodalPrompt?: TMultimodalPrompt): Promise<ExecutionResult<T>>;
46
42
  assert(assertion: TUserPrompt, modelConfig: IModelConfig, opt?: InsightExtractOption): Promise<ExecutionResult<boolean>>;
47
- /**
48
- * Append a message to the conversation history
49
- * For user messages with images:
50
- * - Keep max 4 user image messages in history
51
- * - Remove oldest user image message when limit reached
52
- * For assistant messages:
53
- * - Simply append to history
54
- * @param conversationHistory Message to append
55
- */
56
- private appendConversationHistory;
57
43
  private appendErrorPlan;
58
44
  waitFor(assertion: TUserPrompt, opt: PlanningActionParamWaitFor, modelConfig: IModelConfig): Promise<ExecutionResult<void>>;
59
45
  }
@@ -0,0 +1,18 @@
1
+ import type { ChatCompletionMessageParam } from 'openai/resources/index';
2
+ export interface ConversationHistoryOptions {
3
+ maxUserImageMessages?: number;
4
+ initialMessages?: ChatCompletionMessageParam[];
5
+ }
6
+ export declare class ConversationHistory {
7
+ private readonly maxUserImageMessages;
8
+ private readonly messages;
9
+ constructor(options?: ConversationHistoryOptions);
10
+ append(message: ChatCompletionMessageParam): void;
11
+ seed(messages: ChatCompletionMessageParam[]): void;
12
+ reset(): void;
13
+ snapshot(): ChatCompletionMessageParam[];
14
+ get length(): number;
15
+ [Symbol.iterator](): IterableIterator<ChatCompletionMessageParam>;
16
+ toJSON(): ChatCompletionMessageParam[];
17
+ private pruneOldestUserMessageIfNecessary;
18
+ }
@@ -8,5 +8,6 @@ export { AiLocateElement, AiExtractElementInfo, AiLocateSection, } from './inspe
8
8
  export { plan } from './llm-planning';
9
9
  export { adaptBboxToRect } from './common';
10
10
  export { vlmPlanning, resizeImageForUiTars } from './ui-tars-planning';
11
+ export { ConversationHistory, type ConversationHistoryOptions, } from './conversation-history';
11
12
  export { AIActionType, type AIArgs } from './common';
12
13
  export { getMidsceneLocationSchema, type MidsceneLocationResultType, PointSchema, SizeSchema, RectSchema, TMultimodalPromptSchema, TUserPromptSchema, type TMultimodalPrompt, type TUserPrompt, findAllMidsceneLocatorField, dumpActionParam, loadActionParam, } from './common';
@@ -1,5 +1,6 @@
1
1
  import type { DeviceAction, InterfaceType, PlanningAIResponse, UIContext } from '../types';
2
2
  import type { IModelConfig } from '@midscene/shared/env';
3
+ import type { ConversationHistory } from './conversation-history';
3
4
  export declare function plan(userInstruction: string, opts: {
4
5
  context: UIContext;
5
6
  interfaceType: InterfaceType;
@@ -7,4 +8,5 @@ export declare function plan(userInstruction: string, opts: {
7
8
  log?: string;
8
9
  actionContext?: string;
9
10
  modelConfig: IModelConfig;
11
+ conversationHistory?: ConversationHistory;
10
12
  }): Promise<PlanningAIResponse>;
@@ -1,24 +1,12 @@
1
- import type { AIUsageInfo, MidsceneYamlFlowItem, PlanningAction, Size } from '../types';
1
+ import type { PlanningAIResponse, Size, UIContext } from '../types';
2
2
  import { type IModelConfig, UITarsModelVersion } from '@midscene/shared/env';
3
- import { actionParser } from '@ui-tars/action-parser';
4
- import type { ChatCompletionMessageParam } from 'openai/resources/index';
3
+ import type { ConversationHistory } from './conversation-history';
5
4
  type ActionType = 'click' | 'drag' | 'type' | 'hotkey' | 'finished' | 'scroll' | 'wait';
6
- export declare function vlmPlanning(options: {
7
- userInstruction: string;
8
- conversationHistory: ChatCompletionMessageParam[];
9
- size: {
10
- width: number;
11
- height: number;
12
- };
5
+ export declare function vlmPlanning(userInstruction: string, options: {
6
+ conversationHistory: ConversationHistory;
7
+ context: UIContext;
13
8
  modelConfig: IModelConfig;
14
- }): Promise<{
15
- actions: PlanningAction<any>[];
16
- actionsFromModel: ReturnType<typeof actionParser>['parsed'];
17
- action_summary: string;
18
- yamlFlow?: MidsceneYamlFlowItem[];
19
- usage?: AIUsageInfo;
20
- rawResponse?: string;
21
- }>;
9
+ }): Promise<PlanningAIResponse>;
22
10
  interface BaseAction {
23
11
  action_type: ActionType;
24
12
  action_inputs: Record<string, any>;
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@midscene/core",
3
3
  "description": "Automate browser actions, extract data, and perform assertions using AI. It offers JavaScript SDK, Chrome extension, and support for scripting in YAML. See https://midscenejs.com/ for details.",
4
- "version": "0.28.11",
4
+ "version": "0.28.12-beta-20250923080328.0",
5
5
  "repository": "https://github.com/web-infra-dev/midscene",
6
6
  "homepage": "https://midscenejs.com/",
7
7
  "main": "./dist/lib/index.js",
@@ -87,8 +87,8 @@
87
87
  "zod": "3.24.3",
88
88
  "semver": "7.5.2",
89
89
  "js-yaml": "4.1.0",
90
- "@midscene/recorder": "0.28.11",
91
- "@midscene/shared": "0.28.11"
90
+ "@midscene/recorder": "0.28.12-beta-20250923080328.0",
91
+ "@midscene/shared": "0.28.12-beta-20250923080328.0"
92
92
  },
93
93
  "devDependencies": {
94
94
  "@microsoft/api-extractor": "^7.52.10",