pyautoscene-0.2.0-py3-none-any.whl → pyautoscene-0.2.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- pyautoscene/ocr.py +70 -70
- pyautoscene/ocr_config.yaml +112 -112
- pyautoscene/references.py +78 -78
- pyautoscene/scene.py +61 -61
- pyautoscene/screen.py +79 -79
- pyautoscene/session.py +140 -140
- pyautoscene/utils.py +25 -25
- {pyautoscene-0.2.0.dist-info → pyautoscene-0.2.1.dist-info}/METADATA +5 -1
- pyautoscene-0.2.1.dist-info/RECORD +13 -0
- {pyautoscene-0.2.0.dist-info → pyautoscene-0.2.1.dist-info}/licenses/LICENSE +201 -201
- pyautoscene-0.2.0.dist-info/RECORD +0 -13
- {pyautoscene-0.2.0.dist-info → pyautoscene-0.2.1.dist-info}/WHEEL +0 -0
- {pyautoscene-0.2.0.dist-info → pyautoscene-0.2.1.dist-info}/entry_points.txt +0 -0
pyautoscene/screen.py
CHANGED

from __future__ import annotations

import re
from dataclasses import dataclass

import numpy as np
import pyautogui as gui
from PIL import Image
from pyscreeze import Box

axis_pattern = re.compile(r"(?P<d>[xy]):\(?(?P<i>\d+)(?:-(?P<j>\d+))?\)?/(?P<n>\d+)")


@dataclass(frozen=True, slots=True)
class Region:
    left: int
    top: int
    width: int
    height: int

    def to_box(self) -> Box:
        """Convert to a pyscreeze Box."""
        return Box(self.left, self.top, self.width, self.height)

    @classmethod
    def from_box(cls, box: Box) -> Region:
        """Create a Region from a pyscreeze Box."""
        return cls(left=box.left, top=box.top, width=box.width, height=box.height)


RegionSpec = Region | str


def generate_region_from_spec(
    spec: RegionSpec, shape: tuple[int, int] | None = None
) -> Region:
    if isinstance(spec, Region):
        return spec
    if shape is None:
        img = np.array(gui.screenshot())
        shape = (img.shape[0]), (img.shape[1])

    default_region = {"left": 0, "top": 0, "width": shape[1], "height": shape[0]}

    axis_mapping = {"x": ("left", "width", 1), "y": ("top", "height", 0)}
    for axis, i, j, n in axis_pattern.findall(spec):
        alignment, size_attr, dim_index = axis_mapping[axis]
        size = shape[dim_index] // int(n)
        i, j = int(i), int(j) if j else int(i)
        default_region.update({
            alignment: (i - 1) * size,
            size_attr: (j - i + 1) * size,
        })

    return Region(**default_region)


def locate_on_screen(
    reference: Image.Image | str,
    region: RegionSpec | None = None,
    confidence: float = 0.999,
    grayscale: bool = True,
    limit: int = 1,
) -> Region | None:
    """Locate a region on the screen."""
    try:
        location = gui.locateOnScreen(
            reference,
            region=generate_region_from_spec(region).to_box() if region else None,
            grayscale=grayscale,
            confidence=confidence,
            limit=limit,
        )
        if location:
            return Region.from_box(location)
    except gui.ImageNotFoundException:
        return None
    except FileNotFoundError:
        return None
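
For reference, the spec string accepted by generate_region_from_spec (per axis_pattern above) selects slice i, or span i-j, out of n equal divisions of the screen along the x or y axis. A minimal usage sketch, with a hypothetical screen shape of (1080, 1920) passed explicitly so no screenshot is taken, and a hypothetical reference image for locate_on_screen:

from pyautoscene.screen import generate_region_from_spec, locate_on_screen

# Right two thirds of the screen, top half; shape is (height, width) and is illustrative only.
region = generate_region_from_spec("x:(2-3)/3 y:1/2", shape=(1080, 1920))
print(region)  # Region(left=640, top=0, width=1280, height=540)

# The same spec string can restrict where matching happens; "button.png" is a hypothetical file.
match = locate_on_screen("button.png", region="x:(2-3)/3 y:1/2", confidence=0.95)
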
pyautoscene/session.py
CHANGED

from __future__ import annotations

from typing import Callable

import networkx as nx
from statemachine import State, StateMachine
from statemachine.factory import StateMachineMetaclass
from statemachine.states import States
from statemachine.transition_list import TransitionList

from .scene import Scene
from .screen import Region


class SceneRecognitionError(Exception):
    pass


def build_dynamic_state_machine(
    scenes: list[Scene],
) -> tuple[StateMachine, dict[str, TransitionList], dict[str, Callable]]:
    """Create a dynamic StateMachine class from scenes using StateMachineMetaclass."""

    states = {scene.name: scene for scene in scenes}
    transitions = {}
    leaf_actions = {}
    for scene in scenes:
        for action_name, action_info in scene.actions.items():
            target_scene = action_info["transitions_to"]
            if target_scene is not None:
                event_name = f"event_{action_name}"
                new_transition = scene.to(target_scene, event=event_name)
                new_transition.on(action_info["action"])
                transitions[event_name] = new_transition
            else:
                leaf_actions[action_name] = action_info["action"]

    SessionSM = StateMachineMetaclass(
        "SessionSM",
        (StateMachine,),
        {"states": States(states), **transitions}, # type: ignore[call-arg]
    )
    session_sm: StateMachine = SessionSM() # type: ignore[no-redef]

    return session_sm, transitions, leaf_actions


def get_current_scene(scenes: list[Scene], region: Region | None = None) -> Scene:
    """Get the current scene from the list of scenes."""
    current_scenes = [scene for scene in scenes if scene.is_on_screen(region)]
    if len(current_scenes) == 1:
        return current_scenes[0]
    elif len(current_scenes) > 1:
        raise SceneRecognitionError(
            f"Multiple scenes are currently on screen.\n{' '.join(str(scene) for scene in current_scenes)}"
        )
    else:
        raise SceneRecognitionError("No scene is currently on screen.")


class Session:
    """A session manages the state machine for GUI automation scenes."""

    def __init__(self, scenes: list[Scene]):
        self._scenes_list = scenes
        self._scenes_dict = {scene.name: scene for scene in scenes}

        # Create dynamic StateMachine class and instantiate it
        self._sm, self.transitions, self.leaf_actions = build_dynamic_state_machine(
            scenes
        )
        self.graph: nx.MultiDiGraph = nx.nx_pydot.from_pydot(self._sm._graph())

    @property
    def current_scene(self) -> State:
        """Get the current state."""
        return self._sm.current_state

    def expect(self, target_scene: Scene, **kwargs):
        """Navigate to a specific scene."""
        if target_scene.is_on_screen():
            return

        present_scene = get_current_scene(self._scenes_list)
        all_paths = list(
            nx.all_simple_paths(
                self.graph,
                source=present_scene.name,
                target=target_scene.name,
            )
        )
        if len(all_paths) == 0:
            raise SceneRecognitionError(
                f"No path found from {present_scene.name} to {target_scene.name}"
            )
        elif len(all_paths) > 1:
            raise SceneRecognitionError(
                f"Multiple paths found from {present_scene.name} to {target_scene.name}"
            )

        path = all_paths[0]
        events: list[str] = [
            self.graph.get_edge_data(path[i], path[i + 1])[0]["label"] # type: ignore
            for i in range(len(path) - 1)
        ]

        for event in events:
            self._sm.send(event, **kwargs)

    def invoke(self, action_name: str, **kwargs):
        """Invoke an action in the current scene."""
        event_name = f"event_{action_name}"
        transition = next(
            (tr for tr_name, tr in self.transitions.items() if tr_name == event_name),
            None,
        )
        if transition:
            return self._sm.send(event_name, **kwargs)

        leaf_action = next(
            (
                action
                for name, action in self.leaf_actions.items()
                if name == action_name
            ),
            None,
        )
        if leaf_action:
            return leaf_action(**kwargs)

        raise ValueError(
            f"Action '{action_name}' not found in current scene '{self.current_scene.name}'"
        )

    def __repr__(self):
        current = self.current_scene
        current_name = current.name if current else "None"
        return (
            f"Session(scenes={list(self._scenes_dict.keys())}, current={current_name})"
        )
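
As a usage sketch: Session builds the state machine from the scenes, expect() walks the unique transition path to the target scene and fires each event's bound action, and invoke() dispatches either a transition event or a leaf action by name. The Scene objects, keyword arguments, and action name below are hypothetical, since scene.py is not shown in this diff:

from pyautoscene.session import Session

# `login` and `dashboard` stand in for Scene instances defined elsewhere.
session = Session([login, dashboard])
session.expect(dashboard, username="alice")  # kwargs are forwarded to every event along the path
session.invoke("download_report")            # fires a transition event or calls a leaf action
print(session)  # Session(scenes=['login', 'dashboard'], current=...)
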
pyautoscene/utils.py
CHANGED

from __future__ import annotations

import time
from keyword import iskeyword
from typing import Literal

import pyautogui as gui

LOCATE_AND_CLICK_DELAY = 0.2


def locate_and_click(
    filename: str, clicks: int = 1, button: Literal["left", "right"] = "left"
):
    time.sleep(LOCATE_AND_CLICK_DELAY)
    locate = gui.locateOnScreen(filename, grayscale=True)
    assert locate is not None, f"Could not locate {filename} on screen."
    locate_center = (locate.left + locate.width // 2), (locate.top + locate.height // 2)
    gui.moveTo(*locate_center, 0.6, gui.easeInOutQuad) # type: ignore
    gui.click(clicks=clicks, button=button)
    time.sleep(LOCATE_AND_CLICK_DELAY)


def is_valid_variable_name(name):
    return name.isidentifier() and not iskeyword(name)
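
locate_and_click pauses briefly on either side of the click and moves the cursor with an eased motion before clicking. A minimal sketch, using a hypothetical reference image path:

from pyautoscene.utils import locate_and_click

# Double-click wherever the (hypothetical) reference screenshot matches on screen.
locate_and_click("submit_button.png", clicks=2, button="left")
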
{pyautoscene-0.2.0.dist-info → pyautoscene-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,14 +1,18 @@
 Metadata-Version: 2.4
 Name: pyautoscene
-Version: 0.2.0
+Version: 0.2.1
 Summary: Advance GUI automation
 Author-email: pritam-dey3 <pritam.pritamdey.984@gmail.com>
 License-File: LICENSE
 Requires-Python: >=3.13
 Requires-Dist: networkx>=3.5
+Requires-Dist: opencv-python-headless>=4.12.0.88
 Requires-Dist: pillow>=11.3.0
 Requires-Dist: pyautogui>=0.9.54
 Requires-Dist: python-statemachine[diagrams]
+Provides-Extra: ocr
+Requires-Dist: onnxruntime>=1.22.0; extra == 'ocr'
+Requires-Dist: rapidocr>=3.2.0; extra == 'ocr'
 Description-Content-Type: text/markdown
 
 # PyAutoScene
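
Note that the new onnxruntime and rapidocr requirements are gated behind the ocr extra, so a plain install of 0.2.1 does not pull them in; they are installed only when the extra is requested, e.g. pip install "pyautoscene[ocr]". The opencv-python-headless dependency, by contrast, is unconditional.
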
pyautoscene-0.2.1.dist-info/RECORD
ADDED

pyautoscene/__init__.py,sha256=clSA3hhv4eGTi1C4oN4BKX7i5ILZOy4ABVO0wFUfSXo,167
pyautoscene/ocr.py,sha256=y_JpmFLwUxS7PKeKjsF1o5r354j004y2bJNzfIcRei0,2168
pyautoscene/ocr_config.yaml,sha256=EGaKX1a-LyWk0gtI2wUo04LraFqGJ3aZnQ5NCPbgDYI,2231
pyautoscene/references.py,sha256=tsYVMhdawLCSk-leh0C9k-DwlLaP1niIHFR-bY3djcQ,2482
pyautoscene/scene.py,sha256=to-hpwODLKpD0vSHbxHvnPhV7JicEV5hmcxXC0yX5Z8,1997
pyautoscene/screen.py,sha256=26xPZ2PCE6jy8dKUFhMbCgD7XFI1McboB2E04HgSO2Q,2189
pyautoscene/session.py,sha256=Bm5E3nNKhpdE-ND44zeRvWvO2VOMU2WAcgE65w7iBMc,4726
pyautoscene/utils.py,sha256=v7TD8IUyY1jEyOXJ6520fNTpLY3k0-UxxTwSuoEtG_8,769
pyautoscene-0.2.1.dist-info/METADATA,sha256=S8oK-l3AQtKodp5PRMt2kdV2fL7yxD6E9Sn_tQUmpHA,6068
pyautoscene-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
pyautoscene-0.2.1.dist-info/entry_points.txt,sha256=6aKjylfDivCRMrJasIIi7ICU4fZR-8HjcOHhRmHpYpQ,49
pyautoscene-0.2.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
pyautoscene-0.2.1.dist-info/RECORD,,