multi-puzzle-solver 1.1.8 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- multi_puzzle_solver-1.1.8.dist-info/METADATA +4326 -0
- multi_puzzle_solver-1.1.8.dist-info/RECORD +106 -0
- multi_puzzle_solver-1.1.8.dist-info/WHEEL +5 -0
- multi_puzzle_solver-1.1.8.dist-info/top_level.txt +1 -0
- puzzle_solver/__init__.py +184 -0
- puzzle_solver/core/utils.py +298 -0
- puzzle_solver/core/utils_ortools.py +333 -0
- puzzle_solver/core/utils_visualizer.py +575 -0
- puzzle_solver/puzzles/abc_view/abc_view.py +75 -0
- puzzle_solver/puzzles/aquarium/aquarium.py +97 -0
- puzzle_solver/puzzles/area_51/area_51.py +159 -0
- puzzle_solver/puzzles/battleships/battleships.py +139 -0
- puzzle_solver/puzzles/binairo/binairo.py +98 -0
- puzzle_solver/puzzles/binairo/binairo_plus.py +7 -0
- puzzle_solver/puzzles/black_box/black_box.py +243 -0
- puzzle_solver/puzzles/branches/branches.py +64 -0
- puzzle_solver/puzzles/bridges/bridges.py +104 -0
- puzzle_solver/puzzles/chess_range/chess_melee.py +6 -0
- puzzle_solver/puzzles/chess_range/chess_range.py +406 -0
- puzzle_solver/puzzles/chess_range/chess_solo.py +9 -0
- puzzle_solver/puzzles/chess_sequence/chess_sequence.py +262 -0
- puzzle_solver/puzzles/circle_9/circle_9.py +44 -0
- puzzle_solver/puzzles/clouds/clouds.py +81 -0
- puzzle_solver/puzzles/connect_the_dots/connect_the_dots.py +50 -0
- puzzle_solver/puzzles/cow_and_cactus/cow_and_cactus.py +66 -0
- puzzle_solver/puzzles/dominosa/dominosa.py +67 -0
- puzzle_solver/puzzles/filling/filling.py +94 -0
- puzzle_solver/puzzles/flip/flip.py +64 -0
- puzzle_solver/puzzles/flood_it/flood_it.py +174 -0
- puzzle_solver/puzzles/flood_it/parse_map/parse_map.py +197 -0
- puzzle_solver/puzzles/galaxies/galaxies.py +110 -0
- puzzle_solver/puzzles/galaxies/parse_map/parse_map.py +216 -0
- puzzle_solver/puzzles/guess/guess.py +232 -0
- puzzle_solver/puzzles/heyawake/heyawake.py +152 -0
- puzzle_solver/puzzles/hidden_stars/hidden_stars.py +52 -0
- puzzle_solver/puzzles/hidoku/hidoku.py +59 -0
- puzzle_solver/puzzles/inertia/inertia.py +121 -0
- puzzle_solver/puzzles/inertia/parse_map/parse_map.py +207 -0
- puzzle_solver/puzzles/inertia/tsp.py +400 -0
- puzzle_solver/puzzles/kakurasu/kakurasu.py +38 -0
- puzzle_solver/puzzles/kakuro/kakuro.py +81 -0
- puzzle_solver/puzzles/kakuro/krypto_kakuro.py +95 -0
- puzzle_solver/puzzles/keen/keen.py +76 -0
- puzzle_solver/puzzles/kropki/kropki.py +94 -0
- puzzle_solver/puzzles/light_up/light_up.py +58 -0
- puzzle_solver/puzzles/linesweeper/linesweeper.py +71 -0
- puzzle_solver/puzzles/link_a_pix/link_a_pix.py +91 -0
- puzzle_solver/puzzles/lits/lits.py +138 -0
- puzzle_solver/puzzles/magnets/magnets.py +96 -0
- puzzle_solver/puzzles/map/map.py +56 -0
- puzzle_solver/puzzles/mathema_grids/mathema_grids.py +119 -0
- puzzle_solver/puzzles/mathrax/mathrax.py +93 -0
- puzzle_solver/puzzles/minesweeper/minesweeper.py +123 -0
- puzzle_solver/puzzles/mosaic/mosaic.py +38 -0
- puzzle_solver/puzzles/n_queens/n_queens.py +71 -0
- puzzle_solver/puzzles/nonograms/nonograms.py +121 -0
- puzzle_solver/puzzles/nonograms/nonograms_colored.py +220 -0
- puzzle_solver/puzzles/norinori/norinori.py +96 -0
- puzzle_solver/puzzles/number_path/number_path.py +76 -0
- puzzle_solver/puzzles/numbermaze/numbermaze.py +97 -0
- puzzle_solver/puzzles/nurikabe/nurikabe.py +130 -0
- puzzle_solver/puzzles/palisade/palisade.py +91 -0
- puzzle_solver/puzzles/pearl/pearl.py +107 -0
- puzzle_solver/puzzles/pipes/pipes.py +82 -0
- puzzle_solver/puzzles/range/range.py +59 -0
- puzzle_solver/puzzles/rectangles/rectangles.py +128 -0
- puzzle_solver/puzzles/ripple_effect/ripple_effect.py +83 -0
- puzzle_solver/puzzles/rooms/rooms.py +75 -0
- puzzle_solver/puzzles/schurs_numbers/schurs_numbers.py +73 -0
- puzzle_solver/puzzles/shakashaka/shakashaka.py +201 -0
- puzzle_solver/puzzles/shingoki/shingoki.py +116 -0
- puzzle_solver/puzzles/signpost/signpost.py +93 -0
- puzzle_solver/puzzles/singles/singles.py +53 -0
- puzzle_solver/puzzles/slant/parse_map/parse_map.py +135 -0
- puzzle_solver/puzzles/slant/slant.py +111 -0
- puzzle_solver/puzzles/slitherlink/slitherlink.py +130 -0
- puzzle_solver/puzzles/snail/snail.py +97 -0
- puzzle_solver/puzzles/split_ends/split_ends.py +93 -0
- puzzle_solver/puzzles/star_battle/star_battle.py +75 -0
- puzzle_solver/puzzles/star_battle/star_battle_shapeless.py +7 -0
- puzzle_solver/puzzles/stitches/parse_map/parse_map.py +267 -0
- puzzle_solver/puzzles/stitches/stitches.py +96 -0
- puzzle_solver/puzzles/sudoku/sudoku.py +267 -0
- puzzle_solver/puzzles/suguru/suguru.py +55 -0
- puzzle_solver/puzzles/suko/suko.py +54 -0
- puzzle_solver/puzzles/tapa/tapa.py +97 -0
- puzzle_solver/puzzles/tatami/tatami.py +64 -0
- puzzle_solver/puzzles/tents/tents.py +80 -0
- puzzle_solver/puzzles/thermometers/thermometers.py +82 -0
- puzzle_solver/puzzles/towers/towers.py +89 -0
- puzzle_solver/puzzles/tracks/tracks.py +88 -0
- puzzle_solver/puzzles/trees_logic/trees_logic.py +48 -0
- puzzle_solver/puzzles/troix/dumplings.py +7 -0
- puzzle_solver/puzzles/troix/troix.py +75 -0
- puzzle_solver/puzzles/twiddle/twiddle.py +112 -0
- puzzle_solver/puzzles/undead/undead.py +130 -0
- puzzle_solver/puzzles/unequal/unequal.py +128 -0
- puzzle_solver/puzzles/unruly/unruly.py +54 -0
- puzzle_solver/puzzles/vectors/vectors.py +94 -0
- puzzle_solver/puzzles/vermicelli/vermicelli.py +74 -0
- puzzle_solver/puzzles/walls/walls.py +52 -0
- puzzle_solver/puzzles/yajilin/yajilin.py +87 -0
- puzzle_solver/puzzles/yin_yang/parse_map/parse_map.py +172 -0
- puzzle_solver/puzzles/yin_yang/yin_yang.py +103 -0
- puzzle_solver/utils/etc/parser/board_color_digit.py +497 -0
- puzzle_solver/utils/visualizer.py +155 -0
puzzle_solver/puzzles/flood_it/parse_map/parse_map.py
@@ -0,0 +1,197 @@
"""
This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk and converts them to a json file.
Look at the ./input_output/ directory for examples of input images and output json files.
The output json is used in the test_solve.py file to test the solver.
"""
# import json
from pathlib import Path
import numpy as np
cv = None
Image = None


def extract_lines(bw):
    # Create the images that will be used to extract the horizontal and vertical lines
    horizontal = np.copy(bw)
    vertical = np.copy(bw)

    cols = horizontal.shape[1]
    horizontal_size = cols // 20
    # Create structure element for extracting horizontal lines through morphology operations
    horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv.erode(horizontal, horizontalStructure)
    horizontal = cv.dilate(horizontal, horizontalStructure)
    horizontal_means = np.mean(horizontal, axis=1)
    horizontal_cutoff = np.percentile(horizontal_means, 50)
    # location where the horizontal lines are
    horizontal_idx = np.where(horizontal_means > horizontal_cutoff)[0]
    # print(f"horizontal_idx: {horizontal_idx}")
    # height = len(horizontal_idx)
    # show_wait_destroy("horizontal", horizontal)  # this has the horizontal lines

    rows = vertical.shape[0]
    verticalsize = rows // 20
    # Create structure element for extracting vertical lines through morphology operations
    verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize))
    vertical = cv.erode(vertical, verticalStructure)
    vertical = cv.dilate(vertical, verticalStructure)
    vertical_means = np.mean(vertical, axis=0)
    vertical_cutoff = np.percentile(vertical_means, 50)
    vertical_idx = np.where(vertical_means > vertical_cutoff)[0]
    # print(f"vertical_idx: {vertical_idx}")
    # width = len(vertical_idx)
    # print(f"height: {height}, width: {width}")
    # print(f"vertical_means: {vertical_means}")
    # show_wait_destroy("vertical", vertical)  # this has the vertical lines

    vertical = cv.bitwise_not(vertical)
    # show_wait_destroy("vertical_bit", vertical)

    return horizontal_idx, vertical_idx


def show_wait_destroy(winname, img):
    cv.imshow(winname, img)
    cv.moveWindow(winname, 500, 0)
    cv.waitKey(0)
    cv.destroyWindow(winname)


def mean_consecutives(arr: np.ndarray) -> np.ndarray:
    """if a sequence of values is consecutive, then average the values"""
    sums = []
    counts = []
    for i in range(len(arr)):
        if i == 0:
            sums.append(arr[i])
            counts.append(1)
        elif arr[i] == arr[i-1] + 1:
            sums[-1] += arr[i]
            counts[-1] += 1
        else:
            sums.append(arr[i])
            counts.append(1)
    return np.array(sums) // np.array(counts)


def main(image):
    global Image
    global cv
    from PIL import Image as Image_module
    import cv2 as cv_module
    Image = Image_module
    cv = cv_module

    image_path = Path(image)
    output_path = image_path.parent / (image_path.stem + '.json')
    src = cv.imread(image, cv.IMREAD_COLOR)
    assert src is not None, f'Error opening image: {image}'
    if len(src.shape) != 2:
        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    else:
        gray = src
    # now the image is in grayscale

    # Apply adaptiveThreshold to the bitwise_not of gray
    gray = cv.bitwise_not(gray)
    bw = cv.adaptiveThreshold(gray.copy(), 255, cv.ADAPTIVE_THRESH_MEAN_C,
                              cv.THRESH_BINARY, 15, -2)
    # show_wait_destroy("binary", bw)

    # show_wait_destroy("src", src)
    horizontal_idx, vertical_idx = extract_lines(bw)
    horizontal_idx = mean_consecutives(horizontal_idx)
    vertical_idx = mean_consecutives(vertical_idx)
    median_vertical_dist = np.median(np.diff(vertical_idx))
    median_horizontal_dist = np.median(np.diff(horizontal_idx))
    print(f"median_vertical_dist: {median_vertical_dist}, median_horizontal_dist: {median_horizontal_dist}")
    height = len(horizontal_idx)
    width = len(vertical_idx)
    print(f"height: {height}, width: {width}")
    print(f"horizontal_idx: {horizontal_idx}")
    print(f"vertical_idx: {vertical_idx}")
    output_rgb = {}
    j_idx = 0
    for j in range(height - 1):
        i_idx = 0
        for i in range(width - 1):
            hidx1, hidx2 = horizontal_idx[j], horizontal_idx[j+1]
            vidx1, vidx2 = vertical_idx[i], vertical_idx[i+1]
            hidx1 = max(0, hidx1 - 2)
            hidx2 = min(src.shape[0], hidx2 + 4)
            vidx1 = max(0, vidx1 - 2)
            vidx2 = min(src.shape[1], vidx2 + 4)
            if (hidx2 - hidx1) < median_horizontal_dist * 0.5 or (vidx2 - vidx1) < median_vertical_dist * 0.5:
                continue
            cell = src[hidx1:hidx2, vidx1:vidx2]
            mid_x = cell.shape[1] // 2
            mid_y = cell.shape[0] // 2
            print(f"mid_x: {mid_x}, mid_y: {mid_y}")
            cell_50_percent = cell[int(mid_y*0.5):int(mid_y*1.5), int(mid_x*0.5):int(mid_x*1.5)]
            # show_wait_destroy(f"cell_{i_idx}_{j_idx}", cell_50_percent)
            output_rgb[j_idx, i_idx] = cell_50_percent.mean(axis=(0, 1))
            print(f"output_rgb[{j_idx}, {i_idx}]: {output_rgb[j_idx, i_idx]}")
            i_idx += 1
        j_idx += 1

    colors_to_cluster = cluster_colors(output_rgb)
    width = max(pos[1] for pos in output_rgb.keys()) + 1
    height = max(pos[0] for pos in output_rgb.keys()) + 1
    out = np.zeros((height, width), dtype=object)
    print(colors_to_cluster)
    for pos, cluster_id in colors_to_cluster.items():
        out[pos[0], pos[1]] = cluster_id
    print('Shape of out:', out.shape)

    with open(output_path, 'w') as f:
        f.write('[\n')
        for i, row in enumerate(out):
            f.write(' ' + str(row.tolist()).replace("'", '"'))
            if i != len(out) - 1:
                f.write(',')
            f.write('\n')
        f.write(']')
    print('output json: ', output_path)


def euclidean_distance(a: tuple[int, int, int], b: tuple[int, int, int]) -> float:
    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 + (a[2] - b[2]) ** 2) ** 0.5


KNOWN_COLORS = {
    (0, 0, 255): 'Red',
    (0, 255, 0): 'Green',
    (255, 77, 51): 'Blue',
    (0, 255, 255): 'Yellow',
    (255, 153, 255): 'Pink',
    (0, 128, 255): 'Orange',
    (255, 204, 102): 'Cyan',
    (179, 255, 179): 'Washed Green',
    (77, 77, 128): 'Brown',
    (179, 0, 128): 'Purple',
}


def cluster_colors(rgb: dict[tuple[int, int], tuple[int, int, int]]) -> dict[tuple[int, int], str]:
    MIN_DIST = 10  # if distance between two colors is less than this, then they are the same color
    colors_to_cluster = KNOWN_COLORS.copy()
    for pos, color in rgb.items():
        color = tuple(color)
        if color in colors_to_cluster:
            continue
        for existing_color, existing_cluster_id in colors_to_cluster.items():
            if euclidean_distance(color, existing_color) < MIN_DIST:
                colors_to_cluster[color] = existing_cluster_id
                break
        else:
            new_name = str(', '.join(str(int(c)) for c in color))
            print('WARNING: new color found:', new_name, 'at pos:', pos)
            colors_to_cluster[color] = new_name
    pos_to_cluster = {pos: colors_to_cluster[tuple(color)] for pos, color in rgb.items()}
    return pos_to_cluster


if __name__ == '__main__':
    # to run this script and visualize the output, in the root run:
    # python .\src\puzzle_solver\puzzles\flood_it\parse_map\parse_map.py | python .\src\puzzle_solver\utils\visualizer.py --read_stdin
    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c10m5%23637467359431429.png')
    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c6m5%23132018455881870.png')
    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c6m0%23668276603006993.png')
    # main(Path(__file__).parent / 'input_output' / 'flood.html#20x20c8m0%23991967486182787.png')
    main(Path(__file__).parent / 'input_output' / 'flood.html#20x20c4m0%23690338575695152.png')
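For reference, main() above writes the parsed board as a JSON array of rows, each cell holding a color-cluster name. The sketch below is not part of the wheel; it shows one way that output could be read back, with a hypothetical file name standing in for whatever main() actually produced:

import json
from pathlib import Path

# hypothetical file name; main() writes <image stem>.json next to the source image
board_path = Path('input_output') / 'flood_board.json'
with open(board_path) as f:
    board = json.load(f)  # list of rows; each cell is a cluster name such as "Red"
height, width = len(board), len(board[0])
print(f'{height}x{width} board, colors: {sorted({c for row in board for c in row})}')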
puzzle_solver/puzzles/galaxies/galaxies.py
@@ -0,0 +1,110 @@
from collections import defaultdict
from typing import Iterable, Union

import numpy as np
from ortools.sat.python import cp_model

from puzzle_solver.core.utils import Pos, get_all_pos, set_char, Direction, get_next_pos, in_bounds, get_opposite_direction, get_pos
from puzzle_solver.core.utils_ortools import generic_solve_all, SingleSolution, force_connected_component
from puzzle_solver.core.utils_visualizer import combined_function, id_board_to_wall_fn


def parse_numpy(galaxies: np.ndarray) -> list[tuple[Pos, ...]]:
    result = defaultdict(list)
    for pos, arr_id in np.ndenumerate(galaxies):
        if not arr_id.strip():
            continue
        result[arr_id].append(get_pos(x=pos[1], y=pos[0]))
    return [positions for _, positions in sorted(result.items(), key=lambda x: x[0])]


class Board:
    def __init__(self, galaxies: Union[list[tuple[Pos, ...]], np.ndarray], V: int = None, H: int = None):
        if isinstance(galaxies, np.ndarray):
            V, H = galaxies.shape
            galaxies = parse_numpy(galaxies)
        else:
            assert V is not None and H is not None, 'V and H must be provided if galaxies is not a numpy array'
        assert V >= 1 and H >= 1, 'V and H must be at least 1'
        assert all(isinstance(galaxy, Iterable) for galaxy in galaxies), 'galaxies must be a list of Iterables'
        assert all(len(galaxy) in [1, 2, 4] for galaxy in galaxies), 'each galaxy must be exactly 1, 2, or 4 positions'
        self.V = V
        self.H = H
        self.n_galaxies = len(galaxies)
        self.galaxies = galaxies
        self.prelocated_positions: dict[Pos, int] = {pos: i for i, galaxy in enumerate(galaxies) for pos in galaxy}

        self.model = cp_model.CpModel()
        self.pos_to_galaxy: dict[Pos, dict[int, cp_model.IntVar]] = {p: {} for p in get_all_pos(V, H)}  # each position can be part of exactly one out of many possible galaxies
        self.allocated_pairs: set[tuple[Pos, Pos]] = set()  # each pair is allocated to exactly one galaxy

        self.create_vars()
        self.add_all_constraints()

    def create_vars(self):
        for i in range(self.n_galaxies):
            galaxy = self.galaxies[i]
            if len(galaxy) == 1:
                p1, p2 = galaxy[0], galaxy[0]
            elif len(galaxy) == 2:
                p1, p2 = galaxy[0], galaxy[1]
            elif len(galaxy) == 4:
                p1, p2 = galaxy[0], galaxy[3]  # [1] and [2] will be linked with symmetry
            self.expand_galaxy(p1, p2, i)

    def expand_galaxy(self, p1: Pos, p2: Pos, galaxy_idx: int):
        if (p1, p2) in self.allocated_pairs or (p2, p1) in self.allocated_pairs:
            return
        if p1 in self.prelocated_positions and self.prelocated_positions[p1] != galaxy_idx:
            return
        if p2 in self.prelocated_positions and self.prelocated_positions[p2] != galaxy_idx:
            return
        if not in_bounds(p1, self.V, self.H) or not in_bounds(p2, self.V, self.H):
            return
        self.bind_pair(p1, p2, galaxy_idx)
        # symmetrically expand the galaxy until an illegal position is hit
        for direction in [Direction.RIGHT, Direction.UP, Direction.DOWN, Direction.LEFT]:
            symmetrical_direction = get_opposite_direction(direction)
            new_p1 = get_next_pos(p1, direction)
            new_p2 = get_next_pos(p2, symmetrical_direction)
            self.expand_galaxy(new_p1, new_p2, galaxy_idx)

    def bind_pair(self, p1: Pos, p2: Pos, galaxy_idx: int):
        assert galaxy_idx not in self.pos_to_galaxy[p1], f'p1={p1} already has galaxy idx={galaxy_idx}'
        assert galaxy_idx not in self.pos_to_galaxy[p2], f'p2={p2} already has galaxy idx={galaxy_idx}'
        self.allocated_pairs.add((p1, p2))
        v1 = self.model.NewBoolVar(f'{p1}:{galaxy_idx}')
        v2 = self.model.NewBoolVar(f'{p2}:{galaxy_idx}')
        self.model.Add(v1 == v2)
        self.pos_to_galaxy[p1][galaxy_idx] = v1
        self.pos_to_galaxy[p2][galaxy_idx] = v2

    def add_all_constraints(self):
        galaxy_vars = {}
        for pos in get_all_pos(self.V, self.H):
            pos_vars = list(self.pos_to_galaxy[pos].values())
            self.model.AddExactlyOne(pos_vars)
            for galaxy_idx, v in self.pos_to_galaxy[pos].items():
                galaxy_vars.setdefault(galaxy_idx, {})[pos] = v
        for pos_vars in galaxy_vars.values():
            force_connected_component(self.model, pos_vars)

    def solve_and_print(self, verbose: bool = True):
        def board_to_solution(board: Board, solver: cp_model.CpSolverSolutionCallback) -> SingleSolution:
            assignment: dict[Pos, int] = {}
            for pos, galaxy_vars in board.pos_to_galaxy.items():
                for galaxy_idx, var in galaxy_vars.items():  # every pos is part of exactly one galaxy
                    if solver.Value(var) == 1:
                        assignment[pos] = galaxy_idx
                        break
            return SingleSolution(assignment=assignment)

        def callback(single_res: SingleSolution):
            print("Solution found")
            res = np.full((self.V, self.H), ' ', dtype=object)
            for pos in get_all_pos(self.V, self.H):
                set_char(res, pos, single_res.assignment[pos])
            print(combined_function(self.V, self.H,
                                    cell_flags=id_board_to_wall_fn(res),
                                    center_char=lambda r, c: '.' if (Pos(x=c, y=r) in self.prelocated_positions) else ' '))

        return generic_solve_all(self, board_to_solution, callback=callback if verbose else None, verbose=verbose)
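For reference, the Board class above accepts either a list of galaxy cell-tuples (1, 2, or 4 positions each) or a numpy array of ids, and solve_and_print() prints each solution it finds. A minimal usage sketch, not part of the wheel, assuming the import path implied by the file listing and a trivial 2x2 puzzle whose single galaxy dot sits at the grid's center:

import numpy as np
from puzzle_solver.puzzles.galaxies.galaxies import Board

# one galaxy id covering all four cells around a dot at the middle of a 2x2 grid
grid = np.array([
    ['a', 'a'],
    ['a', 'a'],
], dtype=object)
Board(grid).solve_and_print()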
puzzle_solver/puzzles/galaxies/parse_map/parse_map.py
@@ -0,0 +1,216 @@
"""
This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk/~sgtatham/puzzles/js/galaxies.html and converts them to a json file.
Look at the ./input_output/ directory for examples of input images and output json files.
The output json is used in the test_solve.py file to test the solver.
"""
from pathlib import Path
import numpy as np
cv = None
Image = None


def extract_lines(bw):
    # Create the images that will be used to extract the horizontal and vertical lines
    horizontal = np.copy(bw)
    vertical = np.copy(bw)

    cols = horizontal.shape[1]
    horizontal_size = cols // 5
    # Create structure element for extracting horizontal lines through morphology operations
    horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv.erode(horizontal, horizontalStructure)
    horizontal = cv.dilate(horizontal, horizontalStructure)
    horizontal_means = np.mean(horizontal, axis=1)
    horizontal_cutoff = np.percentile(horizontal_means, 50)
    # location where the horizontal lines are
    horizontal_idx = np.where(horizontal_means > horizontal_cutoff)[0]
    # print(f"horizontal_idx: {horizontal_idx}")
    # height = len(horizontal_idx)
    # show_wait_destroy("horizontal", horizontal)  # this has the horizontal lines

    rows = vertical.shape[0]
    verticalsize = rows // 5
    # Create structure element for extracting vertical lines through morphology operations
    verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize))
    vertical = cv.erode(vertical, verticalStructure)
    vertical = cv.dilate(vertical, verticalStructure)
    vertical_means = np.mean(vertical, axis=0)
    vertical_cutoff = np.percentile(vertical_means, 50)
    vertical_idx = np.where(vertical_means > vertical_cutoff)[0]
    # print(f"vertical_idx: {vertical_idx}")
    # width = len(vertical_idx)
    # print(f"height: {height}, width: {width}")
    # print(f"vertical_means: {vertical_means}")
    # show_wait_destroy("vertical", vertical)  # this has the vertical lines

    vertical = cv.bitwise_not(vertical)
    # show_wait_destroy("vertical_bit", vertical)

    return horizontal_idx, vertical_idx


def show_wait_destroy(winname, img):
    cv.imshow(winname, img)
    cv.moveWindow(winname, 500, 0)
    cv.waitKey(0)
    cv.destroyWindow(winname)


def mean_consecutives(arr: np.ndarray) -> np.ndarray:
    """if a sequence of values is consecutive, then average the values"""
    sums = []
    counts = []
    for i in range(len(arr)):
        if i == 0:
            sums.append(arr[i])
            counts.append(1)
        elif arr[i] == arr[i-1] + 1:
            sums[-1] += arr[i]
            counts[-1] += 1
        else:
            sums.append(arr[i])
            counts.append(1)
    return np.array(sums) // np.array(counts)


def main(image):
    global Image
    global cv
    import matplotlib.pyplot as plt
    from PIL import Image as Image_module
    import cv2 as cv_module
    Image = Image_module
    cv = cv_module

    image_path = Path(image)
    output_path = image_path.parent / (image_path.stem + '.json')
    src = cv.imread(image, cv.IMREAD_COLOR)
    assert src is not None, f'Error opening image: {image}'
    if len(src.shape) != 2:
        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    else:
        gray = src
    # now the image is in grayscale

    # Apply adaptiveThreshold to the bitwise_not of gray
    gray = cv.bitwise_not(gray)
    bw = cv.adaptiveThreshold(gray.copy(), 255, cv.ADAPTIVE_THRESH_MEAN_C,
                              cv.THRESH_BINARY, 15, -2)
    # show_wait_destroy("binary", bw)

    # show_wait_destroy("src", src)
    horizontal_idx, vertical_idx = extract_lines(bw)
    horizontal_idx = mean_consecutives(horizontal_idx)
    vertical_idx = mean_consecutives(vertical_idx)
    height = len(horizontal_idx)
    width = len(vertical_idx)
    print(f"height: {height}, width: {width}")
    print(f"horizontal_idx: {horizontal_idx}")
    print(f"vertical_idx: {vertical_idx}")
    arr = np.zeros((height - 1, width - 1), dtype=object)
    output = {(dx, dy): arr.copy() for dx in [-1, 0, 1] for dy in [-1, 0, 1]}
    hists = {(dx, dy): {} for dx in [-1, 0, 1] for dy in [-1, 0, 1]}
    for j in range(height - 1):
        for i in range(width - 1):
            hidx1, hidx2 = horizontal_idx[j], horizontal_idx[j+1]
            vidx1, vidx2 = vertical_idx[i], vertical_idx[i+1]
            hidx1 = max(0, hidx1 - 2)
            hidx2 = min(src.shape[0], hidx2 + 4)
            vidx1 = max(0, vidx1 - 2)
            vidx2 = min(src.shape[1], vidx2 + 4)
            cell = src[hidx1:hidx2, vidx1:vidx2]
            mid_x = cell.shape[1] // 2
            mid_y = cell.shape[0] // 2
            cell = cv.bitwise_not(cell)  # invert colors
            for dx in [-1, 0, 1]:
                for dy in [-1, 0, 1]:
                    mx = mid_x + dx*mid_x
                    my = mid_y + dy*mid_y
                    mx0 = max(0, mx - 5)
                    mx1 = min(cell.shape[1], mx + 5)
                    my0 = max(0, my - 5)
                    my1 = min(cell.shape[0], my + 5)
                    cell_part = cell[my0:my1, mx0:mx1]
                    hists[(dx, dy)][j, i] = np.sum(cell_part)
            # top = cell[0:10, mid_y-5:mid_y+5]
            # hists['top'][j, i] = np.sum(top)
            # left = cell[mid_x-5:mid_x+5, 0:10]
            # hists['left'][j, i] = np.sum(left)
            # right = cell[mid_x-5:mid_x+5, -10:]
            # hists['right'][j, i] = np.sum(right)
            # bottom = cell[-10:, mid_y-5:mid_y+5]
            # hists['bottom'][j, i] = np.sum(bottom)
            # print(f"cell_{i}_{j}, ", [hists[(dx, dy)][j, i] for dx in [-1, 0, 1] for dy in [-1, 0, 1]])
            # show_wait_destroy(f"cell_{i}_{j}", cell)

    fig, axs = plt.subplots(3, 3)
    target = 100
    for dx in [-1, 0, 1]:
        for dy in [-1, 0, 1]:
            axs[dx+1, dy+1].hist(list(hists[(dx, dy)].values()), bins=100)
            axs[dx+1, dy+1].set_title(f'{dx},{dy}')
            # target = np.mean(list(hists[(dx, dy)].values()))
            axs[dx+1, dy+1].axvline(target, color='red')
    # plt.show()
    # 1/0
    for j in range(height - 1):
        for i in range(width - 1):
            sums_str = ''
            out_str = ''
            for dx in [-1, 0, 1]:
                out_xpart = 'L' if dx == -1 else 'C' if dx == 0 else 'R'
                for dy in [-1, 0, 1]:
                    out_ypart = 'T' if dy == -1 else 'C' if dy == 0 else 'B'
                    sums_str += str(hists[(dx, dy)][j, i]) + ' '
                    if hists[(dx, dy)][j, i] < target:
                        out_str += (out_xpart + out_ypart + ' ')
                        output[(dx, dy)][j, i] = 1
            print(f"cell_{j}_{i}", end=': ')
            print(out_str)
            print(' Sums: ', sums_str)

    out = np.full_like(output[(0, 0)], ' ', dtype='U2')
    counter = 0
    for j in range(out.shape[0]):
        for i in range(out.shape[1]):
            for dx in [-1, 0, 1]:
                for dy in [-1, 0, 1]:
                    if output[(dx, dy)][j, i] == 1:
                        # out[j, i] = dxdy_to_char[(dx, dy)]
                        if dx == 0 and dy == 0:  # single point
                            out[j, i] = str(counter).zfill(2)
                            counter += 1
                        elif dx == 0 and dy == 1:  # vertical
                            out[j, i] = str(counter).zfill(2)
                            out[j+1, i] = str(counter).zfill(2)
                            counter += 1
                        elif dx == 1 and dy == 0:  # horizontal
                            out[j, i] = str(counter).zfill(2)
                            out[j, i+1] = str(counter).zfill(2)
                            counter += 1
                        elif dx == 1 and dy == 1:  # 2 by 2
                            out[j, i] = str(counter).zfill(2)
                            out[j+1, i] = str(counter).zfill(2)
                            out[j, i+1] = str(counter).zfill(2)
                            out[j+1, i+1] = str(counter).zfill(2)
                            counter += 1

    # print(out)
    with open(output_path, 'w') as f:
        f.write('[\n')
        for i, row in enumerate(out):
            f.write(' ' + str(row.tolist()).replace("'", '"'))
            if i != len(out) - 1:
                f.write(',')
            f.write('\n')
        f.write(']')
    print('output json: ', output_path)


if __name__ == '__main__':
    # to run this script and visualize the output, in the root run:
    # python .\src\puzzle_solver\puzzles\galaxies\parse_map\parse_map.py | python .\src\puzzle_solver\utils\visualizer.py --read_stdin
    # main(Path(__file__).parent / 'input_output' / 'MTM6OSw4MjEsNDAx.png')
    # main(Path(__file__).parent / 'input_output' / 'weekly_oct_3rd_2025.png')
    # main(Path(__file__).parent / 'input_output' / 'star_battle_67f73ff90cd8cdb4b3e30f56f5261f4968f5dac940bc6.png')
    # main(Path(__file__).parent / 'input_output' / 'LITS_MDoxNzksNzY3.png')
    # main(Path(__file__).parent / 'input_output' / 'lits_OTo3LDMwNiwwMTU=.png')
    main(Path(__file__).parent / 'input_output' / 'eofodowmumgzzdkopzlpzkzaezrhefoezejvdtxrzmpgozzemxjdcigcqzrk.png')
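For reference, the grid written above marks the 1, 2, or 4 cells touched by each galaxy dot with a shared two-character id and leaves the other cells blank, which is the array form parse_numpy() in galaxies.py accepts; per the module docstring, test_solve.py uses these JSON files to exercise the solver. A rough sketch of that wiring, not part of the wheel, with a hypothetical output file name:

import json
import numpy as np
from puzzle_solver.puzzles.galaxies.galaxies import Board

# hypothetical file name; main() writes <image stem>.json into the same directory as the image
with open('input_output/galaxies_board.json') as f:
    grid = np.array(json.load(f), dtype=object)
Board(grid).solve_and_print()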