multi-puzzle-solver 0.9.31__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of multi-puzzle-solver might be problematic.
- {multi_puzzle_solver-0.9.31.dist-info → multi_puzzle_solver-1.0.3.dist-info}/METADATA +335 -1
- multi_puzzle_solver-1.0.3.dist-info/RECORD +70 -0
- puzzle_solver/__init__.py +60 -1
- puzzle_solver/core/utils_ortools.py +8 -6
- puzzle_solver/core/utils_visualizer.py +12 -11
- puzzle_solver/puzzles/binairo/binairo.py +4 -4
- puzzle_solver/puzzles/black_box/black_box.py +5 -11
- puzzle_solver/puzzles/bridges/bridges.py +1 -1
- puzzle_solver/puzzles/chess_range/chess_range.py +3 -3
- puzzle_solver/puzzles/chess_range/chess_solo.py +1 -1
- puzzle_solver/puzzles/filling/filling.py +3 -3
- puzzle_solver/puzzles/flood_it/flood_it.py +174 -0
- puzzle_solver/puzzles/flood_it/parse_map/parse_map.py +198 -0
- puzzle_solver/puzzles/galaxies/galaxies.py +1 -1
- puzzle_solver/puzzles/galaxies/parse_map/parse_map.py +3 -3
- puzzle_solver/puzzles/guess/guess.py +1 -1
- puzzle_solver/puzzles/heyawake/heyawake.py +3 -3
- puzzle_solver/puzzles/inertia/inertia.py +1 -1
- puzzle_solver/puzzles/inertia/parse_map/parse_map.py +13 -10
- puzzle_solver/puzzles/inertia/tsp.py +5 -7
- puzzle_solver/puzzles/kakuro/kakuro.py +1 -1
- puzzle_solver/puzzles/keen/keen.py +2 -2
- puzzle_solver/puzzles/minesweeper/minesweeper.py +2 -3
- puzzle_solver/puzzles/nonograms/nonograms.py +3 -3
- puzzle_solver/puzzles/norinori/norinori.py +2 -2
- puzzle_solver/puzzles/nurikabe/nurikabe.py +2 -2
- puzzle_solver/puzzles/pipes/pipes.py +81 -0
- puzzle_solver/puzzles/range/range.py +1 -1
- puzzle_solver/puzzles/rectangles/rectangles.py +2 -6
- puzzle_solver/puzzles/shingoki/shingoki.py +1 -1
- puzzle_solver/puzzles/signpost/signpost.py +2 -2
- puzzle_solver/puzzles/slant/parse_map/parse_map.py +7 -5
- puzzle_solver/puzzles/slitherlink/slitherlink.py +1 -1
- puzzle_solver/puzzles/stitches/parse_map/parse_map.py +6 -5
- puzzle_solver/puzzles/stitches/stitches.py +1 -1
- puzzle_solver/puzzles/sudoku/sudoku.py +91 -20
- puzzle_solver/puzzles/tents/tents.py +2 -2
- puzzle_solver/puzzles/thermometers/thermometers.py +1 -1
- puzzle_solver/puzzles/towers/towers.py +1 -1
- puzzle_solver/puzzles/undead/undead.py +1 -1
- puzzle_solver/puzzles/unruly/unruly.py +1 -1
- puzzle_solver/puzzles/yin_yang/yin_yang.py +1 -1
- puzzle_solver/utils/visualizer.py +1 -1
- multi_puzzle_solver-0.9.31.dist-info/RECORD +0 -67
- {multi_puzzle_solver-0.9.31.dist-info → multi_puzzle_solver-1.0.3.dist-info}/WHEEL +0 -0
- {multi_puzzle_solver-0.9.31.dist-info → multi_puzzle_solver-1.0.3.dist-info}/top_level.txt +0 -0
puzzle_solver/puzzles/binairo/binairo.py
@@ -56,13 +56,13 @@ class Board:
             self.disallow_three_in_a_row(pos, Direction.RIGHT)
             self.disallow_three_in_a_row(pos, Direction.DOWN)
 
-        # 3. Each row and column is unique.
+        # 3. Each row and column is unique.
         if self.force_unique:
             # a list per row
             self.force_unique_double_list([[self.model_vars[pos] for pos in get_row_pos(row, self.H)] for row in range(self.V)])
             # a list per column
             self.force_unique_double_list([[self.model_vars[pos] for pos in get_col_pos(col, self.V)] for col in range(self.H)])
-
+
         # if arithmetic is provided, add constraints for it
         if self.arith_rows is not None:
             assert self.arith_rows.shape == (self.V, self.H-1), f'arith_rows must be one column less than board, got {self.arith_rows.shape} for {self.board.shape}'
@@ -106,10 +106,10 @@ class Board:
 
         codes = []
         pow2 = [1 << k for k in range(m)]  # weights for bit positions (LSB at index 0)
-        for i,
+        for i, line in enumerate(model_vars):
             code = self.model.NewIntVar(0, (1 << m) - 1, f"code_{i}")
             # Sum 2^k * r[k] == code
-            self.model.Add(code == sum(pow2[k] *
+            self.model.Add(code == sum(pow2[k] * line[k] for k in range(m)))
             codes.append(code)
 
         self.model.AddAllDifferent(codes)
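The two truncated removed lines above are completed by this change: each row (and each column) of Boolean cell variables is packed into an integer code equal to sum(2^k * cell[k]), and AddAllDifferent over the codes forbids two identical lines. A minimal standalone sketch of that encoding in OR-Tools CP-SAT; the 3x3 grid and variable names are illustrative, not taken from the package:

from ortools.sat.python import cp_model

# Sketch: forbid two identical rows in a 3x3 Boolean grid by encoding each row
# as an integer code and requiring the codes to be pairwise different.
model = cp_model.CpModel()
m = 3  # row length
rows = [[model.NewBoolVar(f"cell_{i}_{k}") for k in range(m)] for i in range(m)]

pow2 = [1 << k for k in range(m)]  # weights for bit positions (LSB at index 0)
codes = []
for i, line in enumerate(rows):
    code = model.NewIntVar(0, (1 << m) - 1, f"code_{i}")
    # code == sum(2^k * line[k]) maps each distinct row pattern to a distinct integer
    model.Add(code == sum(pow2[k] * line[k] for k in range(m)))
    codes.append(code)
model.AddAllDifferent(codes)  # no two rows may share a bit pattern

solver = cp_model.CpSolver()
status = solver.Solve(model)
if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    for line in rows:
        print([solver.Value(v) for v in line])

Because the bit-pattern-to-integer mapping is injective, AddAllDifferent on the codes is equivalent to requiring every pair of rows to differ in at least one cell.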
puzzle_solver/puzzles/black_box/black_box.py
@@ -50,7 +50,7 @@ class Board:
         self.right_values = right
         self.bottom_values = bottom
         self.left_values = left
-
+
         self.model = cp_model.CpModel()
         self.ball_states: dict[Pos, cp_model.IntVar] = {}
         # (entry_pos, T, cell_pos, direction) -> True if the beam that entered from the board at "entry_pos" is present in "cell_pos" and is going in the direction "direction" at time T
@@ -86,7 +86,7 @@ class Board:
             for cell in self.get_all_pos_extended():
                 for direction in Direction:
                     self.beam_states[(entry_pos, t, cell, direction)] = self.model.NewBoolVar(f'beam:{entry_pos}:{t}:{cell}:{direction}')
-
+
         for (entry_pos, t, cell, direction) in self.beam_states.keys():
             if t not in self.beam_states_at_t:
                 self.beam_states_at_t[t] = {}
@@ -110,7 +110,7 @@ class Board:
         beam_ids.extend((beam_id, Direction.LEFT) for beam_id in self.right_cells)
         beam_ids.extend((beam_id, Direction.UP) for beam_id in self.bottom_cells)
         beam_ids.extend((beam_id, Direction.RIGHT) for beam_id in self.left_cells)
-
+
         for (beam_id, direction) in beam_ids:
             # beam at t=0 is present at beam_id and facing direction
             self.model.Add(self.beam_states[(beam_id, 0, beam_id, direction)] == 1)
@@ -189,7 +189,7 @@ class Board:
             else:
                 ball_right = False
                 ball_right_not = True
-
+
             pos_left = get_next_pos(cur_pos, direction_left)
             pos_right = get_next_pos(cur_pos, direction_right)
             pos_reflected = get_next_pos(cur_pos, reflected)
@@ -304,10 +304,4 @@ class Board:
                 ball_state = 'O' if single_res.assignment[pos] else ' '
                 res[pos.y][pos.x] = ball_state
             print(res)
-
-        # print('non unique count:', count)
-
-
-
-
-
+        generic_solve_all(self, board_to_solution, callback=callback if verbose else None, verbose=verbose)
puzzle_solver/puzzles/bridges/bridges.py
@@ -73,7 +73,7 @@ class Board:
         xhoriz_min = min(horiz_bridge[0].x, horiz_bridge[1].x)
         xhoriz_max = max(horiz_bridge[0].x, horiz_bridge[1].x)
         yhoriz = horiz_bridge[0].y
-
+
         # no equals because thats what the puzzle says
         x_contained = xhoriz_min < xvert < xhoriz_max
         y_contained = yvert_min < yhoriz < yvert_max
puzzle_solver/puzzles/chess_range/chess_range.py
@@ -173,11 +173,11 @@ class Board:
         self.H = 8  # board size
         # the puzzle rules mean the only legal positions are the starting positions of the pieces
         self.all_legal_positions: set[Pos] = {pos for _, pos in self.pieces.values()}
-        assert len(self.all_legal_positions) == len(self.pieces),
+        assert len(self.all_legal_positions) == len(self.pieces), 'positions are not unique'
 
         self.model = cp_model.CpModel()
         # Input numbers: N is number of piece, T is number of time steps (=N here), B is board size (=N here because the only legal positions are the starting positions of the pieces):
-        # Number of variables
+        # Number of variables
         # piece_positions: O(NTB)
         # is_dead: O(NT)
         # mover: O(NT)
|
@@ -341,7 +341,7 @@ class Board:
|
|
|
341
341
|
for t in range(self.T - 1):
|
|
342
342
|
self.model.AddExactlyOne([self.victim[(p, t)] for p in range(self.N)])
|
|
343
343
|
|
|
344
|
-
# optional parameter to force
|
|
344
|
+
# optional parameter to force
|
|
345
345
|
if self.max_moves_per_piece is not None:
|
|
346
346
|
for p in range(self.N):
|
|
347
347
|
self.model.Add(sum([self.mover[(p, t)] for t in range(self.T - 1)]) <= self.max_moves_per_piece)
|
|
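For context on the hunk above: the model requires exactly one "victim" (captured piece) per time step and, when max_moves_per_piece is set, bounds how many steps each piece may be the mover. A self-contained sketch of those two constraint families; N, T and the cap below are illustrative values, only the constraint shapes mirror the diff:

from ortools.sat.python import cp_model

N, T, max_moves_per_piece = 4, 5, 2  # illustrative sizes, not taken from the package
model = cp_model.CpModel()
victim = {(p, t): model.NewBoolVar(f"victim:{p}:{t}") for p in range(N) for t in range(T - 1)}
mover = {(p, t): model.NewBoolVar(f"mover:{p}:{t}") for p in range(N) for t in range(T - 1)}

# exactly one piece is captured at each time step
for t in range(T - 1):
    model.AddExactlyOne([victim[(p, t)] for p in range(N)])
# each piece may be the mover in at most max_moves_per_piece time steps
for p in range(N):
    model.Add(sum(mover[(p, t)] for t in range(T - 1)) <= max_moves_per_piece)

print(cp_model.CpSolver().Solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE))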
puzzle_solver/puzzles/chess_range/chess_solo.py
@@ -4,6 +4,6 @@ from .chess_range import PieceType
 class Board(RangeBoard):
     def __init__(self, pieces: list[str]):
         king_pieces = [p for p in range(len(pieces)) if pieces[p][0] == 'K']
-        assert len(king_pieces) == 1,
+        assert len(king_pieces) == 1, 'exactly one king piece is required'
         super().__init__(pieces, max_moves_per_piece=2, last_piece_alive=PieceType.KING)
 
puzzle_solver/puzzles/filling/filling.py
@@ -3,8 +3,8 @@ from dataclasses import dataclass
 import numpy as np
 from ortools.sat.python import cp_model
 
-from puzzle_solver.core.utils import Pos,
-from puzzle_solver.core.utils_ortools import generic_solve_all, SingleSolution
+from puzzle_solver.core.utils import Pos, get_all_pos, get_char, set_char, polyominoes, in_bounds, get_next_pos, Direction
+from puzzle_solver.core.utils_ortools import generic_solve_all, SingleSolution
 
 
 @dataclass
@@ -85,7 +85,7 @@ class Board:
             # exactly one shape is active at that position
             self.model.AddExactlyOne(s.is_active for d in self.digits for s in self.body_loc_to_shape[(d,pos)])
         # if a shape is active then all its body is active
-
+
         for s_list in self.body_loc_to_shape.values():
             for s in s_list:
                 for p in s.body:
puzzle_solver/puzzles/flood_it/flood_it.py
@@ -0,0 +1,174 @@
+import sys
+import time
+from collections import defaultdict
+from typing import Optional
+
+import numpy as np
+from ortools.sat.python import cp_model
+from ortools.sat.python.cp_model import LinearExpr as lxp
+
+from puzzle_solver.core.utils import Pos, get_all_pos, get_neighbors4, get_char
+from puzzle_solver.core.utils_ortools import generic_solve_all, SingleSolution
+
+
+class Board:
+    def __init__(self, nodes: dict[int, int], edges: dict[int, set[int]], horizon: int, start_node_id: int):
+        self.T = horizon
+        self.nodes = nodes
+        self.edges = edges
+        self.start_node_id = start_node_id
+        self.K = len(set(nodes.values()))
+
+        self.model = cp_model.CpModel()
+        self.decision: dict[tuple[int, int], cp_model.IntVar] = {}  # (t, k)
+        self.connected: dict[tuple[int, int], cp_model.IntVar] = {}  # (t, cluster_id)
+
+        self.create_vars()
+        self.add_all_constraints()
+
+    def create_vars(self):
+        for t in range(self.T - 1):  # (N-1) actions (we dont need to decide at time N)
+            for k in range(self.K):
+                self.decision[t, k] = self.model.NewBoolVar(f'decision:{t}:{k}')
+        for t in range(self.T):
+            for cluster_id in self.nodes:
+                self.connected[t, cluster_id] = self.model.NewBoolVar(f'connected:{t}:{cluster_id}')
+
+    def add_all_constraints(self):
+        # init time t=0, all clusters are not connected except start_node
+        for cluster_id in self.nodes:
+            if cluster_id == self.start_node_id:
+                self.model.Add(self.connected[0, cluster_id] == 1)
+            else:
+                self.model.Add(self.connected[0, cluster_id] == 0)
+        # each timestep I will pick either one or zero colors
+        for t in range(self.T - 1):
+            # print('fixing decision at time t=', t, 'to single action with colors', self.K)
+            self.model.Add(lxp.sum([self.decision[t, k] for k in range(self.K)]) <= 1)
+        # at the end of the game, all clusters must be connected
+        for cluster_id in self.nodes:
+            self.model.Add(self.connected[self.T-1, cluster_id] == 1)
+
+        for t in range(1, self.T):
+            for cluster_id in self.nodes:
+                # connected[t, i] must be 0 if all connencted clusters at t-1 are 0 (thus connected[t, i] <= sum(connected[t-1, j] for j in touching)
+                sum_neighbors = lxp.sum([self.connected[t-1, j] for j in self.edges[cluster_id]]) + self.connected[t-1, cluster_id]
+                self.model.Add(self.connected[t, cluster_id] <= sum_neighbors)
+                # connected[t, i] must be 0 if color chosen at time t does not match color of cluster i and not connected at t-1
+                cluster_color = self.nodes[cluster_id]
+                self.model.Add(self.connected[t, cluster_id] == 0).OnlyEnforceIf([self.decision[t-1, cluster_color].Not(), self.connected[t-1, cluster_id].Not()])
+                self.model.Add(self.connected[t, cluster_id] == 1).OnlyEnforceIf([self.connected[t-1, cluster_id]])
+
+        pairs = [(self.decision[t, k], t+1) for t in range(self.T - 1) for k in range(self.K)]
+        self.model.Minimize(lxp.weighted_sum([p[0] for p in pairs], [p[1] for p in pairs]))
+
+    def solve(self) -> list[SingleSolution]:
+        def board_to_solution(board: Board, solver: cp_model.CpSolverSolutionCallback) -> SingleSolution:
+            assignment: list[str] = [None for _ in range(self.T - 1)]
+            for t in range(self.T - 1):
+                for k in range(self.K):
+                    if solver.Value(self.decision[t, k]) == 1:
+                        assignment[t] = k
+                        break
+            return SingleSolution(assignment=assignment)
+        return generic_solve_all(self, board_to_solution, verbose=False, max_solutions=1)
+
+
+def solve_minimum_steps(board: np.array, start_pos: Optional[Pos] = None, verbose: bool = True) -> int:
+    tic = time.time()
+    all_colors: set[str] = {c.item().strip() for c in np.nditer(board) if c.item().strip()}
+    color_to_int: dict[str, int] = {c: i for i, c in enumerate(sorted(all_colors))}  # colors string to color id
+    int_to_color: dict[int, str] = {i: c for c, i in color_to_int.items()}
+
+    graph: dict[Pos, int] = _board_to_graph(board)  # position to cluster id
+    nodes: dict[int, int] = {cluster_id: color_to_int[get_char(board, pos)] for pos, cluster_id in graph.items()}
+    edges = _graph_to_edges(board, graph)  # cluster id to touching cluster ids
+    if start_pos is None:
+        start_pos = Pos(0,0)
+
+    def solution_int_to_str(solution: SingleSolution):
+        return [int_to_color.get(color_id, '?') for color_id in solution.assignment]
+
+    def print_solution(solution: SingleSolution):
+        solution = solution_int_to_str(solution)
+        print("Solution:", solution)
+    solution = _binary_search_solution(nodes, edges, graph[start_pos], callback=print_solution if verbose else None, verbose=verbose)
+    if verbose:
+        if solution is None:
+            print("No solution found")
+        else:
+            solution = solution_int_to_str(solution)
+            print(f"Best Horizon is: T={len(solution)}")
+            print("Best solution is:", solution)
+        toc = time.time()
+        print(f"Time taken: {toc - tic:.2f} seconds")
+    return solution
+
+
+def _board_to_graph(board: np.array) -> dict[int, set[int]]:
+    def dfs_flood(board: np.array, pos: Pos, cluster_id: int, graph: dict[Pos, int]):
+        if pos in graph:
+            return
+        graph[pos] = cluster_id
+        for neighbor in get_neighbors4(pos, board.shape[0], board.shape[1]):
+            if get_char(board, neighbor) == get_char(board, pos):
+                dfs_flood(board, neighbor, cluster_id, graph)
+    graph: dict[Pos, int] = {}
+    cluster_id = 0
+    V, H = board.shape
+    for pos in get_all_pos(V, H):
+        if pos in graph:
+            continue
+        dfs_flood(board, pos, cluster_id, graph)
+        cluster_id += 1
+    return graph
+
+
+def _graph_to_edges(board: np.array, graph: dict[Pos, int]) -> dict[int, set[int]]:
+    cluster_edges: dict[int, set[int]] = defaultdict(set)
+    V, H = board.shape
+    for pos in get_all_pos(V, H):
+        for neighbor in get_neighbors4(pos, V, H):
+            n1, n2 = graph[pos], graph[neighbor]
+            if n1 != n2:
+                cluster_edges[n1].add(n2)
+                cluster_edges[n2].add(n1)
+    return cluster_edges
+
+
+def _binary_search_solution(nodes, edges, start_node_id, callback, verbose: bool = True):
+    if len(nodes) <= 1:
+        return SingleSolution(assignment=[])
+    min_T = 2
+    max_T = len(nodes)
+    hist = {}  # record historical T and best solution
+    while min_T <= max_T:
+        if max_T - min_T <= 20:  # small gap, just take the middle
+            T = min_T + (max_T - min_T) // 2
+        else:  # large gap, just +5 the min to not go too far
+            T = min_T + 15
+        # main check for binary search
+        if T in hist:  # already done and found solution
+            solutions = hist[T]
+        else:
+            if verbose:
+                print(f"Trying with exactly {T-1} moves...", end='')
+                sys.stdout.flush()
+            binst = Board(nodes=nodes, edges=edges, horizon=T, start_node_id=start_node_id)
+            solutions = binst.solve()
+            if verbose:
+                print(' Possible!' if len(solutions) > 0 else ' Not possible!')
+                if len(solutions) > 0:
+                    callback(solutions[0])
+        if min_T == max_T:
+            hist[T] = solutions
+            break
+        if len(solutions) > 0:
+            hist[T] = solutions
+            max_T = T
+        else:
+            min_T = T + 1
+    best_solution = min(hist.items(), key=lambda x: x[0])[1][0]
+    return best_solution
+
+
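The new flood_it.py reduces the Flood-It board to a cluster graph (each maximal same-colored region becomes one node), then builds a CP-SAT model over a fixed horizon T in which each step selects at most one color and Boolean "connected" indicators spread along cluster edges; _binary_search_solution then searches over T for the shortest winning sequence. A usage sketch, assuming the package is installed and importing by the module path shown in the file list above; the 3x3 board is illustrative, not from the package's tests:

import numpy as np
from puzzle_solver.puzzles.flood_it.flood_it import solve_minimum_steps

# Single-character color labels; same-colored neighbors are clustered internally.
board = np.array([
    ['R', 'G', 'G'],
    ['R', 'B', 'G'],
    ['B', 'B', 'R'],
])

# Floods from the top-left cell by default. With verbose=True the call prints its
# progress and returns the color sequence of the shortest flood it found.
moves = solve_minimum_steps(board, verbose=True)
print(moves)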
puzzle_solver/puzzles/flood_it/parse_map/parse_map.py
@@ -0,0 +1,198 @@
+"""
+This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk and converts them to a json file.
+Look at the ./input_output/ directory for examples of input images and output json files.
+The output json is used in the test_solve.py file to test the solver.
+"""
+# import json
+from pathlib import Path
+import numpy as np
+cv = None
+Image = None
+
+
+def extract_lines(bw):
+    # Create the images that will use to extract the horizontal and vertical lines
+    horizontal = np.copy(bw)
+    vertical = np.copy(bw)
+
+    cols = horizontal.shape[1]
+    horizontal_size = cols // 20
+    # Create structure element for extracting horizontal lines through morphology operations
+    horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1))
+    horizontal = cv.erode(horizontal, horizontalStructure)
+    horizontal = cv.dilate(horizontal, horizontalStructure)
+    horizontal_means = np.mean(horizontal, axis=1)
+    horizontal_cutoff = np.percentile(horizontal_means, 50)
+    # location where the horizontal lines are
+    horizontal_idx = np.where(horizontal_means > horizontal_cutoff)[0]
+    # print(f"horizontal_idx: {horizontal_idx}")
+    # height = len(horizontal_idx)
+    # show_wait_destroy("horizontal", horizontal)  # this has the horizontal lines
+
+    rows = vertical.shape[0]
+    verticalsize = rows // 20
+    # Create structure element for extracting vertical lines through morphology operations
+    verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize))
+    vertical = cv.erode(vertical, verticalStructure)
+    vertical = cv.dilate(vertical, verticalStructure)
+    vertical_means = np.mean(vertical, axis=0)
+    vertical_cutoff = np.percentile(vertical_means, 50)
+    vertical_idx = np.where(vertical_means > vertical_cutoff)[0]
+    # print(f"vertical_idx: {vertical_idx}")
+    # width = len(vertical_idx)
+    # print(f"height: {height}, width: {width}")
+    # print(f"vertical_means: {vertical_means}")
+    # show_wait_destroy("vertical", vertical)  # this has the vertical lines
+
+    vertical = cv.bitwise_not(vertical)
+    # show_wait_destroy("vertical_bit", vertical)
+
+    return horizontal_idx, vertical_idx
+
+def show_wait_destroy(winname, img):
+    cv.imshow(winname, img)
+    cv.moveWindow(winname, 500, 0)
+    cv.waitKey(0)
+    cv.destroyWindow(winname)
+
+
+def mean_consecutives(arr: np.ndarray) -> np.ndarray:
+    """if a sequence of values is consecutive, then average the values"""
+    sums = []
+    counts = []
+    for i in range(len(arr)):
+        if i == 0:
+            sums.append(arr[i])
+            counts.append(1)
+        elif arr[i] == arr[i-1] + 1:
+            sums[-1] += arr[i]
+            counts[-1] += 1
+        else:
+            sums.append(arr[i])
+            counts.append(1)
+    return np.array(sums) // np.array(counts)
+
+def main(image):
+    global Image
+    global cv
+    import matplotlib.pyplot as plt
+    from PIL import Image as Image_module
+    import cv2 as cv_module
+    Image = Image_module
+    cv = cv_module
+
+
+    image_path = Path(image)
+    output_path = image_path.parent / (image_path.stem + '.json')
+    src = cv.imread(image, cv.IMREAD_COLOR)
+    assert src is not None, f'Error opening image: {image}'
+    if len(src.shape) != 2:
+        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
+    else:
+        gray = src
+    # now the image is in grayscale
+
+    # Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol
+    gray = cv.bitwise_not(gray)
+    bw = cv.adaptiveThreshold(gray.copy(), 255, cv.ADAPTIVE_THRESH_MEAN_C, \
+                              cv.THRESH_BINARY, 15, -2)
+    # show_wait_destroy("binary", bw)
+
+    # show_wait_destroy("src", src)
+    horizontal_idx, vertical_idx = extract_lines(bw)
+    horizontal_idx = mean_consecutives(horizontal_idx)
+    vertical_idx = mean_consecutives(vertical_idx)
+    median_vertical_dist = np.median(np.diff(vertical_idx))
+    median_horizontal_dist = np.median(np.diff(horizontal_idx))
+    print(f"median_vertical_dist: {median_vertical_dist}, median_horizontal_dist: {median_horizontal_dist}")
+    height = len(horizontal_idx)
+    width = len(vertical_idx)
+    print(f"height: {height}, width: {width}")
+    print(f"horizontal_idx: {horizontal_idx}")
+    print(f"vertical_idx: {vertical_idx}")
+    output_rgb = {}
+    j_idx = 0
+    for j in range(height - 1):
+        i_idx = 0
+        for i in range(width - 1):
+            hidx1, hidx2 = horizontal_idx[j], horizontal_idx[j+1]
+            vidx1, vidx2 = vertical_idx[i], vertical_idx[i+1]
+            hidx1 = max(0, hidx1 - 2)
+            hidx2 = min(src.shape[0], hidx2 + 4)
+            vidx1 = max(0, vidx1 - 2)
+            vidx2 = min(src.shape[1], vidx2 + 4)
+            if (hidx2 - hidx1) < median_horizontal_dist * 0.5 or (vidx2 - vidx1) < median_vertical_dist * 0.5:
+                continue
+            cell = src[hidx1:hidx2, vidx1:vidx2]
+            mid_x = cell.shape[1] // 2
+            mid_y = cell.shape[0] // 2
+            print(f"mid_x: {mid_x}, mid_y: {mid_y}")
+            cell_50_percent = cell[int(mid_y*0.5):int(mid_y*1.5), int(mid_x*0.5):int(mid_x*1.5)]
+            # show_wait_destroy(f"cell_{i_idx}_{j_idx}", cell_50_percent)
+            output_rgb[j_idx, i_idx] = cell_50_percent.mean(axis=(0, 1))
+            print(f"output_rgb[{j_idx}, {i_idx}]: {output_rgb[j_idx, i_idx]}")
+            i_idx += 1
+        j_idx += 1
+
+    colors_to_cluster = cluster_colors(output_rgb)
+    width = max(pos[1] for pos in output_rgb.keys()) + 1
+    height = max(pos[0] for pos in output_rgb.keys()) + 1
+    out = np.zeros((height, width), dtype=object)
+    print(colors_to_cluster)
+    for pos, cluster_id in colors_to_cluster.items():
+        out[pos[0], pos[1]] = cluster_id
+    print('Shape of out:', out.shape)
+
+    with open(output_path, 'w') as f:
+        f.write('[\n')
+        for i, row in enumerate(out):
+            f.write(' ' + str(row.tolist()).replace("'", '"'))
+            if i != len(out) - 1:
+                f.write(',')
+            f.write('\n')
+        f.write(']')
+    print('output json: ', output_path)
+
+def euclidean_distance(a: tuple[int, int, int], b: tuple[int, int, int]) -> int:
+    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 + (a[2] - b[2]) ** 2) ** 0.5
+
+KNOWN_COLORS = {
+    (0, 0, 255): 'Red',
+    (0, 255, 0): 'Green',
+    (255, 77, 51): 'Blue',
+    (0, 255, 255): 'Yellow',
+    (255, 153, 255): 'Pink',
+    (0, 128, 255): 'Orange',
+    (255, 204, 102): 'Cyan',
+    (179, 255, 179): 'Washed Green',
+    (77, 77, 128): 'Brown',
+    (179, 0, 128): 'Purple',
+}
+
+def cluster_colors(rgb: dict[tuple[int, int], tuple[int, int, int]]) -> dict[tuple[int, int, int], int]:
+    MIN_DIST = 10  # if distance between two colors is less than this, then they are the same color
+    colors_to_cluster = KNOWN_COLORS.copy()
+    for pos, color in rgb.items():
+        color = tuple(color)
+        if color in colors_to_cluster:
+            continue
+        for existing_color, existing_cluster_id in colors_to_cluster.items():
+            if euclidean_distance(color, existing_color) < MIN_DIST:
+                colors_to_cluster[color] = existing_cluster_id
+                break
+        else:
+            new_name = str(', '.join(str(int(c)) for c in color))
+            print('WARNING: new color found:', new_name, 'at pos:', pos)
+            colors_to_cluster[color] = new_name
+    pos_to_cluster = {pos: colors_to_cluster[tuple(color)] for pos, color in rgb.items()}
+    return pos_to_cluster
+
+
+if __name__ == '__main__':
+    # to run this script and visualize the output, in the root run:
+    # python .\src\puzzle_solver\puzzles\flood_it\parse_map\parse_map.py | python .\src\puzzle_solver\utils\visualizer.py --read_stdin
+    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c10m5%23637467359431429.png')
+    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c6m5%23132018455881870.png')
+    # main(Path(__file__).parent / 'input_output' / 'flood.html#12x12c6m0%23668276603006993.png')
+    # main(Path(__file__).parent / 'input_output' / 'flood.html#20x20c8m0%23991967486182787.png')flood.html#20x20c4m0%23690338575695152
+    main(Path(__file__).parent / 'input_output' / 'flood.html#20x20c4m0%23690338575695152.png')
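In the new parse_map.py, extract_lines finds candidate grid-line pixels with morphological erode/dilate, and mean_consecutives then collapses each run of adjacent indices (a grid line several pixels thick) into a single representative index by averaging the run. A condensed, standalone version of that helper with a worked example (the input indices are illustrative):

import numpy as np

def mean_consecutives(arr: np.ndarray) -> np.ndarray:
    """If a run of values is consecutive, replace the run with its (integer) average."""
    sums, counts = [], []
    for i in range(len(arr)):
        if i == 0 or arr[i] != arr[i - 1] + 1:
            sums.append(arr[i])      # start a new run
            counts.append(1)
        else:
            sums[-1] += arr[i]       # extend the current run
            counts[-1] += 1
    return np.array(sums) // np.array(counts)

# Three detected line bands at pixel rows 10-12, 40-41 and 70 collapse to one
# index per grid line: [11 40 70].
print(mean_consecutives(np.array([10, 11, 12, 40, 41, 70])))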
puzzle_solver/puzzles/galaxies/galaxies.py
@@ -85,7 +85,7 @@ class Board:
             self.model.AddExactlyOne(pos_vars)
             for galaxy_idx, v in self.pos_to_galaxy[pos].items():
                 galaxy_vars.setdefault(galaxy_idx, {})[pos] = v
-        for
+        for pos_vars in galaxy_vars.values():
            force_connected_component(self.model, pos_vars)
 
 
puzzle_solver/puzzles/galaxies/parse_map/parse_map.py
@@ -1,5 +1,5 @@
 """
-This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk/~sgtatham/puzzles/js/galaxies.html and converts them to a json file.
+This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk/~sgtatham/puzzles/js/galaxies.html and converts them to a json file.
 Look at the ./input_output/ directory for examples of input images and output json files.
 The output json is used in the test_solve.py file to test the solver.
 """
@@ -25,7 +25,7 @@ def extract_lines(bw):
     # location where the horizontal lines are
     horizontal_idx = np.where(horizontal_means > horizontal_cutoff)[0]
     # print(f"horizontal_idx: {horizontal_idx}")
-    height = len(horizontal_idx)
+    # height = len(horizontal_idx)
     # show_wait_destroy("horizontal", horizontal)  # this has the horizontal lines
 
     rows = vertical.shape[0]
@@ -38,7 +38,7 @@ def extract_lines(bw):
     vertical_cutoff = np.percentile(vertical_means, 50)
     vertical_idx = np.where(vertical_means > vertical_cutoff)[0]
     # print(f"vertical_idx: {vertical_idx}")
-    width = len(vertical_idx)
+    # width = len(vertical_idx)
     # print(f"height: {height}, width: {width}")
     # print(f"vertical_means: {vertical_means}")
     # show_wait_destroy("vertical", vertical)  # this has the vertical lines
puzzle_solver/puzzles/guess/guess.py
@@ -5,7 +5,7 @@ import numpy as np
 
 
 class Board:
-    def __init__(self, num_pegs: int = 4, all_colors:
+    def __init__(self, num_pegs: int = 4, all_colors: tuple[str] = ('R', 'Y', 'G', 'B', 'O', 'P'), show_warnings: bool = True, show_progress: bool = False):
         assert num_pegs >= 1, 'num_pegs must be at least 1'
         assert len(all_colors) == len(set(all_colors)), 'all_colors must contain only unique colors'
         self.previous_guesses = []
puzzle_solver/puzzles/heyawake/heyawake.py
@@ -1,12 +1,13 @@
 import numpy as np
 from ortools.sat.python import cp_model
 
-from puzzle_solver.core.utils import Pos, get_all_pos, get_neighbors4, get_pos,
+from puzzle_solver.core.utils import Pos, get_all_pos, get_neighbors4, get_pos, get_char
 from puzzle_solver.core.utils_ortools import generic_solve_all, SingleSolution, force_connected_component
 from puzzle_solver.core.utils_visualizer import render_shaded_grid
 
+
 def return_3_consecutives(int_list: list[int]) -> list[tuple[int, int]]:
-    """Given a list of integers (mostly with duplicates), return every consecutive sequence of 3 integer changes.
+    """Given a list of integers (mostly with duplicates), return every consecutive sequence of 3 integer changes.
     i.e. return a list of (begin_idx, end_idx) tuples where for each r=int_list[begin_idx:end_idx] we have r[0]!=r[1] and r[-2]!=r[-1] and len(r)>=3"""
     out = []
     change_indices = [i for i in range(len(int_list) - 1) if int_list[i] != int_list[i+1]]
@@ -18,7 +19,6 @@ def return_3_consecutives(int_list: list[int]) -> list[tuple[int, int]]:
             continue
         out.append((begin_idx, end_idx))
     return out
-
 
 class Board:
     def __init__(self, board: np.array, region_to_clue: dict[str, int]):
puzzle_solver/puzzles/inertia/inertia.py
@@ -118,4 +118,4 @@ def solve_optimal_walk(
         seed: int = 0,
         verbose: bool = False
 ) -> list[tuple[Pos, Pos]]:
-    return tsp.solve_optimal_walk(start_pos, edges, gems_to_edges, restarts=restarts, time_limit_ms=time_limit_ms, seed=seed, verbose=verbose)
+    return tsp.solve_optimal_walk(start_pos, edges, gems_to_edges, restarts=restarts, time_limit_ms=time_limit_ms, seed=seed, verbose=verbose)
puzzle_solver/puzzles/inertia/parse_map/parse_map.py
@@ -1,25 +1,24 @@
 """
-This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk/~sgtatham/puzzles/js/inertia.html and converts them to a json file.
+This file is a simple helper that parses the images from https://www.chiark.greenend.org.uk/~sgtatham/puzzles/js/inertia.html and converts them to a json file.
 Look at the ./input_output/ directory for examples of input images and output json files.
 The output json is used in the test_solve.py file to test the solver.
 """
 from pathlib import Path
 import numpy as np
-import numpy as np
 cv = None
 Image = None
 
 def load_cell_templates(p: Path) -> dict[str, dict]:
-    img = Image.open(p)
+    # img = Image.open(p)
     src = cv.imread(p, cv.IMREAD_COLOR)
-    rgb = np.asarray(img).astype(np.float32) / 255.0
+    # rgb = np.asarray(img).astype(np.float32) / 255.0
     if len(src.shape) != 2:
         gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
     else:
         gray = src
     gray = cv.bitwise_not(gray)
-    bw = cv.adaptiveThreshold(gray.copy(), 255, cv.ADAPTIVE_THRESH_MEAN_C, \
-
+    # bw = cv.adaptiveThreshold(gray.copy(), 255, cv.ADAPTIVE_THRESH_MEAN_C, \
+    #                           cv.THRESH_BINARY, 15, -2)
     return {"gray": gray}
 
 
@@ -53,10 +52,14 @@ def get_distance_robust(cell: np.ndarray, template: np.ndarray, max_shift: int =
     for dy in range(-max_shift, max_shift + 1):
         for dx in range(-max_shift, max_shift + 1):
             # compute overlapping slices for this shift
-            y0a = max(0, dy)
-
-
-
+            y0a = max(0, dy)
+            y1a = H + min(0, dy)
+            x0a = max(0, dx)
+            x1a = W + min(0, dx)
+            y0b = max(0, -dy)
+            y1b = H + min(0, -dy)
+            x0b = max(0, -dx)
+            x1b = W + min(0, -dx)
 
             if y1a <= y0a or x1a <= x0a:  # no overlap
                 continue
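The added lines compute, for each candidate shift (dy, dx), a pair of equally sized windows: the "a" slices index one image and the "b" slices the other, so the two crops line up as if one image had been shifted by (dy, dx). A small sketch verifying that slice arithmetic on dummy arrays; H, W, the arrays and the scoring line are illustrative, while the package's get_distance_robust compares a cell against a template:

import numpy as np

H, W = 5, 7
cell = np.arange(H * W).reshape(H, W)
template = np.arange(H * W).reshape(H, W)

dy, dx = 1, -2  # try shifting the cell down by 1 and left by 2
y0a, y1a = max(0, dy), H + min(0, dy)
x0a, x1a = max(0, dx), W + min(0, dx)
y0b, y1b = max(0, -dy), H + min(0, -dy)
x0b, x1b = max(0, -dx), W + min(0, -dx)

a = cell[y0a:y1a, x0a:x1a]
b = template[y0b:y1b, x0b:x1b]
# Both windows shrink by |dy| rows and |dx| columns, so they always match in shape.
assert a.shape == b.shape == (H - abs(dy), W - abs(dx))
# A mean absolute difference over the overlap could then score this shift:
print(np.abs(a.astype(float) - b.astype(float)).mean())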