I'm using BFS and IDDFS to find the optimal solution of the 8-tile puzzle, but my IDDFS is missing solutions and I'm not sure why. I've checked, and every node seems to visit all of its children, at least in the first iteration, so I'm not sure what the source of the problem is. With the tiles arranged as [1,4,5,0,8,2,3,6,7], my IDDFS does not find the correct solution at the appropriate depth and instead stumbles upon a longer solution later:
BFS solution path: [1, 4, 5, 2, 8, 5, 4, 1, 3, 6, 7, 8, 5, 4, 1]
IDDFS solution path: [8, 2, 5, 4, 1, 8, 2, 1, 8, 2, 1, 8, 4, 5, 8, 4, 2, 1, 3, 6, 7, 8, 5, 2, 1]
Note that my IDDFS depth limit k is increased in steps of 1, so I should have found the optimal solution at the same depth my BFS did, i.e. at the earliest possible depth. I'm adding the IDDFS code and the rest of the code needed to run it below:
# helper method for the iddfs, solves a dfs to a limited depth k
def dfs_solve_k(self, node, depth):
    if depth < 0:
        return False
    self.total_count += 1
    board_tuple = tuple(tuple(row) for row in node.board_array)
    self.explored.add(board_tuple)
    if node.check_end_state():
        return True
    # Iterate over possible swap positions in a specific order
    for swappable_pos in node.possible_swap_positions:
        tracker = node.board_array[swappable_pos.i][swappable_pos.j]
        # Perform swap to reach a new board
        new_board = node.swap(swappable_pos)
        new_board_tuple = tuple(tuple(row) for row in new_board)
        # Check if the new board state has not been explored before
        if new_board_tuple not in self.explored:
            new_node = StateNode(new_board, swappable_pos)
            is_solved = self.dfs_solve_k(new_node, depth - 1)
            if is_solved:
                self.path.append(tracker)
                return True
    return False

# iterative deepening dfs
def iddfs(self, node, max_depth):
    search_depth = 1
    while search_depth <= max_depth:
        self.explored.clear()
        self.path = []
        is_solved = self.dfs_solve_k(node, search_depth)
        if is_solved:
            self.path = self.path[::-1]
            return True
        else:
            search_depth += 1
    return False
additional code for testing:
import math
from copy import deepcopy
from enum import Enum

rows = 3
columns = 3
goal_state = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]

class Position:
    def __init__(self, i, j):
        self.i = i
        self.j = j

class Directions(Enum):
    LEFT = Position(0, -1)
    RIGHT = Position(0, 1)
    UP = Position(-1, 0)
    DOWN = Position(1, 0)

def get_inv_count(board):
    inv_count = 0
    empty_value = 0
    n = len(board)
    for i in range(n * n):
        for j in range(i + 1, n * n):
            row_i, col_i = divmod(i, n)
            row_j, col_j = divmod(j, n)
            val_i = board[row_i][col_i]
            val_j = board[row_j][col_j]
            if val_i != empty_value and val_j != empty_value and val_i > val_j:
                inv_count += 1
    return inv_count

# This function returns true if the given puzzle is solvable.
def is_solvable(puzzle):
    # Count inversions in the given puzzle
    inv_count = get_inv_count(puzzle)
    # Return true if the inversion count is even.
    return inv_count % 2 == 0

def find_position(value, board):
    for i in range(3):
        for j in range(3):
            if board[i][j] == value:
                return Position(i, j)

def calculate_distance(position1, position2):
    x1, y1 = position1
    x2 = position2.i
    y2 = position2.j
    distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    return distance

def heuristic(board_state):
    distance_score = 0
    for i in range(3):
        for j in range(3):
            cell_value = board_state[i][j]
            if cell_value != 0:  # Skip the empty tile (represented by 0)
                target_position = find_position(cell_value, goal_state)
                current_position = (i, j)
                distance = calculate_distance(current_position, target_position)
                distance_score += distance
    return distance_score

class StateNode:
    def __init__(self, board, zero_pos, score=0, parent=None):
        self.board_array = board
        self.zero_pos = zero_pos
        self.score = score
        self.possible_swap_positions = []
        self.parent_node = parent
        for direction in Directions:
            new_zero_pos = Position(self.zero_pos.i + direction.value.i,
                                    self.zero_pos.j + direction.value.j)
            if (
                0 <= new_zero_pos.i < len(self.board_array) and
                0 <= new_zero_pos.j < len(self.board_array)
            ):
                self.possible_swap_positions.append(new_zero_pos)

    def get_value(self, position):
        return self.board_array[position.i][position.j]

    def swap(self, swappable_pos):
        # deep copy our array
        new_board = deepcopy(self.board_array)
        temp_value = self.board_array[swappable_pos.i][swappable_pos.j]
        # change current zero to value in new position
        new_board[self.zero_pos.i][self.zero_pos.j] = temp_value
        # change swapped position to zero
        new_board[swappable_pos.i][swappable_pos.j] = 0
        return new_board

    def check_end_state(self):
        for board_row, end_state_row in zip(self.board_array, goal_state):
            if board_row != end_state_row:
                return False
        return True

class GameBoard:
    def __init__(self, values_array):
        self.total_count = 0
        self.rows = rows
        self.columns = columns
        self.path = []
        self.explored = set()
        expected_size = rows * columns
        self.game_board = [[0] * columns for _ in range(rows)]
        # Check if the dimensions of values_array match the dimensions of the game_board
        try:
            values_array = [int(value) for value in values_array]
        except ValueError:
            raise ValueError("All elements in values_array must be convertible to integers.")
        if len(values_array) == expected_size:
            self.game_board = [values_array[i:i + self.columns] for i in range(0, expected_size, self.columns)]
        else:
            raise ValueError(
                "The dimensions of the provided values_array do not match the dimensions of the game board.")
Be careful how you define whether a state has been explored. There might be a longer path that starts differently, reaches an intermediate state shared with your solution path, and only then continues toward the goal state. When running DFS at the depth of your optimal solution (here 15), you might follow this longer path first and mark the intermediate state as explored without being able to reach the goal, because the length of the alternative path exceeds 15. Then, when following the first steps of your intended solution path, you reach the already-visited intermediate state and decide not to extend your search there.
In conclusion, your IDDFS will find a solution if one exists, but not necessarily the shortest one.
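One common way to keep a depth-limited DFS from pruning the shorter route is to remember, for each state, the largest remaining depth it has already been expanded with, and to re-expand it whenever it is reached again with more budget. Below is a minimal sketch of that idea as a variant of your dfs_solve_k; the attribute name explored_depth is mine (an assumption, not part of your code) and would replace the explored set on GameBoard.

# Sketch, not a drop-in from your repo: prune a state only if it was already
# expanded with at least as much remaining depth, so a shorter route to a
# shared intermediate state is never blocked by a longer route seen earlier.
def dfs_solve_k(self, node, depth):
    if depth < 0:
        return False
    self.total_count += 1
    board_tuple = tuple(tuple(row) for row in node.board_array)
    # explored_depth: dict mapping board state -> best remaining depth seen so far
    if self.explored_depth.get(board_tuple, -1) >= depth:
        return False  # already tried from here with the same or a larger budget
    self.explored_depth[board_tuple] = depth
    if node.check_end_state():
        return True
    for swappable_pos in node.possible_swap_positions:
        tracker = node.board_array[swappable_pos.i][swappable_pos.j]
        new_board = node.swap(swappable_pos)
        new_node = StateNode(new_board, swappable_pos)
        if self.dfs_solve_k(new_node, depth - 1):
            self.path.append(tracker)
            return True
    return False

In iddfs you would then reset self.explored_depth = {} at the start of each iteration instead of clearing self.explored; nothing else needs to change. The price is that some states are re-expanded when reached again with more depth to spare, which is exactly what restores the guarantee that depth limit k fails only if no solution of length at most k exists. An alternative with the same effect is to track only the states on the current recursion path (removing each one when you backtrack), which prevents cycles without ever blocking a different route.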