I implemented a negamax and when trying to implemente the alpha beta version the results are different. To my understanding they should be the same regardless of node ordering.

My evaluation function returns a balanced value with the perspective of the player and if game ended returns MAX if won else MIN. My initial parameters are alpha = MIN beta = MAX

```
def negamax(self, player, board, depth):
if depth == 0 or board.end_of_game():
return self.evaluate(player, board), None
value = self.MIN
def try_move(move):
temp_board = deepcopy(board)
temp_board.make_move(move, player)
return temp_board
moves = board.legal_moves(player)
if not moves: # Current player has no move
return self.evaluate(player, board), None
best_move = moves[0]
for m in moves:
value, best_move = max((value, best_move), (-self.negamax(board.opponent(player), try_move(m), depth-1)[0], m))
return value, best_move
def negamax_AB(self, player, board, depth, alpha, beta):
if depth == 0 or board.end_of_game():
return self.evaluate(player, board), None
value = self.MIN
def try_move(move):
temp_board = deepcopy(board)
temp_board.make_move(move, player)
return temp_board
moves = board.legal_moves(player)
if not moves: # Current player has no move
return self.evaluate(player, board), None
best_move = moves[0]
for m in moves:
value, best_move = max((value, best_move), (-self.negamax_AB(board.opponent(player), try_move(m), depth-1, -beta, -alpha)[0], m))
alpha = max(alpha, value)
if alpha >= beta:
break
return value, best_move
```