"""
Nexus-Nano Evaluator

Ultra-lightweight 2.8M-parameter CNN evaluator.

Research: MobileNet architecture principles for efficiency.
"""

import logging
from pathlib import Path

import chess
import numpy as np
import onnxruntime as ort

logger = logging.getLogger(__name__)


class NexusNanoEvaluator:
    """
    Lightweight evaluator for Nexus-Nano.

    Optimized for speed over accuracy.
    """

    PIECE_VALUES = {
        chess.PAWN: 100,
        chess.KNIGHT: 320,
        chess.BISHOP: 330,
        chess.ROOK: 500,
        chess.QUEEN: 900,
        chess.KING: 0,
    }

    def __init__(self, model_path: str, num_threads: int = 1):
        """Initialize a single-threaded (by default) ONNX Runtime session for speed."""
        self.model_path = Path(model_path)
        if not self.model_path.exists():
            raise FileNotFoundError(f"Model not found: {model_path}")

        # Keep the session lean: sequential execution on a fixed thread budget,
        # with full graph optimization applied at load time.
        sess_options = ort.SessionOptions()
        sess_options.intra_op_num_threads = num_threads
        sess_options.inter_op_num_threads = num_threads
        sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        logger.info("Loading Nexus-Nano model...")
        self.session = ort.InferenceSession(
            str(self.model_path),
            sess_options=sess_options,
            providers=['CPUExecutionProvider']
        )

        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name

        logger.info(f"✅ Model loaded: {self.input_name} -> {self.output_name}")

    def fen_to_tensor(self, board: chess.Board) -> np.ndarray:
        """
        Fast 12-channel tensor conversion.

        Optimized for minimal overhead.
        """
        tensor = np.zeros((1, 12, 8, 8), dtype=np.float32)

        piece_channels = {
            chess.PAWN: 0, chess.KNIGHT: 1, chess.BISHOP: 2,
            chess.ROOK: 3, chess.QUEEN: 4, chess.KING: 5,
        }

        for square, piece in board.piece_map().items():
            rank, file = divmod(square, 8)
            channel = piece_channels[piece.piece_type]
            if piece.color == chess.BLACK:
                channel += 6
            tensor[0, channel, rank, file] = 1.0

        return tensor

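    # Illustration of the fen_to_tensor layout above: channels 0-5 hold the white
    # pawn/knight/bishop/rook/queen/king planes and channels 6-11 the same pieces
    # for black. A white knight on g1 (square 6 -> rank 0, file 6) therefore sets
    # tensor[0, 1, 0, 6] = 1.0.
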
    def evaluate_neural(self, board: chess.Board) -> float:
        """
        Fast neural evaluation.

        Single forward pass, minimal post-processing.
        """
        input_tensor = self.fen_to_tensor(board)
        outputs = self.session.run([self.output_name], {self.input_name: input_tensor})

        raw_value = float(outputs[0][0][0])

        return raw_value * 300.0

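    # Note on scale: the indexing outputs[0][0][0] above assumes the model emits a
    # single scalar per position, and the 300.0 factor is taken as the constant that
    # maps the raw network output onto roughly the same centipawn range used by
    # evaluate_material (an assumption; the exact calibration is not documented here).
    # Example: a raw output of 0.5 is reported as 0.5 * 300.0 = 150.0.
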
    def evaluate_material(self, board: chess.Board) -> int:
        """Fast material count (White minus Black, in centipawns)."""
        material = 0

        for piece_type, value in self.PIECE_VALUES.items():
            if piece_type == chess.KING:
                continue
            white = len(board.pieces(piece_type, chess.WHITE))
            black = len(board.pieces(piece_type, chess.BLACK))
            material += (white - black) * value

        return material

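    # Worked example for evaluate_material above: with White up a knight for two
    # pawns, the balance is +320 (knight) - 2 * 100 (pawns) = +120 using PIECE_VALUES.
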
    def evaluate_hybrid(self, board: chess.Board) -> float:
        """
        Fast hybrid blend: 85% neural + 15% material.

        The relatively high material weight adds stability in fast games.
        """
        neural = self.evaluate_neural(board)
        material = self.evaluate_material(board)

        hybrid = 0.85 * neural + 0.15 * material

        # Report the score from the side to move's perspective.
        if board.turn == chess.BLACK:
            hybrid = -hybrid

        return hybrid

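    # Worked example for evaluate_hybrid above: neural = 150.0 and material = +100
    # (White up a pawn) blend to 0.85 * 150.0 + 0.15 * 100 = 142.5; with Black to
    # move the method returns -142.5.
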
    def get_model_size_mb(self) -> float:
        """Return the on-disk model size in MB."""
        return self.model_path.stat().st_size / (1024 * 1024)
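

# Minimal usage sketch. The model filename "nexus_nano.onnx" is an assumption for
# illustration; point it at whatever ONNX export of the network you actually have.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    evaluator = NexusNanoEvaluator("nexus_nano.onnx", num_threads=1)
    board = chess.Board()  # standard starting position

    print(f"Model size: {evaluator.get_model_size_mb():.2f} MB")
    print(f"Neural eval: {evaluator.evaluate_neural(board):.1f}")
    print(f"Hybrid eval: {evaluator.evaluate_hybrid(board):.1f}")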