@InProceedings{ICIAP2022,
  author="Abukmeil, Mohanad and Ferrari, Stefano and Genovese, Angelo and Piuri, Vincenzo and Scotti, Fabio",
  editor="Sclaroff, Stan and Distante, Cosimo and Leo, Marco and Farinella, Giovanni M. and Tombari, Federico",
  title="Grad$_2${VAE}: An Explainable Variational Autoencoder Model Based on Online Attentions Preserving Curvatures of Representations",
  booktitle="Image Analysis and Processing -- ICIAP 2022",
  year="2022",
  publisher="Springer International Publishing",
  address="Cham",
  pages="670--681",
  abstract="Unsupervised learning (UL) is a class of machine learning (ML) that learns from data, reduces dimensionality, and visualizes decisions without labels. Among UL models, the variational autoencoder (VAE) is regulated by variational inference to approximate the posterior distribution of large datasets. In this paper, we propose a novel explainable artificial intelligence (XAI) method to visually explain the VAE behavior based on the second-order derivative of the latent space with respect to the encoding layers, which reflects the amount of acceleration required from the encoding to the decoding space. Our model, termed Grad$_2$VAE, captures the local curvatures of the representations to build online attentions that visually explain the model's behavior. Besides the VAE explanation, we employ our method for anomaly detection, where our model outperforms recent deep UL models when generalized to large-scale anomaly data.",
  isbn="978-3-031-06427-2"
}