@inproceedings{e92b4852057a43a3bdfd5b99821e4d1f,
  title     = {Poster: Characterizing Adversarial Subspaces by Mutual Information},
  abstract  = {Deep learning is well-known for its great performances on images classification, object detection, and natural language processing. However, the recent research has demonstrated that visually indistinguishable images called adversarial examples can successfully fool neural networks by carefully crafting. In this paper, we design a detector named MID, calculating mutual information to characterize adversarial subspaces. Meanwhile, we use the defense framework called MagNet and mount the detector MID on it. Experimental results show that projected gradient descent (PGD), basic iterative method (BIM), Carlini and Wagner's attack (C\&W attack) and elastic-net attack to deep neural network (elastic-net and L1 rules) can be effectively defended by our method.},
  keywords  = {Adversarial examples, Neural networks},
  author    = {Hsu, Chia-Yi and Chen, Pin-Yu and Yu, Chia-Mu},
  year      = {2019},
  month     = jul,
  day       = {2},
  doi       = {10.1145/3321705.3331002},
  language  = {English},
  publisher = {Association for Computing Machinery, Inc},
  pages     = {667--669},
  booktitle = {AsiaCCS 2019 - Proceedings of the 2019 ACM Asia Conference on Computer and Communications Security},
  note      = {2019 ACM Asia Conference on Computer and Communications Security, AsiaCCS 2019 ; Conference date: 09-07-2019 Through 12-07-2019},
}