D2: Computer Vision and Machine Learning

David Stutz, MSc (PhD Student)

Address: Max-Planck-Institut für Informatik, Saarland Informatics Campus
Phone: +49 681 9325 0
Fax: +49 681 9325 2099

Personal Information

About Me | Blog | CV | GitHub | LinkedIn | Google Scholar

Bachelor's and master's thesis topics are available on adversarial robustness, i.e., the robustness of deep neural networks against adversarial examples; a minimal illustration of such an attack follows below.
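For readers new to the topic: an adversarial example is an input with a small, typically imperceptible perturbation chosen to change a network's prediction. The sketch below (assuming PyTorch and a hypothetical pretrained classifier model; generic illustration code, not code from the publications listed here) shows the fast gradient sign method (FGSM), a standard attack that crafts such an example with a single signed gradient step:

import torch
import torch.nn.functional as F

def fgsm(model, x, y, epsilon=8 / 255):
    """Craft an L-infinity adversarial example: take one signed
    gradient step of size epsilon that increases the classification
    loss, then clamp back to the valid image range [0, 1]."""
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x_adv), y)
    loss.backward()
    with torch.no_grad():
        x_adv = (x_adv + epsilon * x_adv.grad.sign()).clamp(0, 1)
    return x_adv.detach()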

Publications

Stutz, D., Hein, M., & Schiele, B. (2021). Relating Adversarially Robust Generalization to Flat Minima. In IEEE/CVF International Conference on Computer Vision (ICCV 2021) (pp. 7787–7797). Virtual Event: IEEE. doi:10.1109/ICCV48922.2021.00771
Stutz, D., & Geiger, A. (2018a). Learning 3D Shape Completion under Weak Supervision. International Journal of Computer Vision, 128, 1162–1181. doi:10.1007/s11263-018-1126-y
Stutz, D., & Geiger, A. (2018b). Learning 3D Shape Completion from Laser Scan Data with Weak Supervision. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018) (pp. 1955–1964). Salt Lake City, UT, USA: IEEE. doi:10.1109/CVPR.2018.00209
Stutz, D., Chandramoorthy, N., Hein, M., & Schiele, B. (2021a). Random and Adversarial Bit Error Robustness: Energy-Efficient and Secure DNN Accelerators. Retrieved from https://arxiv.org/abs/2104.08323
(arXiv: 2104.08323)
Abstract
Deep neural network (DNN) accelerators have received considerable attention in recent years due to their potential to save energy compared to mainstream hardware. Low-voltage operation of DNN accelerators allows energy consumption to be reduced further, but causes bit-level failures in the memory storing the quantized DNN weights. Furthermore, DNN accelerators have been shown to be vulnerable to adversarial attacks on voltage controllers or on individual bits. In this paper, we show that a combination of robust fixed-point quantization, weight clipping, and random bit error training (RandBET) or adversarial bit error training (AdvBET) significantly improves robustness against random or adversarial bit errors in quantized DNN weights. This not only enables high energy savings from both low-voltage operation and low-precision quantization, but also improves the security of DNN accelerators. Our approach generalizes across operating voltages and accelerators, as demonstrated on bit errors from profiled SRAM arrays, and achieves robustness against both targeted and untargeted bit-level attacks. Without losing more than 0.8%/2% in test accuracy, we can reduce energy consumption on CIFAR10 by 20%/30% for 8/4-bit quantization using RandBET. Allowing up to 320 adversarial bit errors, AdvBET reduces the test error from above 90% (chance level) to 26.22% on CIFAR10.
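The paper's own implementation is not reproduced here; the following sketch (assumptions: weights already quantized to 8-bit integer codes, i.i.d. bit flips with probability p; PyTorch) merely illustrates the bit-error model that RandBET injects into the quantized weights during training:

import torch

def inject_random_bit_errors(w_q, p=0.01, bits=8):
    """Flip each of the 'bits' bits of the integer weight codes w_q
    independently with probability p, modeling bit-level failures in
    low-voltage accelerator memory."""
    flips = torch.zeros_like(w_q)
    for b in range(bits):
        # Bernoulli(p) mask selecting the weights whose bit b flips.
        mask = (torch.rand(w_q.shape) < p).to(w_q.dtype)
        flips |= mask << b
    return w_q ^ flips  # XOR applies the sampled flips

# Example: random 8-bit weight codes in [0, 255].
w_q = torch.randint(0, 256, (4, 4), dtype=torch.int32)
w_err = inject_random_bit_errors(w_q, p=0.05)

During RandBET, such perturbed weights would be used in the forward pass so that the network learns to tolerate the errors; the exact quantization and training details are in the paper.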
Rao, S., Stutz, D., & Schiele, B. (2021). Adversarial Training Against Location-Optimized Adversarial Patches. In A. Bartoli & A. Fusiello (Eds.), Computer Vision -- ECCV 2020 Workshops (pp. 429–448). Lecture Notes in Computer Science, 12539. Glasgow, UK: Springer. doi:10.1007/978-3-030-68238-5_32
Stutz, D., Hein, M., & Schiele, B. (2019a). Disentangling Adversarial Robustness and Generalization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019) (pp. 6969–6980). Long Beach, CA, USA: IEEE. doi:10.1109/CVPR.2019.00714
Stutz, D., Hein, M., & Schiele, B. (2019b). Confidence-Calibrated Adversarial Training and Detection: More Robust Models Generalizing Beyond the Attack Used During Training. Retrieved from http://arxiv.org/abs/1910.06259
(arXiv: 1910.06259)
Abstract
Adversarial training is the standard approach to training models that are robust against adversarial examples. However, especially for complex datasets, adversarial training incurs a significant loss in accuracy and is known to generalize poorly to stronger attacks, e.g., larger perturbations or other threat models. In this paper, we introduce confidence-calibrated adversarial training (CCAT), whose key idea is to enforce that the confidence on adversarial examples decays with their distance to the attacked examples. We show that CCAT better preserves the accuracy of normal training, while robustness against adversarial examples is achieved via confidence thresholding, i.e., detecting adversarial examples based on their confidence. Most importantly, and in strong contrast to adversarial training, the robustness of CCAT generalizes to larger perturbations and other threat models not encountered during training. For evaluation, we extend the commonly used robust test error to our detection setting, present an adaptive attack with backtracking, and allow the attacker to select, per test example, the worst-case adversarial example from multiple black- and white-box attacks. We present experimental results using $L_\infty$, $L_2$, $L_1$ and $L_0$ attacks on MNIST, SVHN and CIFAR10.
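As a sketch of the central idea (NumPy; the power-law schedule for the label weight is one plausible choice and not necessarily the paper's exact formulation), the training target on a perturbed input interpolates between the one-hot label and the uniform distribution, so that confidence decays as the perturbation grows:

import numpy as np

def ccat_target(y, delta, num_classes=10, epsilon=8 / 255, rho=10.0):
    """Confidence-calibrated target: a convex combination of the
    one-hot label and the uniform distribution whose label weight
    lam decays as the perturbation delta approaches the boundary
    of the L-infinity ball of radius epsilon."""
    lam = (1.0 - min(1.0, np.abs(delta).max() / epsilon)) ** rho
    one_hot = np.eye(num_classes)[y]
    uniform = np.full(num_classes, 1.0 / num_classes)
    return lam * one_hot + (1.0 - lam) * uniform

# delta = 0 recovers the one-hot target; a maximal perturbation
# yields the uniform distribution, i.e., minimal confidence.
print(ccat_target(3, np.zeros((3, 32, 32))))
print(ccat_target(3, np.full((3, 32, 32), 8 / 255)))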
Stutz, D., Chandramoorthy, N., Hein, M., & Schiele, B. (2021b). Bit Error Robustness for Energy-Efficient DNN Accelerators. In A. Smola, A. Dimakis, & I. Stoica (Eds.), Proceedings of the 4th MLSys Conference. Virtual Conference: mlsys.org.
Abstract
Deep neural network (DNN) accelerators have received considerable attention in past years due to the energy they save compared to mainstream hardware. Low-voltage operation of DNN accelerators allows energy consumption to be reduced further, but causes bit-level failures in the memory storing the quantized DNN weights. In this paper, we show that a combination of robust fixed-point quantization, weight clipping, and random bit error training (RandBET) significantly improves robustness against random bit errors in (quantized) DNN weights. This leads to high energy savings from both low-voltage operation and low-precision quantization. Our approach generalizes across operating voltages and accelerators, as demonstrated on bit errors from profiled SRAM arrays. We also discuss why weight clipping alone is already quite an effective way to achieve robustness against bit errors. Moreover, we specifically discuss the involved trade-offs regarding accuracy, robustness, and precision: without losing more than 1% in accuracy compared to a normally trained 8-bit DNN, we can reduce energy consumption on CIFAR-10 by 20%. Higher energy savings of, e.g., 30% are possible at the cost of 2.5% accuracy, even for 4-bit DNNs.
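To see why weight clipping helps (a hedged NumPy sketch; the constant w_max and the symmetric quantizer are illustrative assumptions rather than the paper's exact scheme): clipping shrinks the quantization range, so even flipping the most significant bit of a weight code changes the weight by only a small, bounded amount:

import numpy as np

def clip_and_quantize(w, w_max=0.1, bits=8):
    """Clip weights to [-w_max, w_max] and quantize to signed
    'bits'-bit fixed point. A smaller w_max means a smaller step
    size, so any single bit error perturbs a weight by less."""
    w = np.clip(w, -w_max, w_max)
    step = w_max / (2 ** (bits - 1) - 1)       # grid resolution
    w_q = np.round(w / step).astype(np.int32)  # integer codes
    return w_q, step                           # dequantize: w_q * step

w = 0.05 * np.random.randn(4, 4)
w_q, step = clip_and_quantize(w)
# Worst case: the most significant bit flips, changing a code by
# 2**(bits - 1) = 128 and thus a weight by about 128 * step ~= w_max.
print(w_q * step)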
Stutz, D., Hein, M., & Schiele, B. (2020). Confidence-Calibrated Adversarial Training: Generalizing to Unseen Attacks. In H. Daumé & A. Singh (Eds.), Proceedings of the 37th International Conference on Machine Learning (ICML 2020) (pp. 9155–9166). Proceedings of Machine Learning Research, 119. Virtual Conference: MLResearchPress. Retrieved from http://proceedings.mlr.press/v119/stutz20a/stutz20a.pdf