Abstract
B-cos networks have been shown to be effective for obtaining highly
human-interpretable explanations of model decisions by architecturally enforcing
stronger alignment between inputs and weights. B-cos variants of convolutional
neural networks (CNNs) and vision transformers (ViTs), which primarily replace
linear layers with B-cos transformations, perform competitively with their
standard counterparts while also yielding explanations that are faithful by design.
However, it has so far been necessary to train these models from scratch, which
is increasingly infeasible in the era of large, pre-trained foundation models.
In this work, inspired by the architectural similarities between standard DNNs
and B-cos networks, we propose 'B-cosification', a novel approach to transform
existing pre-trained models into inherently interpretable ones. We conduct a
thorough study of design choices for this conversion, both for
convolutional neural networks and vision transformers. We find that
B-cosification can yield models that are on par with B-cos models trained from
scratch in terms of interpretability, while often outperforming them in terms
of classification performance at a fraction of the training cost. Subsequently,
we apply B-cosification to a pre-trained CLIP model and show that, even with
limited data and compute, we obtain a B-cosified version that is highly
interpretable and competitive in zero-shot performance across a variety of
datasets. We release our code and pre-trained model weights at
github.com/shrebox/B-cosification.
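
The core operation referenced above, the B-cos transform, replaces a linear map w^T x with a variant whose output is scaled by the alignment between input and weight. The PyTorch sketch below illustrates this idea and how a pre-trained linear layer might be 'B-cosified'; it assumes the common formulation out_j = |cos(x, w_j)|^(B-1) * (w_hat_j^T x) with unit-norm weight rows w_hat_j. The names BcosLinear and bcosify_linear are illustrative, not the repository's actual API, and the paper's full conversion also handles biases, normalization layers, and attention, which this sketch omits.

import torch
import torch.nn as nn
import torch.nn.functional as F

class BcosLinear(nn.Module):
    """Minimal sketch of a B-cos linear layer (names and details illustrative).

    Computes out_j = |cos(x, w_j)|^(B-1) * (w_hat_j^T x), so outputs whose
    weights are poorly aligned with the input are suppressed. B-cos layers
    use no bias term.
    """
    def __init__(self, in_features: int, out_features: int, b: float = 2.0):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        self.b = b

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        w = F.normalize(self.weight, dim=1)             # unit-norm weight rows
        lin = F.linear(x, w)                            # ||x|| * cos(x, w_j)
        cos = lin / (x.norm(dim=-1, keepdim=True) + 1e-6)
        return lin * cos.abs().pow(self.b - 1)          # alignment-based scaling

def bcosify_linear(layer: nn.Linear, b: float = 2.0) -> BcosLinear:
    """Hypothetical helper: initialize a B-cos layer from pre-trained weights.

    With b = 1, the layer reduces (up to per-row rescaling) to the original
    linear map, so fine-tuning can start near the pre-trained solution
    instead of from scratch.
    """
    bcos = BcosLinear(layer.in_features, layer.out_features, b=b)
    with torch.no_grad():
        bcos.weight.copy_(layer.weight)
    return bcos

For example, layer = bcosify_linear(nn.Linear(128, 64)) followed by layer(torch.randn(4, 128)) yields a (4, 64) output, and because the layer acts as an input-dependent linear map with dynamic weights |cos(x, w_j)|^(B-1) * w_hat_j, its outputs can be decomposed into per-input contributions for explanation.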
BibTeX
@inproceedings{Arya_Neurips24,
  title      = {B-cosification: {T}ransforming Deep Neural Networks to be Inherently Interpretable},
  author     = {Arya, Shreyash and Rao, Sukrut and Boehle, Moritz and Schiele, Bernt},
  booktitle  = {Advances in Neural Information Processing Systems 37 (NeurIPS 2024)},
  address    = {Vancouver, Canada},
  year       = {2024},
  eprint     = {2411.00715},
  eprinttype = {arXiv},
  language   = {eng},
}
Endnote
%0 Conference Proceedings
%A Arya, Shreyash
%A Rao, Sukrut
%A Boehle, Moritz
%A Schiele, Bernt
%+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society; Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society; Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society; Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
%T B-cosification: Transforming Deep Neural Networks to be Inherently Interpretable
%G eng
%U http://hdl.handle.net/21.11116/0000-0010-0FBE-9
%D 2024
%B 38th Conference on Neural Information Processing Systems
%Z date of event: 2024-12-10 - 2024-12-15
%C Vancouver, Canada
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV; Computer Science, Artificial Intelligence, cs.AI; Computer Science, Learning, cs.LG
%B Advances in Neural Information Processing Systems 37