@article{Mueller_TPAMI25,
  title        = {Examining the Impact of Optical Aberrations to Image Classification and Object Detection Models},
  author       = {M{\"u}ller, Patrick and Braun, Alexander and Keuper, Margret},
  language     = {eng},
  issn         = {0162-8828},
  doi          = {10.1109/TPAMI.2025.3622234},
  publisher    = {IEEE},
  address      = {Philadelphia, PA},
  year         = {2025},
  marginalmark = {$\bullet$},
  abstract     = {Deep neural networks (DNNs) have proven to be successful in various computer vision applications such that models even infer in safety-critical situations. Therefore, vision models have to behave in a robust way to disturbances such as noise or blur. While seminal benchmarks exist to evaluate model robustness to diverse corruptions, blur is often approximated in an overly simplistic way to model defocus, while ignoring the different blur kernel shapes that result from optical systems. To study model robustness against realistic optical blur effects, this paper proposes two datasets of blur corruptions, which we denote OpticsBench and LensCorruptions. OpticsBench examines primary aberrations such as coma, defocus, and astigmatism, i.e. aberrations that can be represented by varying a single parameter of Zernike polynomials. To go beyond the principled but synthetic setting of primary aberrations, LensCorruptions samples linear combinations in the vector space spanned by Zernike polynomials, corresponding to 100 real lenses. Evaluations for image classification and object detection on ImageNet and MSCOCO show that for a variety of different pre-trained models, the performance on OpticsBench and LensCorruptions varies significantly, indicating the need to consider realistic image corruptions to evaluate a model's robustness against blur.},
  journal      = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume       = {48},
  number       = {3},
  pages        = {2139--2153},
}