Current Year

Article
Elek, O., Zhang, R., Sumin, D., et al. 2021. Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing. Optics Express 29, 5.
Export
BibTeX
@article{Elek2021, TITLE = {Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for {3D} Printing}, AUTHOR = {Elek, Oskar and Zhang, Ran and Sumin, Denis and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Krivanek, Jaroslav and Weyrich, Tim}, LANGUAGE = {eng}, ISSN = {1094-4087}, DOI = {10.1364/OE.406095}, PUBLISHER = {Optical Society of America}, ADDRESS = {Washington, DC}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, DATE = {2021}, JOURNAL = {Optics Express}, VOLUME = {29}, NUMBER = {5}, PAGES = {7568--7588}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Zhang, Ran %A Sumin, Denis %A Myszkowski, Karol %A Bickel, Bernd %A Wilkie, Alexander %A Krivanek, Jaroslav %A Weyrich, Tim %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-E013-6 %R 10.1364/OE.406095 %7 2021 %D 2021 %J Optics Express %O Opt. Express %V 29 %N 5 %& 7568 %P 7568 - 7588 %I Optical Society of America %C Washington, DC %@ false
Lagunas, M., Serrano, A., Gutierrez, D., and Masia, B. 2021. The Joint Role of Geometry and Illumination on Material Recognition. Journal of Vision 21, 2.
Export
BibTeX
@article{Lagunas2021_MatRecog, TITLE = {The Joint Role of Geometry and Illumination on Material Recognition}, AUTHOR = {Lagunas, Manuel and Serrano, Ana and Gutierrez, Diego and Masia, Belen}, LANGUAGE = {eng}, ISSN = {1534-7362}, DOI = {10.1167/jov.21.2.2}, PUBLISHER = {Scholar One, Inc.}, ADDRESS = {Charlottesville, VA}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, JOURNAL = {Journal of Vision}, VOLUME = {21}, NUMBER = {2}, PAGES = {1--18}, }
Endnote
%0 Journal Article %A Lagunas, Manuel %A Serrano, Ana %A Gutierrez, Diego %A Masia, Belen %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T The Joint Role of Geometry and Illumination on Material Recognition : %G eng %U http://hdl.handle.net/21.11116/0000-0007-EAF9-9 %R 10.1167/jov.21.2.2 %7 2021 %D 2021 %J Journal of Vision %V 21 %N 2 %& 1 %P 1 - 18 %I Scholar One, Inc. %C Charlottesville, VA %@ false
Meka, A., Shafiei, M., Zollhöfer, M., Richardt, C., and Theobalt, C. Real-time Global Illumination Decomposition of Videos. ACM Transactions on Graphics.
(Accepted/in press)
Export
BibTeX
@article{Meka:2021, TITLE = {Real-time Global Illumination Decomposition of Videos}, AUTHOR = {Meka, Abhimitra and Shafiei, Mohammad and Zollh{\"o}fer, Michael and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2021}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics}, }
Endnote
%0 Journal Article %A Meka, Abhimitra %A Shafiei, Mohammad %A Zollhöfer, Michael %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Global Illumination Decomposition of Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-EE07-6 %D 2021 %J ACM Transactions on Graphics %I ACM %C New York, NY %@ false
Rittig, T., Sumin, D., Babaei, V., et al. Neural Acceleration of Scattering-Aware Color 3D Printing. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
(Accepted/in press)
Export
BibTeX
@article{rittig2021neural, TITLE = {Neural Acceleration of Scattering-Aware Color {3D} Printing}, AUTHOR = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexei and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Krivanek, Jaroslav}, LANGUAGE = {eng}, ISSN = {0167-7055}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2021}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {40}, NUMBER = {2}, BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)}, }
Endnote
%0 Journal Article %A Rittig, Tobias %A Sumin, Denis %A Babaei, Vahid %A Didyk, Piotr %A Voloboy, Alexei %A Wilkie, Alexander %A Bickel, Bernd %A Myszkowski, Karol %A Weyrich, Tim %A Krivanek, Jaroslav %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Neural Acceleration of Scattering-Aware Color 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-F073-8 %D 2021 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 40 %N 2 %I Blackwell-Wiley %C Oxford %@ false %B 42nd Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2021 EG 2021
Van Onzenoodt, C., Singh, G., Ropinski, T., and Ritschel, T. Blue Noise Plots. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
(Accepted/in press)
Export
BibTeX
@article{onzenoodt2021blue, TITLE = {Blue Noise Plots}, AUTHOR = {van Onzenoodt, Christian and Singh, Gurprit and Ropinski, Timo and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2021}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {40}, NUMBER = {2}, BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)}, }
Endnote
%0 Journal Article %A van Onzenoodt, Christian %A Singh, Gurprit %A Ropinski, Timo %A Ritschel, Tobias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Blue Noise Plots : %G eng %U http://hdl.handle.net/21.11116/0000-0008-010F-7 %D 2021 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 40 %N 2 %I Blackwell-Wiley %C Oxford %@ false %B 42nd Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2021 EG 2021
Conference Paper
Rao, S., Stutz, D., and Schiele, B. 2021. Adversarial Training Against Location-Optimized Adversarial Patches. Computer Vision – ECCV Workshops 2020, Springer.
Export
BibTeX
@inproceedings{DBLP:conf/eccv/RaoSS20, TITLE = {Adversarial Training Against Location-Optimized Adversarial Patches}, AUTHOR = {Rao, Sukrut and Stutz, David and Schiele, Bernt}, LANGUAGE = {eng}, ISBN = {978-3-030-68237-8}, DOI = {10.1007/978-3-030-68238-5_32}, PUBLISHER = {Springer}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2021}, BOOKTITLE = {Computer Vision -- ECCV Workshops 2020}, EDITOR = {Bartoli, Adrian and Fusiello, Andrea}, PAGES = {429--448}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {12539}, ADDRESS = {Glasgow, UK}, }
Endnote
%0 Conference Proceedings %A Rao, Sukrut %A Stutz, David %A Schiele, Bernt %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Adversarial Training Against Location-Optimized Adversarial Patches : %G eng %U http://hdl.handle.net/21.11116/0000-0008-1662-1 %R 10.1007/978-3-030-68238-5_32 %D 2021 %B 16th European Conference on Computer Vision %Z date of event: 2020-08-23 - 2020-08-28 %C Glasgow, UK %B Computer Vision -- ECCV Workshops 2020 %E Bartoli, Adrian; Fusiello, Andrea %P 429 - 448 %I Springer %@ 978-3-030-68237-8 %B Lecture Notes in Computer Science %N 12539
Paper
Birdal, T., Golyanik, V., Theobalt, C., and Guibas, L. 2021. Quantum Permutation Synchronization. https://arxiv.org/abs/2101.07755.
(arXiv: 2101.07755)
Abstract
We present QuantumSync, the first quantum algorithm for solving a synchronization problem in the context of computer vision. In particular, we focus on permutation synchronization which involves solving a non-convex optimization problem in discrete variables. We start by formulating synchronization into a quadratic unconstrained binary optimization problem (QUBO). While such formulation respects the binary nature of the problem, ensuring that the result is a set of permutations requires extra care. Hence, we: (i) show how to insert permutation constraints into a QUBO problem and (ii) solve the constrained QUBO problem on the current generation of the adiabatic quantum computers D-Wave. Thanks to the quantum annealing, we guarantee global optimality with high probability while sampling the energy landscape to yield confidence estimates. Our proof-of-concepts realization on the adiabatic D-Wave computer demonstrates that quantum machines offer a promising way to solve the prevalent yet difficult synchronization problems.
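
The following is a minimal, self-contained sketch of the generic step the abstract alludes to, namely folding permutation (one-hot row and column) constraints into a QUBO as quadratic penalties. It is not the paper's actual objective or its D-Wave pipeline; the penalty weight lam and the brute-force check are illustrative only.

import itertools
import numpy as np

def permutation_constraint_qubo(n, lam=1.0):
    # QUBO matrix Q for the penalty
    #   lam * sum_i (sum_j x_ij - 1)^2 + lam * sum_j (sum_i x_ij - 1)^2,
    # with the constant offset 2*lam*n dropped; binary minimizers of x^T Q x
    # are then exactly the n x n permutation matrices.
    N = n * n
    Q = np.zeros((N, N))
    idx = lambda i, j: i * n + j          # flatten (row, col) -> variable index
    for i, j in itertools.product(range(n), repeat=2):
        a = idx(i, j)
        Q[a, a] += -2.0 * lam             # linear terms from the row and column constraints
        for k in range(j + 1, n):         # pairs of variables in the same row
            Q[a, idx(i, k)] += 2.0 * lam
        for k in range(i + 1, n):         # pairs of variables in the same column
            Q[a, idx(k, j)] += 2.0 * lam
    return Q

def qubo_energy(Q, x):
    return float(x @ Q @ x)

if __name__ == "__main__":
    n = 3
    Q = permutation_constraint_qubo(n)
    assignments = [np.array(b) for b in itertools.product([0, 1], repeat=n * n)]
    best = min(qubo_energy(Q, x) for x in assignments)
    minimizers = [x.reshape(n, n) for x in assignments if np.isclose(qubo_energy(Q, x), best)]
    print(len(minimizers))                # 6 = 3!, one per 3x3 permutation matrix

In the actual system such a QUBO (with the synchronization objective added on top of the constraint penalties) would be handed to an annealer rather than enumerated; the enumeration here only verifies that the penalty singles out permutation matrices.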
Export
BibTeX
@online{Birdal_2101.07755, TITLE = {Quantum Permutation Synchronization}, AUTHOR = {Birdal, Tolga and Golyanik, Vladislav and Theobalt, Christian and Guibas, Leonidas}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2101.07755}, EPRINT = {2101.07755}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present QuantumSync, the first quantum algorithm for solving a synchronization problem in the context of computer vision. In particular, we focus on permutation synchronization which involves solving a non-convex optimization problem in discrete variables. We start by formulating synchronization into a quadratic unconstrained binary optimization problem (QUBO). While such formulation respects the binary nature of the problem, ensuring that the result is a set of permutations requires extra care. Hence, we: (i) show how to insert permutation constraints into a QUBO problem and (ii) solve the constrained QUBO problem on the current generation of the adiabatic quantum computers D-Wave. Thanks to the quantum annealing, we guarantee global optimality with high probability while sampling the energy landscape to yield confidence estimates. Our proof-of-concepts realization on the adiabatic D-Wave computer demonstrates that quantum machines offer a promising way to solve the prevalent yet difficult synchronization problems.}, }
Endnote
%0 Report %A Birdal, Tolga %A Golyanik, Vladislav %A Theobalt, Christian %A Guibas, Leonidas %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Quantum Permutation Synchronization : %G eng %U http://hdl.handle.net/21.11116/0000-0007-E895-B %U https://arxiv.org/abs/2101.07755 %D 2021 %X We present QuantumSync, the first quantum algorithm for solving a synchronization problem in the context of computer vision. In particular, we focus on permutation synchronization which involves solving a non-convex optimization problem in discrete variables. We start by formulating synchronization into a quadratic unconstrained binary optimization problem (QUBO). While such formulation respects the binary nature of the problem, ensuring that the result is a set of permutations requires extra care. Hence, we: (i) show how to insert permutation constraints into a QUBO problem and (ii) solve the constrained QUBO problem on the current generation of the adiabatic quantum computers D-Wave. Thanks to the quantum annealing, we guarantee global optimality with high probability while sampling the energy landscape to yield confidence estimates. Our proof-of-concepts realization on the adiabatic D-Wave computer demonstrates that quantum machines offer a promising way to solve the prevalent yet difficult synchronization problems. %K Quantum Physics, quant-ph,Computer Science, Computer Vision and Pattern Recognition, cs.CV,cs.ET,Computer Science, Learning, cs.LG,Computer Science, Robotics, cs.RO
Martin, D., Malpica, S., Gutierrez, D., Masia, B., and Serrano, A. 2021. Multimodality in VR: A Survey. https://arxiv.org/abs/2101.07906.
(arXiv: 2101.07906)
Abstract
Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed.
Export
BibTeX
@online{Martin2021_VRsurvey, TITLE = {Multimodality in {VR}: {A} Survey}, AUTHOR = {Martin, Daniel and Malpica, Sandra and Gutierrez, Diego and Masia, Belen and Serrano, Ana}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2101.07906}, EPRINT = {2101.07906}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed.}, }
Endnote
%0 Report %A Martin, Daniel %A Malpica, Sandra %A Gutierrez, Diego %A Masia, Belen %A Serrano, Ana %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multimodality in VR: A Survey : %G eng %U http://hdl.handle.net/21.11116/0000-0007-EB00-0 %U https://arxiv.org/abs/2101.07906 %D 2021 %X Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed. %K Computer Science, Human-Computer Interaction, cs.HC,Computer Science, Graphics, cs.GR
Sarkar, K., Mehta, D., Xu, W., Golyanik, V., and Theobalt, C. 2021. Neural Re-Rendering of Humans from a Single Image. https://arxiv.org/abs/2101.04104.
(arXiv: 2101.04104)
Abstract
Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods.
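
As a rough illustration of the pipeline the abstract outlines (a learned UV feature texture, sampled at per-pixel UV coordinates, then translated to RGB), here is a schematic PyTorch sketch. The module names, the tiny translation network, and the assumption that per-pixel UV coordinates uv are already rasterized from the reposed body model are simplifications of my own, not the authors' architecture.

import torch
import torch.nn as nn
import torch.nn.functional as F

class FeatureTextureRenderer(nn.Module):
    def __init__(self, feat_dim=16, tex_res=256):
        super().__init__()
        # learned high-dimensional "texture" in UV space (in place of an RGB texture)
        self.feature_texture = nn.Parameter(torch.randn(1, feat_dim, tex_res, tex_res) * 0.01)
        # stand-in for the image-translation network (a real one would be a U-Net / GAN)
        self.translate = nn.Sequential(
            nn.Conv2d(feat_dim, 64, 3, padding=1), nn.ReLU(),
            nn.Conv2d(64, 3, 3, padding=1), nn.Sigmoid(),
        )

    def forward(self, uv):
        # uv: (B, H, W, 2) per-pixel UV coordinates in [-1, 1], assumed rasterized
        # from the reposed parametric body mesh; background handling is omitted.
        B = uv.shape[0]
        tex = self.feature_texture.expand(B, -1, -1, -1).contiguous()
        feat = F.grid_sample(tex, uv, align_corners=False)   # (B, feat_dim, H, W)
        return self.translate(feat)                          # (B, 3, H, W) RGB

# Usage: rgb = FeatureTextureRenderer()(torch.rand(1, 128, 128, 2) * 2 - 1)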
Export
BibTeX
@online{Sarkar_arXiv2101.04104, TITLE = {Neural Re-Rendering of Humans from a Single Image}, AUTHOR = {Sarkar, Kripasindhu and Mehta, Dushyant and Xu, Weipeng and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2101.04104}, EPRINT = {2101.04104}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods.}, }
Endnote
%0 Report %A Sarkar, Kripasindhu %A Mehta, Dushyant %A Xu, Weipeng %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Re-Rendering of Humans from a Single Image : %G eng %U http://hdl.handle.net/21.11116/0000-0007-CF05-B %U https://arxiv.org/abs/2101.04104 %D 2021 %X Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Van Onzenoodt, C., Singh, G., Ropinski, T., and Ritschel, T. 2021. Blue Noise Plots. https://arxiv.org/abs/2102.04072.
(arXiv: 2102.04072)
Abstract
We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While often one-dimensional strip plots are used to depict such data, one of their main problems is visual clutter which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the data point representing dots are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters, and often leads to visually unappealing plots, in which overlap might still occur. To overcome these shortcomings, we introduce BlueNoise Plots where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D i. e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both, a quantitative and a qualitative user study.
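
As a rough sketch of the idea in the abstract (replacing random jitter along the non-encoding axis with positions that keep dots apart in 2D), the snippet below uses a greedy best-candidate placement. This is an illustrative stand-in, not the paper's optimizer; jitter_width and candidates are hypothetical parameters, and both axes are assumed to be on comparable scales.

import numpy as np

def blue_noise_jitter(values, jitter_width=1.0, candidates=32, rng=None):
    # For each data point, keep its value on the encoding axis (x) and pick the
    # position on the non-encoding axis (y) that is farthest from the dots placed
    # so far, out of `candidates` uniform proposals in [0, jitter_width].
    rng = np.random.default_rng(rng)
    x = np.asarray(values, dtype=float)
    ys = np.empty_like(x)
    placed = []                                   # (x, y) pairs already positioned
    for i in rng.permutation(len(x)):             # place dots in random order
        cand_y = rng.uniform(0.0, jitter_width, size=candidates)
        if placed:
            pts = np.array(placed)                # shape (m, 2)
            dx = (pts[:, 0] - x[i])[:, None]      # (m, 1)
            dy = pts[:, 1][:, None] - cand_y      # (m, candidates)
            nearest = np.hypot(dx, dy).min(axis=0)
            best = int(np.argmax(nearest))        # best-candidate (farthest-point) rule
        else:
            best = 0
        ys[i] = cand_y[best]
        placed.append((x[i], ys[i]))
    return x, ys

# Usage (with matplotlib): xs, ys = blue_noise_jitter(data); plt.scatter(xs, ys)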
Export
BibTeX
@online{Onzenoodt_2102.04072, TITLE = {Blue Noise Plots}, AUTHOR = {van Onzenoodt, Christian and Singh, Gurprit and Ropinski, Timo and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2102.04072}, EPRINT = {2102.04072}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While often one-dimensional strip plots are used to depict such data, one of their main problems is visual clutter which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the data point representing dots are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters, and often leads to visually unappealing plots, in which overlap might still occur. To overcome these shortcomings, we introduce BlueNoise Plots where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D i. e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both, a quantitative and a qualitative user study.}, }
Endnote
%0 Report %A van Onzenoodt, Christian %A Singh, Gurprit %A Ropinski, Timo %A Ritschel, Tobias %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Blue Noise Plots : %G eng %U http://hdl.handle.net/21.11116/0000-0008-01ED-C %U https://arxiv.org/abs/2102.04072 %D 2021 %X We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While often one-dimensional strip plots are used to depict such data, one of their main problems is visual clutter which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the data point representing dots are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters, and often leads to visually unappealing plots, in which overlap might still occur. To overcome these shortcomings, we introduce BlueNoise Plots where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D i. e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both, a quantitative and a qualitative user study. %K Computer Science, Graphics, cs.GR