Publications from 2001

2024
Wolski, K., Djeacoumar, A., Javanmardi, A., et al. 2024. Learning Images Across Scales Using Adversarial Training. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2024) 43, 4.
BibTeX
@article{Wolski24,
  TITLE = {Learning Images Across Scales Using Adversarial Training},
  AUTHOR = {Wolski, Krzysztof and Djeacoumar, Adarsh and Javanmardi, Alireza and Seidel, Hans-Peter and Theobalt, Christian and Cordonnier, Guillaume and Myszkowski, Karol and Drettakis, George and Pan, Xingang and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3658190},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2024},
  DATE = {2024},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME = {43},
  NUMBER = {4},
  PAGES = {1--13},
  EID = {131},
  BOOKTITLE = {Proceedings of the SIGGRAPH Conference (ACM SIGGRAPH 2024)},
}
Wang, C., Wolski, K., Kerbl, B., et al. 2024. Cinematic Gaussians: Real-Time HDR Radiance Fields with Depth of Field. Computer Graphics Forum (Proc. Pacific Graphics 2024) 43, 7.
BibTeX
@article{WangPG24,
  TITLE = {Cinematic {G}aussians: {R}eal-Time {HDR} Radiance Fields with Depth of Field},
  AUTHOR = {Wang, Chao and Wolski, Krzysztof and Kerbl, Bernhard and Serrano, Ana and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.15214},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2024},
  DATE = {2024},
  JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)},
  VOLUME = {43},
  NUMBER = {7},
  PAGES = {1--13},
  BOOKTITLE = {The 32nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2024)},
  EDITOR = {Chen, Renjie and Ritschel, Tobias and Whiting, Emily},
}
Ruan, L., Bálint, M., Bemana, M., et al. 2024. Self-Supervised Video Defocus Deblurring with Atlas Learning. Proceedings SIGGRAPH 2024 Conference Papers, ACM.
BibTeX
@inproceedings{Ruan_SIGGRAPH24,
  TITLE = {Self-Supervised Video Defocus Deblurring with Atlas Learning},
  AUTHOR = {Ruan, Lingyan and B{\'a}lint, Martin and Bemana, Mojtaba and Wolski, Krzysztof and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin},
  LANGUAGE = {eng},
  ISBN = {979-8-4007-0525-0},
  DOI = {10.1145/3641519.3657524},
  PUBLISHER = {ACM},
  YEAR = {2024},
  DATE = {2024},
  BOOKTITLE = {Proceedings SIGGRAPH 2024 Conference Papers},
  EDITOR = {Burbano, Andres and Zorin, Denis and Jarosz, Wojciech},
  PAGES = {1--11},
  EID = {120},
  ADDRESS = {Denver, CO, USA},
}
Mujkanovic, F., Nsampi, N.E., Theobalt, C., Seidel, H.-P., and Leimkühler, T. 2024. Neural Gaussian Scale-Space Fields. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2024) 43, 4.
BibTeX
@article{Mujkanovic24,
  TITLE = {Neural {G}aussian Scale-Space Fields},
  AUTHOR = {Mujkanovic, Felix and Nsampi, Ntumba Elie and Theobalt, Christian and Seidel, Hans-Peter and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3658163},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2024},
  DATE = {2024},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME = {43},
  NUMBER = {4},
  PAGES = {1--15},
  EID = {134},
  BOOKTITLE = {Proceedings of the SIGGRAPH Conference (ACM SIGGRAPH 2024)},
}
Jiménez Navarro, D., Peng, X., Zhang, Y., et al. 2024. Accelerating Saccadic Response through Spatial and Temporal Cross-Modal Misalignments. Proceedings SIGGRAPH 2024 Conference Papers, ACM.
BibTeX
@inproceedings{Navarro_SIGGRAPH24,
  TITLE = {Accelerating Saccadic Response through Spatial and Temporal Cross-Modal Misalignments},
  AUTHOR = {Jim{\'e}nez Navarro, Daniel and Peng, Xi and Zhang, Yunxiang and Myszkowski, Karol and Seidel, Hans-Peter and Sun, Qi and Serrano, Ana},
  LANGUAGE = {eng},
  ISBN = {979-8-4007-0525-0},
  DOI = {10.1145/3641519.3657432},
  PUBLISHER = {ACM},
  YEAR = {2024},
  DATE = {2024},
  BOOKTITLE = {Proceedings SIGGRAPH 2024 Conference Papers},
  EDITOR = {Burbano, Andres and Zorin, Denis and Jarosz, Wojciech},
  PAGES = {1--12},
  EID = {129},
  ADDRESS = {Denver, CO, USA},
}
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2024. Enhancing Image Quality Prediction with Self-supervised Visual Masking. Computer Graphics Forum (Proc. EUROGRAPHICS 2024) 43, 2.
BibTeX
@article{cogalan_Eurographics24,
  TITLE = {Enhancing Image Quality Prediction with Self-supervised Visual Masking},
  AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.15051},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2024},
  JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME = {43},
  NUMBER = {2},
  PAGES = {1--12},
  EID = {e15051},
  BOOKTITLE = {45th Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2024)},
  EDITOR = {Bermano, A. and Kalogerakis, E.},
}
Bemana, M., Leimkühler, T., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2024. Exposure Diffusion: HDR Image Generation by Consistent LDR Denoising. https://arxiv.org/abs/2405.14304.
(arXiv: 2405.14304)
Abstract
We demonstrate generating high-dynamic range (HDR) images using the concerted action of multiple black-box, pre-trained low-dynamic range (LDR) image diffusion models. Common diffusion models are not HDR as, first, there is no sufficiently large HDR image dataset available to re-train them, and second, even if it was, re-training such models is impossible for most compute budgets. Instead, we seek inspiration from the HDR image capture literature that traditionally fuses sets of LDR images, called "brackets", to produce a single HDR image. We operate multiple denoising processes to generate multiple LDR brackets that together form a valid HDR result. To this end, we introduce an exposure consistency term into the diffusion process to couple the brackets such that they agree across the exposure range they share. We demonstrate HDR versions of state-of-the-art unconditional and conditional as well as restoration-type (LDR2HDR) generative modeling.
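To make the bracket-coupling idea above concrete, here is a minimal Python sketch, not the authors' code: several reverse-diffusion processes run in parallel, and after each step the brackets are fused in linear HDR space and re-projected so they agree where their exposure ranges overlap. The denoise_step stub, the exposure values, and the hard re-projection are illustrative assumptions; the paper couples the brackets with a soft consistency term instead of a hard merge.

import numpy as np

def denoise_step(x, t):
    # Stand-in for one reverse step of a black-box, pre-trained LDR
    # diffusion model (hypothetical placeholder, not a real model).
    return 0.98 * x

def to_hdr(ldr, exposure):
    return ldr / exposure                        # lift into shared linear HDR space

def to_ldr(hdr, exposure):
    return np.clip(hdr * exposure, 0.0, 1.0)     # re-expose and saturate

exposures = [0.25, 1.0, 4.0]                                 # assumed bracket spacing
brackets = [np.random.rand(64, 64, 3) for _ in exposures]    # initial noise

for t in range(50):
    brackets = [denoise_step(x, t) for x in brackets]
    # Exposure consistency (hard version): merge all brackets into one HDR
    # estimate, then re-project each bracket from that shared estimate.
    hdr = np.mean([to_hdr(x, e) for x, e in zip(brackets, exposures)], axis=0)
    brackets = [to_ldr(hdr, e) for e in exposures]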
BibTeX
@online{Bemana_2405.14304,
  TITLE = {Exposure Diffusion: {HDR} Image Generation by Consistent {LDR} Denoising},
  AUTHOR = {Bemana, Mojtaba and Leimk{\"u}hler, Thomas and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE = {eng},
  URL = {https://arxiv.org/abs/2405.14304},
  EPRINT = {2405.14304},
  EPRINTTYPE = {arXiv},
  YEAR = {2024},
}
2023
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2023. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. Computer Graphics Forum 42, 2.
BibTeX
@article{Weinrauch_CGF23,
  TITLE = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces},
  AUTHOR = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.14763},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2023},
  JOURNAL = {Computer Graphics Forum},
  VOLUME = {42},
  NUMBER = {2},
  PAGES = {309--320},
}
Wang, C., Serrano, A., Pan, X., et al. 2023a. An Implicit Neural Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023) 42, 6.
BibTeX
@article{Wang_SIGGRAPHASIA23,
  TITLE = {An Implicit Neural Representation for the Image Stack: {D}epth, All in Focus, and High Dynamic Range},
  AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Wolski, Krzysztof and Chen, Bin and Myszkowski, Karol and Seidel, Hans-Peter and Theobalt, Christian and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3618367},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2023},
  DATE = {2023},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME = {42},
  NUMBER = {6},
  PAGES = {1--11},
  EID = {221},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2023},
}
Wang, C., Serrano, A., Pan, X., et al. 2023b. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. IEEE/CVF International Conference on Computer Vision (ICCV 2023), IEEE.
BibTeX
@inproceedings{wang2023glowgan,
  TITLE = {{GlowGAN}: {U}nsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
  AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISBN = {979-8-3503-0718-4},
  DOI = {10.1109/ICCV51070.2023.00964},
  PUBLISHER = {IEEE},
  YEAR = {2023},
  DATE = {2023},
  BOOKTITLE = {IEEE/CVF International Conference on Computer Vision (ICCV 2023)},
  PAGES = {10475--10485},
  ADDRESS = {Paris, France},
}
Ruan, L., Bemana, M., Seidel, H.-P., Myszkowski, K., and Chen, B. 2023. Revisiting Image Deblurring with an Efficient ConvNet. https://arxiv.org/abs/2302.02234.
(arXiv: 2302.02234)
Abstract
Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.
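The block design described above is easy to picture in code. The PyTorch sketch below, with assumed layer sizes, pairs a large-kernel depth-wise convolution (spatial mixing with a wide effective receptive field at low parameter cost) with 1x1 convolutions (channel mixing) in a residual branch; it illustrates the general large-kernel depth-wise pattern, not the paper's exact LaKD block.

import torch
import torch.nn as nn

class LargeKernelDepthwiseBlock(nn.Module):
    # Illustrative block: large-kernel depth-wise spatial mixing followed
    # by point-wise channel mixing, wrapped in a residual connection.
    def __init__(self, channels: int, kernel_size: int = 31):
        super().__init__()
        # groups=channels => depth-wise: one large spatial kernel per channel,
        # so the receptive field grows without quadratic attention cost.
        self.spatial = nn.Conv2d(channels, channels, kernel_size,
                                 padding=kernel_size // 2, groups=channels)
        self.channel = nn.Sequential(
            nn.Conv2d(channels, channels, 1),
            nn.GELU(),
            nn.Conv2d(channels, channels, 1),
        )

    def forward(self, x):
        return x + self.channel(self.spatial(x))

x = torch.randn(1, 32, 128, 128)
print(LargeKernelDepthwiseBlock(32)(x).shape)  # torch.Size([1, 32, 128, 128])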
BibTeX
@online{ruan2023revisiting,
  TITLE = {Revisiting Image Deblurring with an Efficient {ConvNet}},
  AUTHOR = {Ruan, Lingyan and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin},
  LANGUAGE = {eng},
  URL = {https://arxiv.org/abs/2302.02234},
  EPRINT = {2302.02234},
  EPRINTTYPE = {arXiv},
  YEAR = {2023},
}
Nsampi, N.E., Djeacoumar, A., Seidel, H.-P., Ritschel, T., and Leimkühler, T. 2023. Neural Field Convolutions by Repeated Differentiation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023) 42, 6.
BibTeX
@article{Nsampi_SIGGRAPHASIA23,
  TITLE = {Neural Field Convolutions by Repeated Differentiation},
  AUTHOR = {Nsampi, Ntumba Elie and Djeacoumar, Adarsh and Seidel, Hans-Peter and Ritschel, Tobias and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3618340},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2023},
  DATE = {2023},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME = {42},
  NUMBER = {6},
  PAGES = {1--11},
  EID = {206},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2023},
}
Liao, K., Tricard, T., Piovarči, M., Seidel, H.-P., and Babaei, V. 2023. Learning Deposition Policies for Fused Multi-Material 3D Printing. IEEE International Conference on Robotics and Automation (ICRA 2023), IEEE.
BibTeX
@inproceedings{Liao_ICRA2023,
  TITLE = {Learning Deposition Policies for Fused Multi-Material {3D} Printing},
  AUTHOR = {Liao, Kang and Tricard, Thibault and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Babaei, Vahid},
  LANGUAGE = {eng},
  ISBN = {979-8-3503-2365-8},
  DOI = {10.1109/ICRA48891.2023.10160465},
  PUBLISHER = {IEEE},
  YEAR = {2023},
  BOOKTITLE = {IEEE International Conference on Robotics and Automation (ICRA 2023)},
  PAGES = {12345--12352},
  ADDRESS = {London, UK},
}
Huang, X., Ritschel, T., Seidel, H.-P., Memari, P., and Singh, G. 2023. Patternshop: Editing Point Patterns by Image Manipulation. ACM Transactions on Graphics 42, 4.
BibTeX
@article{Huang2023,
  TITLE = {Patternshop: {E}diting Point Patterns by Image Manipulation},
  AUTHOR = {Huang, Xingchang and Ritschel, Tobias and Seidel, Hans-Peter and Memari, Pooran and Singh, Gurprit},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3592418},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2023},
  JOURNAL = {ACM Transactions on Graphics},
  VOLUME = {42},
  NUMBER = {4},
  PAGES = {1--14},
  EID = {53},
}
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2023. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. Computer Graphics Forum (Proc. EUROGRAPHICS 2023) 42, 2.
BibTeX
@article{Cogalan_Eurographics23,
  TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors},
  AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.14748},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2023},
  JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME = {42},
  NUMBER = {2},
  PAGES = {119--131},
  BOOKTITLE = {The European Association for Computer Graphics 44th Annual Conference (EUROGRAPHICS 2023)},
}
Chen, B., Jindal, A., Piovarči, M., et al. 2023. The Effect of Display Capabilities on the Gloss Consistency between Real and Virtual Objects. SA ’23: SIGGRAPH Asia 2023 Conference Papers, ACM.
BibTeX
@inproceedings{DBLP:conf/siggrapha/ChenJPWSDMSM23,
  TITLE = {The Effect of Display Capabilities on the Gloss Consistency between Real and Virtual Objects},
  AUTHOR = {Chen, Bin and Jindal, Akshay and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana and Mantiuk, Rafa{\l} K.},
  LANGUAGE = {eng},
  ISBN = {979-8-4007-0315-7},
  DOI = {10.1145/3610548.3618226},
  PUBLISHER = {ACM},
  YEAR = {2023},
  DATE = {2023},
  BOOKTITLE = {SA '23: SIGGRAPH Asia 2023 Conference Papers},
  EDITOR = {Kim, June and Lin, Ming C. and Bickel, Bernd},
  PAGES = {1--11},
  EID = {90},
  ADDRESS = {Sydney, Australia},
}
Bálint, M., Myszkowski, K., Seidel, H.-P., and Singh, G. 2023a. Joint Sampling and Optimisation for Inverse Rendering. SA ’23: SIGGRAPH Asia 2023 Conference Papers, ACM.
BibTeX
@inproceedings{DBLP:journals/corr/abs-2309-15676,
  TITLE = {Joint Sampling and Optimisation for Inverse Rendering},
  AUTHOR = {B{\'a}lint, Martin and Myszkowski, Karol and Seidel, Hans-Peter and Singh, Gurprit},
  LANGUAGE = {eng},
  ISBN = {979-8-4007-0315-7},
  DOI = {10.1145/3610548.3618244},
  PUBLISHER = {ACM},
  YEAR = {2023},
  DATE = {2023},
  BOOKTITLE = {SA '23: SIGGRAPH Asia 2023 Conference Papers},
  PAGES = {1--10},
  EID = {29},
  ADDRESS = {Sydney, Australia},
}
Bálint, M., Wolski, K., Myszkowski, K., Seidel, H.-P., and Mantiuk, R. 2023b. Neural Partitioning Pyramids for Denoising Monte Carlo Renderings. Proceedings SIGGRAPH 2023 Conference Papers, ACM.
BibTeX
@inproceedings{Balint_SIGGRAPH23,
  TITLE = {Neural Partitioning Pyramids for Denoising {Monte Carlo} Renderings},
  AUTHOR = {B{\'a}lint, Martin and Wolski, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter and Mantiuk, Rafa{\l}},
  LANGUAGE = {eng},
  ISBN = {979-8-4007-0159-7},
  DOI = {10.1145/3588432.3591562},
  PUBLISHER = {ACM},
  YEAR = {2023},
  DATE = {2023},
  BOOKTITLE = {Proceedings SIGGRAPH 2023 Conference Papers},
  EDITOR = {Brunvand, Erik and Sheffer, Alla and Wimmer, Michael},
  PAGES = {1--11},
  EID = {60},
  ADDRESS = {Los Angeles, CA, USA},
}
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2023. Practical Saccade Prediction for Head-mounted Displays: Towards a Comprehensive Model. ACM Transactions on Applied Perception 20, 1.
BibTeX
@article{Arabadzhiyska23,
  TITLE = {Practical Saccade Prediction for Head-Mounted Displays: {T}owards a Comprehensive Model},
  AUTHOR = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr},
  LANGUAGE = {eng},
  ISSN = {1544-3558},
  DOI = {10.1145/3568311},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2023},
  JOURNAL = {ACM Transactions on Applied Perception},
  VOLUME = {20},
  NUMBER = {1},
  PAGES = {1--23},
  EID = {2},
}
Ansari, N., Seidel, H.-P., and Babaei, V. 2023. Large-batch, Iteration-efficient Neural Bayesian Design Optimization. https://arxiv.org/abs/2306.01095.
(arXiv: 2306.01095)
Abstract
Bayesian optimization (BO) provides a powerful framework for optimizing black-box, expensive-to-evaluate functions. It is therefore an attractive tool for engineering design problems, typically involving multiple objectives. Thanks to the rapid advances in fabrication and measurement methods as well as parallel computing infrastructure, querying many design problems can be heavily parallelized. This class of problems challenges BO with an unprecedented setup where it has to deal with very large batches, shifting its focus from sample efficiency to iteration efficiency. We present a novel Bayesian optimization framework specifically tailored to address these limitations. Our key contribution is a highly scalable, sample-based acquisition function that performs a non-dominated sorting of not only the objectives but also their associated uncertainty. We show that our acquisition function in combination with different Bayesian neural network surrogates is effective in data-intensive environments with a minimal number of iterations. We demonstrate the superiority of our method by comparing it with state-of-the-art multi-objective optimizations. We perform our evaluation on two real-world problems -- airfoil design and 3D printing -- showcasing the applicability and efficiency of our approach. Our code is available at: https://github.com/an-on-ym-ous/lbn_mobo
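A toy Python version of the batch-selection idea, with made-up surrogate outputs rather than the released lbn_mobo code: candidates are ranked by non-dominated sorting over the predicted objective means together with their negated uncertainties, so an entire front of promising-or-uncertain designs can be dispatched as one large parallel batch.

import numpy as np

def dominates(a, b):
    # a dominates b if it is no worse in every criterion and strictly
    # better in at least one (all criteria minimized here).
    return np.all(a <= b) and np.any(a < b)

def non_dominated_front(points):
    return [i for i, p in enumerate(points)
            if not any(dominates(q, p) for j, q in enumerate(points) if j != i)]

rng = np.random.default_rng(0)
mu = rng.random((200, 2))      # surrogate means for two objectives (minimize)
sigma = rng.random((200, 2))   # surrogate uncertainties (maximize, so negate)
criteria = np.hstack([mu, -sigma])

batch = non_dominated_front(criteria)
print(len(batch), "candidates selected for the next parallel batch")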
BibTeX
@online{Ansari-et-al_2023,
  TITLE = {Large-batch, Iteration-efficient Neural Bayesian Design Optimization},
  AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
  LANGUAGE = {eng},
  URL = {https://arxiv.org/abs/2306.01095},
  DOI = {10.48550/arXiv.2306.01095},
  EPRINT = {2306.01095},
  EPRINTTYPE = {arXiv},
  YEAR = {2023},
  DATE = {2023},
}
2022
Wang, C., Serrano, A., Pan, X., et al. 2022a. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. https://arxiv.org/abs/2211.12352.
(arXiv: 2211.12352)
Abstract
Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.
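The HDR-to-LDR camera model is the piece that makes this adversarial setup work, and it is simple to sketch in Python: sample a random exposure, saturate, and apply a response curve. The log-uniform exposure range and the gamma curve below are illustrative assumptions standing in for the paper's stochastic exposure and camera response function.

import numpy as np

rng = np.random.default_rng(0)

def camera(hdr, gamma=2.2):
    # Stochastic exposure (log-uniform here), so one HDR scene can map to
    # many differently exposed LDR observations.
    exposure = np.exp(rng.uniform(np.log(0.1), np.log(10.0)))
    linear = np.clip(hdr * exposure, 0.0, 1.0)   # sensor saturation
    return linear ** (1.0 / gamma)               # simple CRF stand-in

hdr = 100.0 * rng.random((64, 64, 3))   # pretend generator output
ldr = camera(hdr)                       # what the discriminator would compare against real LDR photos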
BibTeX
@online{Wang2211.12352,
  TITLE = {{GlowGAN}: Unsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
  AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  LANGUAGE = {eng},
  URL = {https://arxiv.org/abs/2211.12352},
  EPRINT = {2211.12352},
  EPRINTTYPE = {arXiv},
  YEAR = {2022},
}
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2022b. Learning a self-supervised tone mapping operator via feature contrast masking loss. Computer Graphics Forum (Proc. EUROGRAPHICS 2022) 41, 2.
BibTeX
@article{Wang2022,
  TITLE = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
  AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.14459},
  PUBLISHER = {Blackwell-Wiley},
  ADDRESS = {Oxford},
  YEAR = {2022},
  JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME = {41},
  NUMBER = {2},
  PAGES = {71--84},
  BOOKTITLE = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2022)},
  EDITOR = {Chaine, Rapha{\"e}lle and Kim, Min H.},
}
Salaün, C., Georgiev, I., Seidel, H.-P., and Singh, G. 2022. Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{SalauenSIGGRAPHAsia22,
  TITLE = {Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport},
  AUTHOR = {Sala{\"u}n, Corentin and Georgiev, Iliyan and Seidel, Hans-Peter and Singh, Gurprit},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3550454.3555484},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2022},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME = {41},
  NUMBER = {6},
  PAGES = {1--14},
  EID = {261},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Huang, X., Memari, P., Seidel, H.-P., and Singh, G. 2022. Point-Pattern Synthesis using Gabor and Random Filters. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2022) 41, 4.
BibTeX
@article{Huang_EGSR2022,
  TITLE = {Point-Pattern Synthesis using {Gabor} and Random Filters},
  AUTHOR = {Huang, Xingchang and Memari, Pooran and Seidel, Hans-Peter and Singh, Gurprit},
  LANGUAGE = {eng},
  ISSN = {0167-7055},
  DOI = {10.1111/cgf.14596},
  PUBLISHER = {Wiley-Blackwell},
  ADDRESS = {Oxford},
  YEAR = {2022},
  DATE = {2022},
  JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
  VOLUME = {41},
  NUMBER = {4},
  PAGES = {169--179},
  BOOKTITLE = {Eurographics Symposium on Rendering 2022},
  EDITOR = {Ghosh, Abhijeet and Wei, Li-Yi and Wilkie, Alexander},
}
Hladký, J., Stengel, M., Vining, N., Kerbl, B., Seidel, H.-P., and Steinberger, M. 2022. QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{HladkySIGGRAPHAsia22,
  TITLE = {QuadStream: {A} Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction},
  AUTHOR = {Hladk{\'y}, Jozef and Stengel, Michael and Vining, Nicholas and Kerbl, Bernhard and Seidel, Hans-Peter and Steinberger, Markus},
  LANGUAGE = {eng},
  ISSN = {0730-0301},
  DOI = {10.1145/3550454.3555524},
  PUBLISHER = {ACM},
  ADDRESS = {New York, NY},
  YEAR = {2022},
  JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME = {41},
  NUMBER = {6},
  PAGES = {1--13},
  EID = {233},
  BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article %A Hladký, Jozef %A Stengel, Michael %A Vining, Nicholas %A Kerbl, Bernhard %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-000C-208B-3 %R 10.1145/3550454.3555524 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 6 %& 1 %P 1 - 13 %Z sequence number: 233 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2022 %O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2022a. Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures. Computers and Graphics 105.
Export
BibTeX
@article{Cogalan2022, TITLE = {Learning {HDR} Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2022.04.008}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2022}, JOURNAL = {Computers and Graphics}, VOLUME = {105}, PAGES = {57--72}, }
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures : %G eng %U http://hdl.handle.net/21.11116/0000-000A-9D95-D %R 10.1016/j.cag.2022.04.008 %7 2022 %D 2022 %J Computers and Graphics %V 105 %& 57 %P 57 - 72 %I Elsevier %C Amsterdam %@ false
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2022b. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. https://arxiv.org/abs/2206.09485.
(arXiv: 2206.09485)
Abstract
Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We are focusing on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore possible advantages of dual-exposure sensors that readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates a more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction that so far has been considered only for the originally captured frames, not in-between interpolated frames. We design a neural network trained in these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at the test time.
Export
BibTeX
@online{Cogalan2206.09485, TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2206.09485}, EPRINT = {2206.09485}, EPRINTTYPE = {arXiv}, YEAR = {2022}, ABSTRACT = {Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We are focusing on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore possible advantages of dual-exposure sensors that readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates a more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction that so far has been considered only for the originally captured frames, not in-between interpolated frames. We design a neural network trained in these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at the test time.}, }
Endnote
%0 Report %A Çoğalan, Uğur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16E8-6 %U https://arxiv.org/abs/2206.09485 %D 2022 %X Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We are focusing on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore possible advantages of dual-exposure sensors that readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates a more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction that so far has been considered only for the originally captured frames, not in-between interpolated frames. We design a neural network trained in these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at the test time. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Chu, M., Liu, L., Zheng, Q., et al. 2022. Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data. ACM Transactions on Graphics 41, 4.
Export
BibTeX
@article{Chu2022, TITLE = {Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data}, AUTHOR = {Chu, Mengyu and Liu, Lingjie and Zheng, Quan and Franz, Erik and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3528223.3530169}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2022}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {41}, NUMBER = {4}, PAGES = {1--14}, EID = {119}, }
Endnote
%0 Journal Article %A Chu, Mengyu %A Liu, Lingjie %A Zheng, Quan %A Franz, Erik %A Seidel, Hans-Peter %A Theobalt, Christian %A Zayer, Rhaleb %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data : %G eng %U http://hdl.handle.net/21.11116/0000-000B-6561-6 %R 10.1145/3528223.3530169 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 14 %Z sequence number: 119 %I ACM %C New York, NY %@ false %U https://people.mpi-inf.mpg.de/~mchu/projects/PI-NeRF/
Chen, B., Piovarči, M., Wang, C., et al. 2022. Gloss Management for Consistent Reproduction of Real and Virtual Objects. Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022), ACM.
Export
BibTeX
@inproceedings{ChenSA22, TITLE = {Gloss Management for Consistent Reproduction of Real and Virtual Objects}, AUTHOR = {Chen, Bin and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, ISBN = {978-1-4503-9470-3}, DOI = {10.1145/3550469.3555406}, PUBLISHER = {ACM}, YEAR = {2022}, BOOKTITLE = {Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022)}, EDITOR = {Jung, Soon Ki and Lee, Jehee and Bargteil, Adam}, PAGES = {1--9}, EID = {35}, ADDRESS = {Daegu, Republic of Korea}, }
Endnote
%0 Conference Proceedings %A Chen, Bin %A Piovarči, Michal %A Wang, Chao %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Gloss Management for Consistent Reproduction of Real and Virtual Objects : %G eng %U http://hdl.handle.net/21.11116/0000-000C-167F-E %R 10.1145/3550469.3555406 %D 2022 %B SIGGRAPH Asia 2022 %Z date of event: 2022-12-06 - 2022-12-09 %C Daegu, Republic of Korea %B Proceedings SIGGRAPH Asia 2022 %E Jung, Soon Ki; Lee, Jehee; Bargteil, Adam %P 1 - 9 %Z sequence number: 35 %I ACM %@ 978-1-4503-9470-3
Bemana, M., Myszkowski, K., Frisvad, J.R., Seidel, H.-P., and Ritschel, T. 2022. Eikonal Fields for Refractive Novel-View Synthesis. Proceedings SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022), ACM.
Export
BibTeX
@inproceedings{Bemana_SIGGRAPH22, TITLE = {Eikonal Fields for Refractive Novel-View Synthesis}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Frisvad, Jeppe Revall and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISBN = {978-1-4503-9337-9}, DOI = {10.1145/3528233.3530706}, PUBLISHER = {ACM}, YEAR = {2022}, BOOKTITLE = {Proceedings SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022)}, EDITOR = {Nandigjav, Munkhtsetseg and Mitra, Niloy J. and Hertzmann, Aaron}, PAGES = {1--9}, EID = {39}, ADDRESS = {Vancouver, Canada}, }
Endnote
%0 Conference Proceedings %A Bemana, Mojtaba %A Myszkowski, Karol %A Frisvad, Jeppe Revall %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Eikonal Fields for Refractive Novel-View Synthesis : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA61-7 %R 10.1145/3528233.3530706 %D 2022 %B ACM SIGGRAPH %Z date of event: 2022-08-07 - 2022-08-11 %C Vancouver, Canada %B Proceedings SIGGRAPH 2022 Conference Papers Proceedings %E Nandigjav, Munkhtsetseg; Mitra, Niloy J.; Hertzmann, Aaron %P 1 - 9 %Z sequence number: 39 %I ACM %@ 978-1-4503-9337-9
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2022. Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model. https://arxiv.org/abs/2205.01624.
(arXiv: 2205.01624)
Abstract
Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.
Export
BibTeX
@online{Arabadzhiyska2205.01624, TITLE = {Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model}, AUTHOR = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2205.01624}, EPRINT = {2205.01624}, EPRINTTYPE = {arXiv}, YEAR = {2022}, ABSTRACT = {Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.}, }
Endnote
%0 Report %A Arabadzhiyska, Elena %A Tursun, Cara %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16E3-B %U https://arxiv.org/abs/2205.01624 %D 2022 %X Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection. %K Computer Science, Human-Computer Interaction, cs.HC,Computer Science, Graphics, cs.GR
Ansari, N., Seidel, H.-P., and Babaei, V. 2022a. Mixed Integer Neural Inverse Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
Export
BibTeX
@article{Ansari22, TITLE = {Mixed Integer Neural Inverse Design}, AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3528223.3530083}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2022}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {41}, NUMBER = {4}, PAGES = {1--14}, EID = {151}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022}, }
Endnote
%0 Journal Article %A Ansari, Navid %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Neural Inverse Design : %G eng %U http://hdl.handle.net/21.11116/0000-000C-1678-5 %R 10.1145/3528223.3530083 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 14 %Z sequence number: 151 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2022 %O ACM SIGGRAPH 2022
Ansari, N., Seidel, H.-P., Vahidi Ferdowsi, N., and Babaei, V. 2022b. Autoinverse: Uncertainty Aware Inversion of Neural Networks. Advances in Neural Information Processing Systems 35 (NeurIPS 2022), Curran Associates, Inc.
Export
BibTeX
@inproceedings{Ansari_Neurips22, TITLE = {Autoinverse: {U}ncertainty Aware Inversion of Neural Networks}, AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Vahidi Ferdowsi, Nima and Babaei, Vahid}, LANGUAGE = {eng}, PUBLISHER = {Curran Associates, Inc}, YEAR = {2022}, BOOKTITLE = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)}, EDITOR = {Koyejo, S. and Mohamed, S. and Agarwal, A. and Belgrave, D. and Cho, K. and Oh, A.}, PAGES = {8675--8686}, ADDRESS = {New Orleans, LA, USA}, }
Endnote
%0 Conference Proceedings %A Ansari, Navid %A Seidel, Hans-Peter %A Vahidi Ferdowsi, Nima %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Autoinverse: Uncertainty Aware Inversion of Neural Networks : %G eng %U http://hdl.handle.net/21.11116/0000-000C-16F6-6 %D 2022 %B 36th Conference on Neural Information Processing Systems %Z date of event: 2022-11-28 - 2022-12-09 %C New Orleans, LA, USA %B Advances in Neural Information Processing Systems 35 %E Koyejo, S.; Mohamed, S.; Agarwal, A.; Belgrave, D.; Cho, K.; Oh, A. %P 8675 - 8686 %I Curran Associates, Inc %U https://openreview.net/pdf?id=dNyCj1AbOb
2021
Zheng, Q., Singh, G., and Seidel, H.-P. 2021. Neural Relightable Participating Media Rendering. Advances in Neural Information Processing Systems 34 (NeurIPS 2021), Curran Associates, Inc.
Export
BibTeX
@inproceedings{Zheng_Neurips2021, TITLE = {Neural Relightable Participating Media Rendering}, AUTHOR = {Zheng, Quan and Singh, Gurprit and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {9781713845393}, PUBLISHER = {Curran Associates, Inc.}, YEAR = {2021}, BOOKTITLE = {Advances in Neural Information Processing Systems 34 (NeurIPS 2021)}, EDITOR = {Ranzato, M. and Beygelzimer, A. and Liang, P. S. and Vaughan, J. W. and Dauphin, Y.}, PAGES = {15203--15215}, ADDRESS = {Virtual}, }
Endnote
%0 Conference Proceedings %A Zheng, Quan %A Singh, Gurprit %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Relightable Participating Media Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0009-7117-E %D 2021 %B 35th Conference on Neural Information Processing Systems %Z date of event: 2021-12-06 - 2021-12-14 %C Virtual %B Advances in Neural Information Processing Systems 34 %E Ranzato, M.; Beygelzimer, A.; Liang, P. S.; Vaughan, J. W.; Dauphin, Y. %P 15203 - 15215 %I Curran Associates, Inc. %@ 9781713845393
Yenamandra, T., Tewari, A., Bernard, F., et al. 2021. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Yenamandra_CVPR2021, TITLE = {{i3DMM}: {D}eep Implicit {3D} Morphable Model of Human Heads}, AUTHOR = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-6654-4509-2}, DOI = {10.1109/CVPR46437.2021.01261}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)}, PAGES = {12803--12813}, ADDRESS = {Virtual Conference}, }
Endnote
%0 Conference Proceedings %A Yenamandra, Tarun %A Tewari, Ayush %A Bernard, Florian %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Cremers, Daniel %A Theobalt, Christian %+ External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T i3DMM: Deep Implicit 3D Morphable Model of Human Heads : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8966-B %R 10.1109/CVPR46437.2021.01261 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 12803 - 12813 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/i3DMM/
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2021. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. https://arxiv.org/abs/2105.13168.
(arXiv: 2105.13168)
Abstract
The humble loop shrinking property played a central role in the inception of modern topology but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion thus avoiding commonly encountered issues when using Morse functions.
Export
BibTeX
@online{Weinrauch_2105.13168, TITLE = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces}, AUTHOR = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2105.13168}, EPRINT = {2105.13168}, EPRINTTYPE = {arXiv}, YEAR = {2021}, ABSTRACT = {The humble loop shrinking property played a central role in the inception of modern topology but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion thus avoiding commonly encountered issues when using Morse functions.}, }
Endnote
%0 Report %A Weinrauch, Alexander %A Seidel, Hans-Peter %A Mlakar, Daniel %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0009-70EE-D %U https://arxiv.org/abs/2105.13168 %D 2021 %X The humble loop shrinking property played a central role in the inception of modern topology but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion thus avoiding commonly encountered issues when using Morse functions. %K Computer Science, Graphics, cs.GR,Computer Science, Computational Geometry, cs.CG,Mathematics, Algebraic Topology, math.AT
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2021. Learning a Self-supervised Tone Mapping Operator via Feature Contrast Masking Loss. https://arxiv.org/abs/2110.09866.
(arXiv: 2110.09866)
Abstract
High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited, therefore tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods, however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric), therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge on contrast perception that allows for directly comparing the content in the HDR and tone mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.
Export
BibTeX
@online{Wang_2110.09866, TITLE = {Learning a Self-supervised Tone Mapping Operator via Feature Contrast Masking Loss}, AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2110.09866}, EPRINT = {2110.09866}, EPRINTTYPE = {arXiv}, YEAR = {2021}, ABSTRACT = {High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited, therefore tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods, however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric), therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge on contrast perception that allows for directly comparing the content in the HDR and tone mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.}, }
Endnote
%0 Report %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a Self-supervised Tone Mapping Operator via Feature Contrast Masking Loss : %G eng %U http://hdl.handle.net/21.11116/0000-0009-710E-9 %U https://arxiv.org/abs/2110.09866 %D 2021 %X High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited, therefore tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods, however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric), therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge on contrast perception that allows for directly comparing the content in the HDR and tone mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
Serrano, A., Chen, B., Wang, C., et al. 2021. The Effect of Shape and Illumination on Material Perception: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{SIGG2021_Materials, TITLE = {The Effect of Shape and Illumination on Material Perception: Model and Applications}, AUTHOR = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3450626.3459813}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2021}, DATE = {2021}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {40}, NUMBER = {4}, PAGES = {1--16}, EID = {125}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Chen, Bin %A Wang, Chao %A Piovarči, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T The Effect of Shape and Illumination on Material Perception: Model and Applications : %G eng %U http://hdl.handle.net/21.11116/0000-0009-0565-0 %R 10.1145/3450626.3459813 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 16 %Z sequence number: 125 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Rudnev, V., Golyanik, V., Wang, J., et al. 2021. EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream. ICCV 2021, IEEE.
Export
BibTeX
@inproceedings{Rudnev_2021_ICCV, TITLE = {{EventHands}: {R}eal-Time Neural {3D} Hand Pose Estimation from an Event Stream}, AUTHOR = {Rudnev, Viktor and Golyanik, Vladislav and Wang, Jiayi and Seidel, Hans-Peter and Mueller, Franziska and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-6654-2812-5}, DOI = {10.1109/ICCV48922.2021.01216}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {ICCV 2021}, PAGES = {12365--12375}, ADDRESS = {Montreal, QC, Canada}, }
Endnote
%0 Conference Proceedings %A Rudnev, Viktor %A Golyanik, Vladislav %A Wang, Jiayi %A Seidel, Hans-Peter %A Mueller, Franziska %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B709-1 %R 10.1109/ICCV48922.2021.01216 %D 2021 %B IEEE/CVF International Conference on Computer Vision %Z date of event: 2021-10-10 - 2021-10-17 %C Montreal, QC, Canada %B ICCV 2021 %P 12365 - 12375 %I IEEE %@ 978-1-6654-2812-5
Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021. Differentiable Event Stream Simulator for Non-Rigid 3D Tracking. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Nehvi_CVPR2021Workshop, TITLE = {Differentiable Event Stream Simulator for Non-Rigid {3D} Tracking}, AUTHOR = {Nehvi, Jalees and Golyanik, Vladislav and Mueller, Franziska and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-6654-4899-4}, DOI = {10.1109/CVPRW53098.2021.00143}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021)}, PAGES = {1302--1311}, ADDRESS = {Nashville, TN, USA}, }
Endnote
%0 Conference Proceedings %A Nehvi, Jalees %A Golyanik, Vladislav %A Mueller, Franziska %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Differentiable Event Stream Simulator for Non-Rigid 3D Tracking : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8957-C %R 10.1109/CVPRW53098.2021.00143 %D 2021 %B 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops %Z date of event: 2021-06-19 - 2021-06-25 %C Nashville, TN, USA %B Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops %P 1302 - 1311 %I IEEE %@ 978-1-6654-4899-4 %U https://gvv.mpi-inf.mpg.de/projects/Event-based_Non-rigid_3D_Tracking/
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021a. Learning Complete 3D Morphable Face Models from Images and Videos. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021b, TITLE = {Learning Complete {3D} Morphable Face Models from Images and Videos}, AUTHOR = {Mallikarjun B R and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-6654-4509-2}, DOI = {10.1109/CVPR46437.2021.00337}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)}, PAGES = {3361--3371}, ADDRESS = {Virtual Conference}, }
Endnote
%0 Conference Proceedings %A Mallikarjun B R, %A Tewari, Ayush %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Complete 3D Morphable Face Models from Images and Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8926-3 %R 10.1109/CVPR46437.2021.00337 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 3361 - 3371 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/LeMoMo/
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2021b. Monocular Reconstruction of Neural Face Reflectance Fields. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021, TITLE = {Monocular Reconstruction of Neural Face Reflectance Fields}, AUTHOR = {Mallikarjun B R and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-6654-4509-2}, DOI = {10.1109/CVPR46437.2021.00476}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)}, PAGES = {4791--4800}, ADDRESS = {Virtual Conference}, }
Endnote
%0 Conference Proceedings %A Mallikarjun B R, %A Tewari, Ayush %A Oh, Tae-Hyun %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Monocular Reconstruction of Neural Face Reflectance Fields : %G eng %U http://hdl.handle.net/21.11116/0000-0008-88FB-4 %R 10.1109/CVPR46437.2021.00476 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 4791 - 4800 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/FaceReflectanceFields/
Mallikarjun B R, Tewari, A., Dib, A., et al. 2021c. PhotoApp: Photorealistic Appearance Editing of Head Portraits. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{MallikarjunBR2021, TITLE = {{PhotoApp}: {P}hotorealistic Appearance Editing of Head Portraits}, AUTHOR = {Mallikarjun B R and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3450626.3459765}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2021}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {40}, NUMBER = {4}, PAGES = {1--16}, EID = {44}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021}, }
Endnote
%0 Journal Article %A Mallikarjun B R, %A Tewari, Ayush %A Dib, Abdallah %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Chevallier, Louis %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T PhotoApp: Photorealistic Appearance Editing of Head Portraits : %G eng %U http://hdl.handle.net/21.11116/0000-0009-2A9B-A %R 10.1145/3450626.3459765 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 16 %Z sequence number: 44 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Kappel, M., Golyanik, V., Elgharib, M., et al. 2021. High-Fidelity Neural Human Motion Transfer from Monocular Video. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Kappel_CVPR2021, TITLE = {High-Fidelity Neural Human Motion Transfer from Monocular Video}, AUTHOR = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.}, LANGUAGE = {eng}, ISBN = {978-1-6654-4509-2}, DOI = {10.1109/CVPR46437.2021.00159}, PUBLISHER = {IEEE}, YEAR = {2021}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)}, PAGES = {1541--1550}, ADDRESS = {Virtual Conference}, }
Endnote
%0 Conference Proceedings %A Kappel, Moritz %A Golyanik, Vladislav %A Elgharib, Mohamed %A Henningson, Jann-Ole %A Seidel, Hans-Peter %A Castillo, Susana %A Theobalt, Christian %A Magnor, Marcus A. %+ External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations %T High-Fidelity Neural Human Motion Transfer from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0008-8947-E %R 10.1109/CVPR46437.2021.00159 %D 2021 %B 34th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2021-06-19 - 2021-06-25 %C Virtual Conference %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 1541 - 1550 %I IEEE %@ 978-1-6654-4509-2 %U https://gvv.mpi-inf.mpg.de/projects/NHMT/
Jiang, C., Tang, C., Seidel, H.-P., Chen, R., and Wonka, P. 2021. Computational Design of Lightweight Trusses. Computer-Aided Design 141.
Export
BibTeX
@article{Jiang2021, TITLE = {Computational Design of Lightweight Trusses}, AUTHOR = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Chen, Renjie and Wonka, Peter}, ISSN = {0010-4485}, DOI = {10.1016/j.cad.2021.103076}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2021}, JOURNAL = {Computer-Aided Design}, VOLUME = {141}, PAGES = {1--11}, EID = {103076}, }
Endnote
%0 Journal Article %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Chen, Renjie %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Computational Design of Lightweight Trusses : %U http://hdl.handle.net/21.11116/0000-0009-70C2-D %R 10.1016/j.cad.2021.103076 %7 2021 %D 2021 %J Computer-Aided Design %V 141 %& 1 %P 1 - 11 %Z sequence number: 103076 %I Elsevier %C Amsterdam %@ false
Hladký, J., Seidel, H.-P., and Steinberger, M. 2021. SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
Export
BibTeX
@article{10.1111:cgf.142648, TITLE = {{SnakeBinning}: {E}fficient Temporally Coherent Triangle Packing for Shading Streaming}, AUTHOR = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.142648}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2021}, DATE = {2021}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {40}, NUMBER = {2}, PAGES = {475--488}, BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)}, EDITOR = {Mitra, Niloy and Viola, Ivan}, }
Endnote
%0 Journal Article %A Hladký, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming : %G eng %U http://hdl.handle.net/21.11116/0000-0008-7AFD-3 %R 10.1111/cgf.142648 %7 2021 %D 2021 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 40 %N 2 %& 475 %P 475 - 488 %I Blackwell-Wiley %C Oxford %@ false %B 42nd Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2021 EG 2021
Habibie, I., Xu, W., Mehta, D., et al. 2021a. Learning Speech-driven 3D Conversational Gestures from Video. https://arxiv.org/abs/2102.06837.
(arXiv: 2102.06837)
Abstract
We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.
Export
BibTeX
@online{Habibie_2102.06837, TITLE = {Learning Speech-driven {3D} Conversational Gestures from Video}, AUTHOR = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2102.06837}, EPRINT = {2102.06837}, EPRINTTYPE = {arXiv}, YEAR = {2021}, ABSTRACT = {We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.}, }
Endnote
%0 Report %A Habibie, Ikhsanul %A Xu, Weipeng %A Mehta, Dushyant %A Liu, Lingjie %A Seidel, Hans-Peter %A Pons-Moll, Gerard %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Speech-driven 3D Conversational Gestures from Video : %G eng %U http://hdl.handle.net/21.11116/0000-0009-70C7-8 %U https://arxiv.org/abs/2102.06837 %D 2021 %X We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Habibie, I., Xu, W., Mehta, D., et al. 2021b. Learning Speech-driven 3D Conversational Gestures from Video. Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021), ACM.
Export
BibTeX
@inproceedings{Habibie_IVA2021, TITLE = {Learning Speech-driven {3D} Conversational Gestures from Video}, AUTHOR = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {9781450386197}, DOI = {10.1145/3472306.3478335}, PUBLISHER = {ACM}, YEAR = {2021}, BOOKTITLE = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021)}, PAGES = {101--108}, ADDRESS = {Virtual Event, Japan}, }
Endnote
%0 Conference Proceedings %A Habibie, Ikhsanul %A Xu, Weipeng %A Mehta, Dushyant %A Liu, Lingjie %A Seidel, Hans-Peter %A Pons-Moll, Gerard %A Elgharib, Mohamed %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T Learning Speech-driven 3D Conversational Gestures from Video : %G eng %U http://hdl.handle.net/21.11116/0000-0009-4D19-6 %R 10.1145/3472306.3478335 %D 2021 %B 21st ACM International Conference on Intelligent Virtual Agents %Z date of event: 2021-09-14 - 2021-09-17 %C Virtual Event, Japan %B Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents %P 101 - 108 %I ACM %@ 9781450386197
Chu, M., Thuerey, N., Seidel, H.-P., Theobalt, C., and Zayer, R. 2021. Learning Meaningful Controls for Fluids. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{Chu2021, TITLE = {Learning Meaningful Controls for Fluids}, AUTHOR = {Chu, Mengyu and Thuerey, Nils and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3450626.3459845}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2021}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {40}, NUMBER = {4}, PAGES = {1--13}, EID = {100}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021}, }
Endnote
%0 Journal Article %A Chu, Mengyu %A Thuerey, Nils %A Seidel, Hans-Peter %A Theobalt, Christian %A Zayer, Rhaleb %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Meaningful Controls for Fluids : %G eng %U http://hdl.handle.net/21.11116/0000-0009-4B91-F %R 10.1145/3450626.3459845 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 13 %Z sequence number: 100 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Chen, B., Wang, C., Piovarči, M., et al. 2021. The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories. The Visual Computer 37.
Export
BibTeX
@article{Chen2021b, TITLE = {The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories}, AUTHOR = {Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-021-02227-x}, PUBLISHER = {Springer}, ADDRESS = {Berlin}, YEAR = {2021}, JOURNAL = {The Visual Computer}, VOLUME = {37}, PAGES = {2975--2987}, }
Endnote
%0 Journal Article %A Chen, Bin %A Wang, Chao %A Piovarči, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories : %G eng %U http://hdl.handle.net/21.11116/0000-0008-F05C-2 %R 10.1007/s00371-021-02227-x %7 2021 %D 2021 %J The Visual Computer %V 37 %& 2975 %P 2975 - 2987 %I Springer %C Berlin %@ false
Ansari, N., Seidel, H.-P., and Babaei, V. 2021. Mixed Integer Neural Inverse Design. https://arxiv.org/abs/2109.12888.
(arXiv: 2109.12888)
Abstract
In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.
Export
BibTeX
@online{Ansari_2109.12888, TITLE = {Mixed Integer Neural Inverse Design}, AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2109.12888}, EPRINT = {2109.12888}, EPRINTTYPE = {arXiv}, YEAR = {2021}, ABSTRACT = {In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.}, }
Endnote
%0 Report %A Ansari, Navid %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Neural Inverse Design : %G eng %U http://hdl.handle.net/21.11116/0000-0009-7104-3 %U https://arxiv.org/abs/2109.12888 %D 2021 %X In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances. %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
2020
Zheng, Q., Babaei, V., Wetzstein, G., Seidel, H.-P., Zwicker, M., and Singh, G. 2020. Neural Light Field 3D Printing. ACM Transactions on Graphics (Proc. SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Zheng_TOG2020, TITLE = {Neural Light Field {3D} Printing}, AUTHOR = {Zheng, Quan and Babaei, Vahid and Wetzstein, Gordon and Seidel, Hans-Peter and Zwicker, Matthias and Singh, Gurprit}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {207}, BOOKTITLE = {Proceedings of the SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Zheng, Quan %A Babaei, Vahid %A Wetzstein, Gordon %A Seidel, Hans-Peter %A Zwicker, Matthias %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Light Field 3D Printing : %U http://hdl.handle.net/21.11116/0000-0007-9AA8-E %R 10.1145/3414685.3417879 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 207 %I ACM %C New York, NY %@ false %B Proceedings of the SIGGRAPH Asia 2020 %O SIGGRAPH Asia 2020 SA'20 SA 2020
Yu, Y., Meka, A., Elgharib, M., Seidel, H.-P., Theobalt, C., and Smith, W.A.P. 2020. Self-supervised Outdoor Scene Relighting. Computer Vision -- ECCV 2020, Springer.
Export
BibTeX
@inproceedings{yu_ECCV20, TITLE = {Self-supervised Outdoor Scene Relighting}, AUTHOR = {Yu, Ye and Meka, Abhimitra and Elgharib, Mohamed and Seidel, Hans-Peter and Theobalt, Christian and Smith, William A. P.}, LANGUAGE = {eng}, ISBN = {978-3-030-58541-9}, DOI = {10.1007/978-3-030-58542-6_6}, PUBLISHER = {Springer}, YEAR = {2020}, DATE = {2020}, BOOKTITLE = {Computer Vision -- ECCV 2020}, EDITOR = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael}, PAGES = {84--101}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {12367}, ADDRESS = {Glasgow, UK}, }
Endnote
%0 Conference Proceedings %A Yu, Ye %A Meka, Abhimitra %A Elgharib, Mohamed %A Seidel, Hans-Peter %A Theobalt, Christian %A Smith, William A. P. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Self-supervised Outdoor Scene Relighting : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0F6-C %R 10.1007/978-3-030-58542-6_6 %D 2020 %B 16th European Conference on Computer Vision %Z date of event: 2020-08-23 - 2020-08-28 %C Glasgow, UK %B Computer Vision -- ECCV 2020 %E Vedaldi, Andrea; Bischof, Horst; Brox, Thomas; Frahm, Jan-Michael %P 84 - 101 %I Springer %@ 978-3-030-58541-9 %B Lecture Notes in Computer Science %N 12367
Yenamandra, T., Tewari, A., Bernard, F., et al. 2020. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. https://arxiv.org/abs/2011.14143.
(arXiv: 2011.14143)
Abstract
We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models, it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models, it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.
Export
BibTeX
@online{Yenamandra_arXiv2011.14143, TITLE = {i{3D}MM: Deep Implicit {3D} Morphable Model of Human Heads}, AUTHOR = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2011.14143}, EPRINT = {2011.14143}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models, it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models, it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.}, }
Endnote
%0 Report %A Yenamandra, Tarun %A Tewari, Ayush %A Bernard, Florian %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Cremers, Daniel %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T i3DMM: Deep Implicit 3D Morphable Model of Human Heads : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B702-8 %U https://arxiv.org/abs/2011.14143 %D 2020 %X We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models, it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models, it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020a. StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
Export
BibTeX
@inproceedings{Tewari_CVPR2020, TITLE = {{StyleRig}: {R}igging {StyleGAN} for {3D} Control Over Portrait Images}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-7281-7168-5}, DOI = {10.1109/CVPR42600.2020.00618}, PUBLISHER = {IEEE}, YEAR = {2020}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)}, PAGES = {6141--6150}, ADDRESS = {Seattle, WA, USA (Virtual)}, }
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0E7-D %R 10.1109/CVPR42600.2020.00618 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6141 - 6150 %I IEEE %@ 978-1-7281-7168-5
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020b. StyleRig: Rigging StyleGAN for 3D Control over Portrait Images. https://arxiv.org/abs/2004.00121.
(arXiv: 2004.00121)
Abstract
StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet, is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.
Export
BibTeX
@online{Tewari_2004.00121, TITLE = {{StyleRig}: Rigging {StyleGAN} for {3D} Control over Portrait Images}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2004.00121}, EPRINT = {2004.00121}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet, is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.}, }
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0FC-6 %U https://arxiv.org/abs/2004.00121 %D 2020 %X StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet, is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020c. PIE: Portrait Image Embedding for Semantic Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Tewari_ToG2020, TITLE = {{PIE}: {P}ortrait Image Embedding for Semantic Control}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417803}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {223}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B0C-E %R 10.1145/3414685.3417803 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 223 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020d. PIE: Portrait Image Embedding for Semantic Control. https://arxiv.org/abs/2009.09485.
(arXiv: 2009.09485)
Abstract
Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.
Export
BibTeX
@online{Tewari_2009.09485, TITLE = {{PIE}: {P}ortrait Image Embedding for Semantic Control}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2009.09485}, EPRINT = {2009.09485}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.}, }
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B117-7 %U https://arxiv.org/abs/2009.09485 %D 2020 %X Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Stadlbauer, P., Mlakar, D., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing. Computer Graphics Forum (Proc. EUROGRAPHICS 2020) 39, 2.
Export
BibTeX
@article{Stadlbauer_EG2020, TITLE = {Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing}, AUTHOR = {Stadlbauer, Pascal and Mlakar, Daniel and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13929}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, DATE = {2020}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {39}, NUMBER = {2}, PAGES = {277--289}, BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)}, EDITOR = {Panozzo, Daniele and Assarsson, Ulf}, }
Endnote
%0 Journal Article %A Stadlbauer, Pascal %A Mlakar, Daniel %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB8A-8 %R 10.1111/cgf.13929 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 277 %P 277 - 289 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrköping, Sweden, May 25 – 29, 2020
Shahmirzadi, A.A., Babaei, V., and Seidel, H.-P. 2020. A Multispectral Dataset of Oil and Watercolor Paints. Electronic Imaging 32.
Export
BibTeX
@article{shahmirzadi2020multispectral, TITLE = {A Multispectral Dataset of Oil and Watercolor Paints}, AUTHOR = {Shahmirzadi, Azadeh Asadi and Babaei, Vahid and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2352/ISSN.2470-1173.2020.5.MAAP-107}, PUBLISHER = {IS\&T}, ADDRESS = {Springfield, VA}, YEAR = {2020}, JOURNAL = {Electronic Imaging}, VOLUME = {32}, PAGES = {1--4}, EID = {107}, BOOKTITLE = {Proceedings of the Material Appearance 2020}, EDITOR = {H{\'e}bert, Mathieu and Simonot, Lionel and Tastl, Ingeborg}, }
Endnote
%0 Journal Article %A Shahmirzadi, Azadeh Asadi %A Babaei, Vahid %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Multispectral Dataset of Oil and Watercolor Paints : %G eng %U http://hdl.handle.net/21.11116/0000-0007-F064-9 %R 10.2352/ISSN.2470-1173.2020.5.MAAP-107 %7 2020 %D 2020 %J Electronic Imaging %V 32 %& 1 %P 1 - 4 %Z sequence number: 107 %I IS&T %C Springfield, VA %B Proceedings of the Material Appearance 2020 %O Burlingame, CA, USA, January 26-30, 2020 %I Society for Imaging Science and Technology %C Springfield
Saberpour, A., Hersch, R.D., Fang, J., Zayer, R., Seidel, H.-P., and Babaei, V. 2020. Fabrication of Moiré on Curved Surfaces. Optics Express 28, 13.
Export
BibTeX
@article{Saberpour2020, TITLE = {Fabrication of Moir{\'e} on Curved Surfaces}, AUTHOR = {Saberpour, Artin and Hersch, Roger D. and Fang, Jiajing and Zayer, Rhaleb and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {1094-4087}, DOI = {10.1364/OE.393843}, PUBLISHER = {Optical Society of America}, ADDRESS = {Washington, DC}, YEAR = {2020}, DATE = {2020}, JOURNAL = {Optics Express}, VOLUME = {28}, NUMBER = {13}, PAGES = {19413--19427}, }
Endnote
%0 Journal Article %A Saberpour, Artin %A Hersch, Roger D. %A Fang, Jiajing %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fabrication of Moiré on Curved Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0006-D39D-B %R 10.1364/OE.393843 %7 2020 %D 2020 %J Optics Express %O Opt. Express %V 28 %N 13 %& 19413 %P 19413 - 19427 %I Optical Society of America %C Washington, DC %@ false
Mlakar, D., Winter, M., Stadlbauer, P., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU. Computer Graphics Forum (Proc. EUROGRAPHICS 2020) 39, 2.
Export
BibTeX
@article{Mlakar_EG2020, TITLE = {Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the {GPU}}, AUTHOR = {Mlakar, Daniel and Winter, M. and Stadlbauer, Pascal and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13934}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, DATE = {2020}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {39}, NUMBER = {2}, PAGES = {335--349}, BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)}, EDITOR = {Panozzo, Daniele and Assarsson, Ulf}, }
Endnote
%0 Journal Article %A Mlakar, Daniel %A Winter, M. %A Stadlbauer, Pascal %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB80-2 %R 10.1111/cgf.13934 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 335 %P 335 - 349 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrköping, Sweden, May 25 – 29, 2020
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2020a. XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
Export
BibTeX
@article{Mehta_TOG2020, TITLE = {{XNect}: {R}eal-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392410}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {82}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0007-832D-3 %R 10.1145/3386569.3392410 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 82 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2020b. XNect: Real-time Multi-person 3D Motion Capture with a Single RGB Camera. ACM Transactions on Graphics 39, 4.
Export
BibTeX
@article{DBLP:journals/tog/MehtaS0XEFSRPT20, TITLE = {{XNect}: Real-time Multi-person {3D} Motion Capture with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392410}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2020}, DATE = {2020}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {39}, NUMBER = {4}, PAGES = {1--17}, EID = {82}, }
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Motion Capture with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-000F-796D-0 %R 10.1145/3386569.3392410 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %& 1 %P 1 - 17 %Z sequence number: 82 %I Association for Computing Machinery %C New York, NY %@ false
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020a. Learning Complete 3D Morphable Face Models from Images and Videos. https://arxiv.org/abs/2010.01679.
(arXiv: 2010.01679)
Abstract
Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches.
Export
BibTeX
@online{Mallikarjun_arXiv2010.01679, TITLE = {Learning Complete {3D} Morphable Face Models from Images and Videos}, AUTHOR = {Mallikarjun B R and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.01679}, EPRINT = {2010.01679}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches.}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Complete 3D Morphable Face Models from Images and Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6FB-1 %U https://arxiv.org/abs/2010.01679 %D 2020 %X Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG,Computer Science, Multimedia, cs.MM
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2020b. Monocular Reconstruction of Neural Face Reflectance Fields. https://arxiv.org/abs/2008.10247.
(arXiv: 2008.10247)
Abstract
The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self-shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical primitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects.
Export
BibTeX
@online{Mallikarjun_2008.10247, TITLE = {Monocular Reconstruction of Neural Face Reflectance Fields}, AUTHOR = {Mallikarjun B R and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2008.10247}, EPRINT = {2008.10247}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self-shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical primitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects.}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Oh, Tae-Hyun %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Monocular Reconstruction of Neural Face Reflectance Fields : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B110-E %U https://arxiv.org/abs/2008.10247 %D 2020 %X The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self-shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical primitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Kappel, M., Golyanik, V., Elgharib, M., et al. 2020. High-Fidelity Neural Human Motion Transfer from Monocular Video. https://arxiv.org/abs/2012.10974.
(arXiv: 2012.10974)
Abstract
Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available.
Export
BibTeX
@online{Kappel_arXiv2012.10974, TITLE = {High-Fidelity Neural Human Motion Transfer from Monocular Video}, AUTHOR = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.10974}, EPRINT = {2012.10974}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available.}, }
Endnote
%0 Report %A Kappel, Moritz %A Golyanik, Vladislav %A Elgharib, Mohamed %A Henningson, Jann-Ole %A Seidel, Hans-Peter %A Castillo, Susana %A Theobalt, Christian %A Magnor, Marcus A. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T High-Fidelity Neural Human Motion Transfer from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B715-3 %U https://arxiv.org/abs/2012.10974 %D 2020 %X Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Fox, G., Liu, W., Kim, H., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020. VideoForensicsHQ: Detecting High-quality Manipulated Face Videos. https://arxiv.org/abs/2005.10360.
(arXiv: 2005.10360)
Abstract
New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulations by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore, we propose a new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities.
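The detectors described above hinge on combining spatial and temporal features. Below is a minimal PyTorch sketch of a spatio-temporal clip classifier, assuming a (batch, channels, frames, height, width) layout; the architecture is a toy stand-in, not the paper's detector.

import torch
import torch.nn as nn

class SpatioTemporalFakeDetector(nn.Module):
    # Toy spatio-temporal binary classifier (real vs. fake). 3D convolutions
    # filter jointly over space and time, so temporal inconsistencies of a
    # manipulation can contribute to the decision.
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv3d(3, 16, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool3d(2),
            nn.Conv3d(16, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool3d(1),               # global space-time pooling
        )
        self.head = nn.Linear(32, 1)               # fake-probability logit

    def forward(self, clip):                       # clip: (B, 3, T, H, W)
        return self.head(self.features(clip).flatten(1))

logits = SpatioTemporalFakeDetector()(torch.randn(2, 3, 8, 64, 64))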
Export
BibTeX
@online{Fox_2005.10360, TITLE = {{Video\-Foren\-sics\-HQ}: {D}etecting High-quality Manipulated Face Videos}, AUTHOR = {Fox, Gereon and Liu, Wentao and Kim, Hyeongwoo and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2005.10360}, EPRINT = {2005.10360}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulations by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore, we propose a new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities.}, }
Endnote
%0 Report %A Fox, Gereon %A Liu, Wentao %A Kim, Hyeongwoo %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T VideoForensicsHQ: Detecting High-quality Manipulated Face Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B109-7 %U https://arxiv.org/abs/2005.10360 %D 2020 %X New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulations by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore, we propose a new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Elgharib, M., Mendiratta, M., Thies, J., et al. 2020. Egocentric Videoconferencing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Elgharib_ToG2020, TITLE = {Egocentric Videoconferencing}, AUTHOR = {Elgharib, Mohamed and Mendiratta, Mohit and Thies, Justus and Nie{\ss}ner, Matthias and Seidel, Hans-Peter and Tewari, Ayush and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417808}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Elgharib, Mohamed %A Mendiratta, Mohit %A Thies, Justus %A Nießner, Matthias %A Seidel, Hans-Peter %A Tewari, Ayush %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Egocentric Videoconferencing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B36-E %R 10.1145/3414685.3417808 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Cucerca, S., Didyk, P., Seidel, H.-P., and Babaei, V. 2020. Computational Image Marking on Metals via Laser Induced Heating. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020)39, 4.
Export
BibTeX
@article{Cucerca_SIGGRAPH2020, TITLE = {Computational Image Marking on Metals via Laser Induced Heating}, AUTHOR = {Cucerca, Sebastian and Didyk, Piotr and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392423}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {70}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Cucerca, Sebastian %A Didyk, Piotr %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computational Image Marking on Metals via Laser Induced Heating : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9664-F %R 10.1145/3386569.3392423 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 70 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020. HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models. https://arxiv.org/abs/2012.12009.
(arXiv: 2012.12009)
Abstract
We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as re-lighting with low noise or blur.
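The CLEAN->DISTORTED idea above is easy to make concrete: simulate the dual-exposure readout from clean frames. A toy NumPy sketch follows, with all noise and blur parameters assumed for illustration; the paper learns a far richer, sensor-calibrated model.

import numpy as np

def clean_to_distorted(clean, gain=4.0, read_sigma=0.02, blur_frames=8, seed=0):
    # Toy CLEAN->DISTORTED simulator for a dual-exposure sensor: from a stack
    # of CLEAN frames (T, H, W), build one distorted readout in which odd
    # columns hold a short, sharp but noisy exposure and even columns hold a
    # long, cleaner but motion-blurred exposure. All parameters are assumed;
    # the paper learns correlated pixel/row/column noise from real readings.
    rng = np.random.default_rng(seed)
    short = clean[0] / gain                             # underexposed, sharp
    short = short + rng.normal(0.0, read_sigma, short.shape)
    blurred = clean[:blur_frames].mean(axis=0)          # motion blur over time
    out = np.empty_like(short)
    out[:, 1::2] = short[:, 1::2]                       # odd columns: low exposure
    out[:, 0::2] = np.clip(blurred[:, 0::2], 0.0, 1.0)  # even columns: high exposure
    return out

distorted = clean_to_distorted(np.random.rand(8, 64, 64))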
Export
BibTeX
@online{Cogalan_arXiv2012.12009, TITLE = {{HDR} Denoising and Deblurring by Learning Spatio-temporal Distortion Models}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.12009}, EPRINT = {2012.12009}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as re-lighting with low noise or blur.}, }
Endnote
%0 Report %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B721-5 %U https://arxiv.org/abs/2012.12009 %D 2020 %X We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as re-lighting with low noise or blur. %K eess.IV,Computer Science, Computer Vision and Pattern Recognition, cs.CV
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020a. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Bemana2020, TITLE = {X-{F}ields: {I}mplicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417827}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {257}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBF0-0 %R 10.1145/3414685.3417827 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 257 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020b. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. https://arxiv.org/abs/2010.00450.
(arXiv: 2010.00450)
Abstract
We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map, that for any view, time, or light coordinate and for any pixel can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.
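The abstract reduces to one idea: overfit a small network mapping an X-Field coordinate to an image, then query it at continuous coordinates. A toy PyTorch sketch under that reading; resolution, layer sizes, and the direct color output are assumptions (the paper predicts warps via pixel Jacobians rather than raw colors).

import torch
import torch.nn as nn

class XFieldMLP(nn.Module):
    # Toy coordinate-to-image network: maps one X-Field coordinate
    # (view_x, view_y, time, light) to a full RGB image.
    def __init__(self, res=(64, 64)):
        super().__init__()
        self.res = res
        self.net = nn.Sequential(
            nn.Linear(4, 128), nn.ReLU(),
            nn.Linear(128, 128), nn.ReLU(),
            nn.Linear(128, res[0] * res[1] * 3), nn.Sigmoid(),
        )

    def forward(self, coord):                      # coord: (B, 4)
        return self.net(coord).view(-1, 3, *self.res)

model = XFieldMLP()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
coords = torch.rand(9, 4)                          # 9 sparse exemplars of one scene
images = torch.rand(9, 3, 64, 64)
for _ in range(200):                               # overfit this single scene
    opt.zero_grad()
    (model(coords) - images).abs().mean().backward()
    opt.step()
novel = model(torch.tensor([[0.5, 0.5, 0.25, 0.7]]))  # continuous interpolation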
Export
BibTeX
@online{Bemana_arXiv2010.00450, TITLE = {X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.00450}, EPRINT = {2010.00450}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map, that for any view, time, or light coordinate and for any pixel can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.}, }
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6EC-2 %U https://arxiv.org/abs/2010.00450 %D 2020 %X We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map, that for any view, time, or light coordinate and for any pixel can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Ansari, N., Alizadeh-Mousavi, O., Seidel, H.-P., and Babaei, V. 2020. Mixed Integer Ink Selection for Spectral Reproduction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020)39, 6.
Export
BibTeX
@article{Ansari_ToG2020, TITLE = {Mixed Integer Ink Selection for Spectral Reproduction}, AUTHOR = {Ansari, Navid and Alizadeh-Mousavi, Omid and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417761}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {255}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Ansari, Navid %A Alizadeh-Mousavi, Omid %A Seidel, Hans-Peter %A Babaei, Vahid %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Ink Selection for Spectral Reproduction : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B23-3 %R 10.1145/3414685.3417761 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 255 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
2019
Yu, H., Bemana, M., Wernikowski, M., et al. 2019. A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019)25, 5.
Export
BibTeX
@article{Yu_VR2019, TITLE = {A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays}, AUTHOR = {Yu, Hyeonseung and Bemana, Mojtaba and Wernikowski, Marek and Chwesiuk, Micha{\l} and Tursun, Okan Tarhan and Singh, Gurprit and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2019.2898821}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)}, VOLUME = {25}, NUMBER = {5}, PAGES = {1940--1950}, BOOKTITLE = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)}, EDITOR = {Thomas, Bruce and Welch, Greg and Kuhlen, Torsten and Johnson, Kyle}, }
Endnote
%0 Journal Article %A Yu, Hyeonseung %A Bemana, Mojtaba %A Wernikowski, Marek %A Chwesiuk, Michał %A Tursun, Okan Tarhan %A Singh, Gurprit %A Myszkowski, Karol %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0002-DCB5-A %R 10.1109/TVCG.2019.2898821 %7 2019 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %V 25 %N 5 %& 1940 %P 1940 - 1950 %I IEEE Computer Society %C New York, NY %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, 23rd - 27th March
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2019. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019)25, 5.
Export
BibTeX
@article{Xu2019Mo2Cap2, TITLE = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2019.2898650}, PUBLISHER = {IEEE}, ADDRESS = {Piscataway, NJ}, YEAR = {2019}, DATE = {2019}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)}, VOLUME = {25}, NUMBER = {5}, PAGES = {2093--2101}, BOOKTITLE = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)}, }
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollhöfer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F1DB-7 %R 10.1109/TVCG.2019.2898650 %7 2019 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %V 25 %N 5 %& 2093 %P 2093 - 2101 %I IEEE %C Piscataway, NJ %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, March 23rd - 27th
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2019. Adaptive Sparse Matrix-Matrix Multiplication on the GPU. PPoPP’19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, ACM.
Export
BibTeX
@inproceedings{PPOPP:2019:ASPMM, TITLE = {Adaptive Sparse Matrix-Matrix Multiplication on the {GPU}}, AUTHOR = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISBN = {978-1-4503-6225-2}, DOI = {10.1145/3293883.3295701}, PUBLISHER = {ACM}, YEAR = {2019}, DATE = {2019}, BOOKTITLE = {PPoPP'19, 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming}, PAGES = {68--81}, ADDRESS = {Washington, DC, USA}, }
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Adaptive Sparse Matrix-Matrix Multiplication on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EFE9-B %R 10.1145/3293883.3295701 %D 2019 %B 24th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming %Z date of event: 2019-02-16 - 2019-02-20 %C Washington, DC, USA %B PPoPP'19 %P 68 - 81 %I ACM %@ 978-1-4503-6225-2
Tursun, O.T., Arabadzhiyska, E., Wernikowski, M., et al. 2019. Luminance-Contrast-Aware Foveated Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2019)38, 4.
Export
BibTeX
@article{Tursun2019Luminance, TITLE = {Luminance-Contrast-Aware Foveated Rendering}, AUTHOR = {Tursun, Okan Tarhan and Arabadzhiyska, Elena and Wernikowski, Marek and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Myszkowski, Karol and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3306346.3322985}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {38}, NUMBER = {4}, EID = {98}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2019}, }
Endnote
%0 Journal Article %A Tursun, Okan Tarhan %A Arabadzhiyska, Elena %A Wernikowski, Marek %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Myszkowski, Karol %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Luminance-Contrast-Aware Foveated Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0003-75D5-9 %R 10.1145/3306346.3322985 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 4 %Z sequence number: 98 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2019 %O ACM SIGGRAPH 2019 Los Angeles, CA, USA, 28 July - 1 August
Tewari, A., Bernard, F., Garrido, P., et al. 2019. FML: Face Model Learning From Videos. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019), IEEE.
Export
BibTeX
@inproceedings{TewariCVPR2019, TITLE = {{FML}: {F}ace Model Learning From Videos}, AUTHOR = {Tewari, Ayush and Bernard, Florian and Garrido, Pablo and Bharaj, Gaurav and Elgharib, Mohamed and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-7281-3293-8}, DOI = {10.1109/CVPR.2019.01107}, PUBLISHER = {IEEE}, YEAR = {2019}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019)}, PAGES = {10812--10822}, ADDRESS = {Long Beach, CA, USA}, }
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Bernard, Florian %A Garrido, Pablo %A Bharaj, Gaurav %A Elgharib, Mohamed %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T FML: Face Model Learning From Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0005-7B0C-5 %R 10.1109/CVPR.2019.01107 %D 2019 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 10812 - 10822 %I IEEE %@ 978-1-7281-3293-8
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2019a. XNect Demo (v2): Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. CVPR 2019 Demonstrations.
Export
BibTeX
@inproceedings{XNectDemoV2_CVPR2019, TITLE = {{XNect} Demo (v2): {R}eal-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Seidel, Hans-Peter and Fua, Pascal and Elgharib, Mohamed and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, YEAR = {2019}, BOOKTITLE = {CVPR 2019 Demonstrations}, ADDRESS = {Long Beach, CA, USA}, }
Endnote
%0 Conference Proceedings %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Seidel, Hans-Peter %A Fua, Pascal %A Elgharib, Mohamed %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect Demo (v2): Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0004-71DB-6 %D 2019 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B CVPR 2019 Demonstrations %U http://gvv.mpi-inf.mpg.de/projects/XNectDemoV2/
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2019b. XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1907.00837.
(arXiv: 1907.00837)
Abstract
We present a real-time approach for multi-person 3D motion capture at over 30 fps using a single RGB camera. It operates in generic scenes and is robust to difficult occlusions both by other people and objects. Our method operates in subsequent stages. The first stage is a convolutional neural network (CNN) that estimates 2D and 3D pose features along with identity assignments for all visible joints of all individuals. We contribute a new architecture for this CNN, called SelecSLS Net, that uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. In the second stage, a fully-connected neural network turns the possibly partial (on account of occlusion) 2D pose and 3D pose features for each subject into a complete 3D pose estimate per individual. The third stage applies space-time skeletal model fitting to the predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose, and enforce temporal coherence. Our method returns the full skeletal pose in joint angles for each subject. This is a further key distinction from previous work that neither extracted global body positions nor joint angle results of a coherent skeleton in real time for multi-person scenes. The proposed system runs on consumer hardware at a previously unseen speed of more than 30 fps given 512x320 images as input while achieving state-of-the-art accuracy, which we will demonstrate on a range of challenging real-world scenes.
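The three stages map naturally onto a per-frame pipeline. The following Python sketch only fixes the data flow; every stage body is a hypothetical placeholder, not the XNect networks.

import numpy as np

def stage1_cnn(frame):
    # Hypothetical stand-in for the SelecSLS-based CNN: per-frame 2D pose,
    # 3D pose features and identity assignments for all visible joints.
    n_people, n_joints = 2, 17
    pose2d = np.random.rand(n_people, n_joints, 2)
    feats3d = np.random.rand(n_people, n_joints, 3)
    return pose2d, feats3d, list(range(n_people))

def stage2_lift(pose2d_i, feats3d_i):
    # Hypothetical stand-in for the fully-connected lifting network that
    # completes possibly occluded (partial) evidence into a full 3D pose.
    return np.concatenate([pose2d_i, feats3d_i], axis=-1)

def stage3_fit(poses3d):
    # Hypothetical stand-in for space-time skeletal model fitting, which
    # reconciles 2D/3D evidence, enforces temporal coherence and returns
    # joint angles per subject.
    return [p.mean(axis=0) for p in poses3d]

frame = np.zeros((320, 512, 3))                    # 512x320 input, as in the paper
pose2d, feats3d, ids = stage1_cnn(frame)
joint_angles = stage3_fit([stage2_lift(pose2d[i], feats3d[i]) for i in ids])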
Export
BibTeX
@online{Mehta_arXiv1907.00837, TITLE = {{XNect}: Real-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1907.00837}, EPRINT = {1907.00837}, EPRINTTYPE = {arXiv}, YEAR = {2019}, ABSTRACT = {We present a real-time approach for multi-person 3D motion capture at over 30 fps using a single RGB camera. It operates in generic scenes and is robust to difficult occlusions both by other people and objects. Our method operates in subsequent stages. The first stage is a convolutional neural network (CNN) that estimates 2D and 3D pose features along with identity assignments for all visible joints of all individuals. We contribute a new architecture for this CNN, called SelecSLS Net, that uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. In the second stage, a fully-connected neural network turns the possibly partial (on account of occlusion) 2D pose and 3D pose features for each subject into a complete 3D pose estimate per individual. The third stage applies space-time skeletal model fitting to the predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose, and enforce temporal coherence. Our method returns the full skeletal pose in joint angles for each subject. This is a further key distinction from previous work that neither extracted global body positions nor joint angle results of a coherent skeleton in real time for multi-person scenes. The proposed system runs on consumer hardware at a previously unseen speed of more than 30 fps given 512x320 images as input while achieving state-of-the-art accuracy, which we will demonstrate on a range of challenging real-world scenes.}, }
Endnote
%0 Report %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0003-FE21-A %U http://arxiv.org/abs/1907.00837 %D 2019 %X We present a real-time approach for multi-person 3D motion capture at over 30 fps using a single RGB camera. It operates in generic scenes and is robust to difficult occlusions both by other people and objects. Our method operates in subsequent stages. The first stage is a convolutional neural network (CNN) that estimates 2D and 3D pose features along with identity assignments for all visible joints of all individuals. We contribute a new architecture for this CNN, called SelecSLS Net, that uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. In the second stage, a fully-connected neural network turns the possibly partial (on account of occlusion) 2D pose and 3D pose features for each subject into a complete 3D pose estimate per individual. The third stage applies space-time skeletal model fitting to the predicted 2D and 3D pose per subject to further reconcile the 2D and 3D pose, and enforce temporal coherence. Our method returns the full skeletal pose in joint angles for each subject. This is a further key distinction from previous work that neither extracted global body positions nor joint angle results of a coherent skeleton in real time for multi-person scenes. The proposed system runs on consumer hardware at a previously unseen speed of more than 30 fps given 512x320 images as input while achieving state-of-the-art accuracy, which we will demonstrate on a range of challenging real-world scenes. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019. Deep Point Correlation Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2019)38, 6.
Export
BibTeX
@article{Leimkuehler_SA2019, TITLE = {Deep Point Correlation Design}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3355089.3356562}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {38}, NUMBER = {6}, EID = {226}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2019}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Point Correlation Design : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BF3-B %R 10.1145/3355089.3356562 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 226 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2019 %O ACM SIGGRAPH Asia 2019 Brisbane, Australia, 17 - 20 November 2019 SA'19 SA 2019
Kim, H., Elgharib, M., Zollhöfer, M., et al. 2019. Neural Style-preserving Visual Dubbing. ACM Transactions on Graphics38, 6.
Export
BibTeX
@article{Kim2019, TITLE = {Neural Style-preserving Visual Dubbing}, AUTHOR = {Kim, Hyeongwoo and Elgharib, Mohamed and Zollh{\"o}fer, Michael and Seidel, Hans-Peter and Beeler, Thabo and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3355089.3356500}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {38}, NUMBER = {6}, EID = {178}, }
Endnote
%0 Journal Article %A Kim, Hyeongwoo %A Elgharib, Mohamed %A Zollhöfer, Michael %A Seidel, Hans-Peter %A Beeler, Thabo %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Style-preserving Visual Dubbing : %G eng %U http://hdl.handle.net/21.11116/0000-0005-6AC0-B %R 10.1145/3355089.3356500 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 178 %I ACM %C New York, NY %@ false
Jiang, C., Tang, C., Seidel, H.-P., Chen, R., and Wonka, P. 2019. Computational Design of Lightweight Trusses. http://arxiv.org/abs/1901.05637.
(arXiv: 1901.05637)
Abstract
Trusses are load-carrying light-weight structures consisting of bars connected at joints ubiquitously applied in a variety of engineering scenarios. Designing optimal trusses that satisfy functional specifications with a minimal amount of material has interested both theoreticians and practitioners for more than a century. In this paper, we introduce two main ideas to improve upon the state of the art. First, we formulate an alternating linear programming problem for geometry optimization. Second, we introduce two sets of complementary topological operations, including a novel subdivision scheme for global topology refinement inspired by Michell's famed theoretical study. Based on these two ideas, we build an efficient computational framework for the design of lightweight trusses. We illustrate our framework with a variety of functional specifications and extensions. We show that our method achieves trusses with smaller volumes and is over two orders of magnitude faster compared with recent state-of-the-art approaches.
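The geometry-optimization step above alternates linear programs. Below is a toy SciPy sketch of the classic minimum-volume member-force LP for fixed node positions; the matrices, sign conventions, and single-load setup are simplifying assumptions, and the paper's alternating geometry step and topology operations are omitted.

import numpy as np
from scipy.optimize import linprog

def min_volume_forces(nodes, bars, loads, free, sigma=1.0):
    # Toy minimum-volume truss LP: split each member force into q+ and q- >= 0
    # and minimize sum_i l_i * (q+_i + q-_i) / sigma, which is proportional to
    # material volume, subject to nodal equilibrium on the free degrees of
    # freedom. Tension-positive sign convention; flipping it only flips q.
    B = np.zeros((2 * len(nodes), len(bars)))      # nodal equilibrium matrix
    lengths = np.empty(len(bars))
    for j, (a, b) in enumerate(bars):
        d = nodes[b] - nodes[a]
        lengths[j] = np.linalg.norm(d)
        u = d / lengths[j]                         # unit direction of the bar
        B[2 * a:2 * a + 2, j] = u
        B[2 * b:2 * b + 2, j] = -u
    c = np.concatenate([lengths, lengths]) / sigma
    A_eq = np.hstack([B[free], -B[free]])          # B (q+ - q-) = f on free DOFs
    res = linprog(c, A_eq=A_eq, b_eq=loads[free], bounds=(0, None))
    q = res.x[:len(bars)] - res.x[len(bars):]      # signed member forces
    return q, np.abs(q) / sigma                    # forces and member areas

# Two bars meeting at a loaded apex; nodes 0 and 1 are supports.
nodes = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 1.0]])
bars = [(0, 2), (1, 2)]
loads = np.zeros(6)
loads[5] = -1.0                                    # unit load on node 2, y direction
free = [4, 5]                                      # only node 2 carries the load
forces, areas = min_volume_forces(nodes, bars, loads, free)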
Export
BibTeX
@online{Jiang_arXIv1901.05637, TITLE = {Computational Design of Lightweight Trusses}, AUTHOR = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Chen, Renjie and Wonka, Peter}, URL = {http://arxiv.org/abs/1901.05637}, EPRINT = {1901.05637}, EPRINTTYPE = {arXiv}, YEAR = {2019}, ABSTRACT = {Trusses are load-carrying light-weight structures consisting of bars connected at joints ubiquitously applied in a variety of engineering scenarios. Designing optimal trusses that satisfy functional specifications with a minimal amount of material has interested both theoreticians and practitioners for more than a century. In this paper, we introduce two main ideas to improve upon the state of the art. First, we formulate an alternating linear programming problem for geometry optimization. Second, we introduce two sets of complementary topological operations, including a novel subdivision scheme for global topology refinement inspired by Michell's famed theoretical study. Based on these two ideas, we build an efficient computational framework for the design of lightweight trusses. We illustrate our framework with a variety of functional specifications and extensions. We show that our method achieves trusses with smaller volumes and is over two orders of magnitude faster compared with recent state-of-the-art approaches.}, }
Endnote
%0 Report %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Chen, Renjie %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Computational Design of Lightweight Trusses : %U http://hdl.handle.net/21.11116/0000-0003-A7E9-A %U http://arxiv.org/abs/1901.05637 %D 2019 %X Trusses are load-carrying light-weight structures consisting of bars connected at joints ubiquitously applied in a variety of engineering scenarios. Designing optimal trusses that satisfy functional specifications with a minimal amount of material has interested both theoreticians and practitioners for more than a century. In this paper, we introduce two main ideas to improve upon the state of the art. First, we formulate an alternating linear programming problem for geometry optimization. Second, we introduce two sets of complementary topological operations, including a novel subdivision scheme for global topology refinement inspired by Michell's famed theoretical study. Based on these two ideas, we build an efficient computational framework for the design of lightweight trusses. We illustrate our framework with a variety of functional specifications and extensions. We show that our method achieves trusses with smaller volumes and is over two orders of magnitude faster compared with recent state-of-the-art approaches. %K Computer Science, Graphics, cs.GR
Hladký, J., Seidel, H.-P., and Steinberger, M. 2019a. The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2019)38, 6.
Export
BibTeX
@article{Hladky_SA2019, TITLE = {The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering}, AUTHOR = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-6008-1}, DOI = {10.1145/3355089.3356530}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {38}, NUMBER = {6}, EID = {231}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2019}, }
Endnote
%0 Journal Article %A Hladký, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T The Camera Offset Space: Real-time Potentially Visible Set Computations for Streaming Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0005-4E4F-D %R 10.1145/3355089.3356530 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 231 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2019 %O ACM SIGGRAPH Asia 2019 Brisbane, Australia, 17 - 20 November 2019 SA'19 SA 2019 %@ 978-1-4503-6008-1
Hladký, J., Seidel, H.-P., and Steinberger, M. 2019b. Tessellated Shading Streaming. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2019)38, 4.
Export
BibTeX
@article{Hladky_EGSR2019, TITLE = {Tessellated Shading Streaming}, AUTHOR = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, URL = {https://diglib.eg.org/handle/10.1111/cgf13780}, DOI = {10.1111/cgf.13780}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2019}, DATE = {2019}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {38}, NUMBER = {4}, PAGES = {171--182}, BOOKTITLE = {Eurographics Symposium on Rendering 2019}, EDITOR = {Boubekeur, Tamy and Sen, Pradeep}, }
Endnote
%0 Journal Article %A Hladký, Jozef %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Tessellated Shading Streaming : %G eng %U http://hdl.handle.net/21.11116/0000-0004-4897-1 %R 10.1111/cgf.13780 %U https://diglib.eg.org/handle/10.1111/cgf13780 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 4 %& 171 %P 171 - 182 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2019 %O Eurographics Symposium on Rendering 2019 EGSR 2019 Strasbourg, France, July 10 - 12, 2019
Elgharib, M., Mallikarjun B R, Tewari, A., et al. 2019. EgoFace: Egocentric Face Performance Capture and Videorealistic Reenactment. http://arxiv.org/abs/1905.10822.
(arXiv: 1905.10822)
Abstract
Face performance capture and reenactment techniques use multiple cameras and sensors, positioned at a distance from the face or mounted on heavy wearable devices. This limits their applications in mobile and outdoor environments. We present EgoFace, a radically new lightweight setup for face performance capture and front-view videorealistic reenactment using a single egocentric RGB camera. Our lightweight setup allows operations in uncontrolled environments, and lends itself to telepresence applications such as video-conferencing from dynamic environments. The input image is projected into a low dimensional latent space of the facial expression parameters. Through careful adversarial training of the parameter-space synthetic rendering, a videorealistic animation is produced. Our problem is challenging as the human visual system is sensitive to the smallest face irregularities that could occur in the final results. This sensitivity is even stronger for video results. Our solution is trained in a pre-processing stage, in a supervised manner without manual annotations. EgoFace captures a wide variety of facial expressions, including mouth movements and asymmetrical expressions. It works under varying illumination, backgrounds and movements, handles people of different ethnicities, and can operate in real time.
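The pipeline in this abstract is a two-step mapping: egocentric frame to low-dimensional expression parameters, then expression parameters to a front-view frame. A toy PyTorch sketch of that data flow, with all layer choices assumed and the adversarial objective only noted in a comment; this is not the authors' architecture.

import torch
import torch.nn as nn

class ExpressionEncoder(nn.Module):
    # Toy stand-in: projects an egocentric RGB frame into a low-dimensional
    # latent space of facial expression parameters.
    def __init__(self, n_params=64):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(3, 16, 4, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 4, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.fc = nn.Linear(32, n_params)

    def forward(self, x):
        return self.fc(self.conv(x).flatten(1))

class FrontalGenerator(nn.Module):
    # Toy stand-in: renders a front-view frame from expression parameters.
    # In the paper this rendering is trained adversarially for videorealism.
    def __init__(self, n_params=64, res=64):
        super().__init__()
        self.res = res
        self.net = nn.Sequential(nn.Linear(n_params, res * res * 3), nn.Sigmoid())

    def forward(self, p):
        return self.net(p).view(-1, 3, self.res, self.res)

params = ExpressionEncoder()(torch.randn(1, 3, 64, 64))
frontal = FrontalGenerator()(params)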
Export
BibTeX
@online{Elgharib_arXiv1905.10822, TITLE = {{EgoFace}: Egocentric Face Performance Capture and Videorealistic Reenactment}, AUTHOR = {Elgharib, Mohamed and Mallikarjun B R and Tewari, Ayush and Kim, Hyeongwoo and Liu, Wentao and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1905.10822}, EPRINT = {1905.10822}, EPRINTTYPE = {arXiv}, YEAR = {2019}, ABSTRACT = {Face performance capture and reenactment techniques use multiple cameras and sensors, positioned at a distance from the face or mounted on heavy wearable devices. This limits their applications in mobile and outdoor environments. We present EgoFace, a radically new lightweight setup for face performance capture and front-view videorealistic reenactment using a single egocentric RGB camera. Our lightweight setup allows operations in uncontrolled environments, and lends itself to telepresence applications such as video-conferencing from dynamic environments. The input image is projected into a low dimensional latent space of the facial expression parameters. Through careful adversarial training of the parameter-space synthetic rendering, a videorealistic animation is produced. Our problem is challenging as the human visual system is sensitive to the smallest face irregularities that could occur in the final results. This sensitivity is even stronger for video results. Our solution is trained in a pre-processing stage, in a supervised manner without manual annotations. EgoFace captures a wide variety of facial expressions, including mouth movements and asymmetrical expressions. It works under varying illumination, backgrounds and movements, handles people of different ethnicities, and can operate in real time.}, }
Endnote
%0 Report %A Elgharib, Mohamed %A Mallikarjun B R %A Tewari, Ayush %A Kim, Hyeongwoo %A Liu, Wentao %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoFace: Egocentric Face Performance Capture and Videorealistic Reenactment : %G eng %U http://hdl.handle.net/21.11116/0000-0003-F1E6-9 %U http://arxiv.org/abs/1905.10822 %D 2019 %X Face performance capture and reenactment techniques use multiple cameras and sensors, positioned at a distance from the face or mounted on heavy wearable devices. This limits their applications in mobile and outdoor environments. We present EgoFace, a radically new lightweight setup for face performance capture and front-view videorealistic reenactment using a single egocentric RGB camera. Our lightweight setup allows operations in uncontrolled environments, and lends itself to telepresence applications such as video-conferencing from dynamic environments. The input image is projected into a low dimensional latent space of the facial expression parameters. Through careful adversarial training of the parameter-space synthetic rendering, a videorealistic animation is produced. Our problem is challenging as the human visual system is sensitive to the smallest face irregularities that could occur in the final results. This sensitivity is even stronger for video results. Our solution is trained in a pre-processing stage, in a supervised manner without manual annotations. EgoFace captures a wide variety of facial expressions, including mouth movements and asymmetrical expressions. It works under varying illumination, backgrounds and movements, handles people of different ethnicities, and can operate in real time. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %U http://gvv.mpi-inf.mpg.de/projects/EgoFace/
Dokter, M., Hladký, J., Parger, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2019. Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the GPU. Computer Graphics Forum (Proc. EUROGRAPHICS 2019)38, 2.
Export
BibTeX
@article{Dokter_EG2019, TITLE = {Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the {GPU}}, AUTHOR = {Dokter, Mark and Hladk{\'y}, Jozef and Parger, Mathias and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13622}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2019}, DATE = {2019}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {38}, NUMBER = {2}, PAGES = {93--103}, BOOKTITLE = {EUROGRAPHICS 2019 STAR -- State of The Art Reports}, }
Endnote
%0 Journal Article %A Dokter, Mark %A Hladký, Jozef %A Parger, Mathias %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Rasterization of Curved Primitives for Vector Graphics Rendering on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-FC80-1 %R 10.1111/cgf.13622 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 2 %& 93 %P 93 - 103 %I Wiley-Blackwell %C Oxford %@ false %B EUROGRAPHICS 2019 STAR – State of The Art Reports %O EUROGRAPHICS 2019 The 40th Annual Conference of the European Association for Computer Graphics ; Genova, Italy, May 6-10, 2019 EG 2019
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019a. Neural View-Interpolation for Sparse Light Field Video. http://arxiv.org/abs/1910.13921.
(arXiv: 1910.13921)
Abstract
We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear, i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.
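The distinctive component here is the differentiable occlusion-aware warping. Below is a minimal PyTorch sketch of differentiable backward warping with a predicted flow field, assuming torch's grid_sample; the learned occlusion weighting itself is omitted.

import torch
import torch.nn.functional as F

def warp(image, flow):
    # Differentiably warp a source view by a predicted flow field.
    # image: (B, C, H, W); flow: (B, 2, H, W) in pixels. A building block of
    # the kind an occlusion-aware warping step would use.
    B, _, H, W = image.shape
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    base = torch.stack((xs, ys), dim=-1).float()        # (H, W, 2), x first
    pos = base.unsqueeze(0) + flow.permute(0, 2, 3, 1)  # displaced sample positions
    gx = 2.0 * pos[..., 0] / (W - 1) - 1.0              # normalize to [-1, 1]
    gy = 2.0 * pos[..., 1] / (H - 1) - 1.0
    return F.grid_sample(image, torch.stack((gx, gy), dim=-1), align_corners=True)

# Blend two warped neighbour views at an intermediate coordinate.
src0, src1 = torch.rand(1, 3, 32, 32), torch.rand(1, 3, 32, 32)
flow0, flow1 = torch.zeros(1, 2, 32, 32), torch.zeros(1, 2, 32, 32)
alpha = 0.5
target = (1 - alpha) * warp(src0, flow0) + alpha * warp(src1, flow1)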
Export
BibTeX
@online{Bemana_arXiv1910.13921, TITLE = {Neural View-Interpolation for Sparse Light Field Video}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1910.13921}, EPRINT = {1910.13921}, EPRINTTYPE = {arXiv}, YEAR = {2019}, ABSTRACT = {We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear, i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.}, }
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Neural View-Interpolation for Sparse Light Field Video : %G eng %U http://hdl.handle.net/21.11116/0000-0005-7B16-9 %U http://arxiv.org/abs/1910.13921 %D 2019 %X We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution. %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG,eess.IV
Bemana, M., Keinert, J., Myszkowski, K., et al. 2019b. Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image. Computer Graphics Forum (Proc. Pacific Graphics 2019)38, 7.
Export
BibTeX
@article{Bemana_PG2019, TITLE = {Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image}, AUTHOR = {Bemana, Mojtaba and Keinert, Joachim and Myszkowski, Karol and B{\"a}tz, Michel and Ziegler, Matthias and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13862}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2019}, DATE = {2019}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {38}, NUMBER = {7}, PAGES = {579--589}, BOOKTITLE = {27th Annual International Conference on Computer Graphics and Applications (Pacific Graphics 2019)}, }
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Keinert, Joachim %A Myszkowski, Karol %A Bätz, Michel %A Ziegler, Matthias %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BC5-F %R 10.1111/cgf.13862 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 7 %& 579 %P 579 - 589 %I Wiley-Blackwell %C Oxford, UK %@ false %B 27th Annual International Conference on Computer Graphics and Applications %O Pacific Graphics 2019 PG 2019 Seoul, October 14-17, 2019
2018
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018a. Layered Fields for Natural Tessellations on Surfaces. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2018)37, 6.
Export
BibTeX
@article{Zayer:2018:LFN, TITLE = {Layered Fields for Natural Tessellations on Surfaces}, AUTHOR = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-6008-1}, DOI = {10.1145/3272127.3275072}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {37}, NUMBER = {6}, EID = {264}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2018}, }
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E0-E %R 10.1145/3272127.3275072 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 6 %Z sequence number: 264 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2018 %O ACM SIGGRAPH Asia 2018 Tokyo, Japan, December 04 - 07, 2018 SA'18 SA 2018 %@ 978-1-4503-6008-1
Zayer, R., Mlakar, D., Steinberger, M., and Seidel, H.-P. 2018b. Layered Fields for Natural Tessellations on Surfaces. http://arxiv.org/abs/1804.09152.
(arXiv: 1804.09152)
Abstract
Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric and topological complications, and, most critically, parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time-dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently performed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.
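The claim that each time-iteration reduces to sparse multiply-and-add can be demonstrated on a toy example. The sketch below is our own illustration, not the paper's solver: two seed fields evolve on a 4-vertex path graph by repeated application of an explicit sparse diffusion step; the connectivity, weights, and step size are all made up.

import numpy as np
import scipy.sparse as sp

# Toy 4-vertex "mesh" (a path graph) with uniform edge weights.
A = sp.csr_matrix(np.array([[0, 1, 0, 0],
                            [1, 0, 1, 0],
                            [0, 1, 0, 1],
                            [0, 0, 1, 0]], dtype=float))
deg = sp.diags(np.asarray(A.sum(axis=1)).ravel())
L = deg - A                          # combinatorial graph Laplacian
step = sp.identity(4) - 0.2 * L      # one explicit diffusion step

F = np.eye(4)[:, :2]                 # two seed fields, one per region
for _ in range(50):                  # concurrently evolving fronts
    F = step @ F                     # sparse multiply + add per iteration
print(F.argmax(axis=1))              # region label = strongest field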
Export
BibTeX
@online{Zayer_arXiv1804.09152, TITLE = {Layered Fields for Natural Tessellations on Surfaces}, AUTHOR = {Zayer, Rhaleb and Mlakar, Daniel and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1804.09152}, EPRINT = {1804.09152}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware.}, }
Endnote
%0 Report %A Zayer, Rhaleb %A Mlakar, Daniel %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Layered Fields for Natural Tessellations on Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0002-152D-5 %U http://arxiv.org/abs/1804.09152 %D 2018 %X Mimicking natural tessellation patterns is a fascinating multi-disciplinary problem. Geometric methods aiming at reproducing such partitions on surface meshes are commonly based on the Voronoi model and its variants, and are often faced with challenging issues such as metric estimation, geometric, topological complications, and most critically parallelization. In this paper, we introduce an alternate model which may be of value for resolving these issues. We drop the assumption that regions need to be separated by lines. Instead, we regard region boundaries as narrow bands and we model the partition as a set of smooth functions layered over the surface. Given an initial set of seeds or regions, the partition emerges as the solution of a time dependent set of partial differential equations describing concurrently evolving fronts on the surface. Our solution does not require geodesic estimation, elaborate numerical solvers, or complicated bookkeeping data structures. The cost per time-iteration is dominated by the multiplication and addition of two sparse matrices. Extension of our approach in a Lloyd's algorithm fashion can be easily achieved and the extraction of the dual mesh can be conveniently preformed in parallel through matrix algebra. As our approach relies mainly on basic linear algebra kernels, it lends itself to efficient implementation on modern graphics hardware. %K Computer Science, Graphics, cs.GR,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018a. MonoPerfCap: Human Performance Capture from Monocular Video. ACM Transactions on Graphics37, 2.
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows us to resolve the ambiguities of the monocular reconstruction problem based on a low-dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.
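The low-dimensional trajectory subspace that stabilizes per-batch motion recovery can be pictured with a small stand-in. The sketch below projects one noisy joint coordinate onto a truncated DCT basis; using a DCT basis is our assumption for illustration, as the abstract only states that a low-dimensional trajectory subspace is used.

import numpy as np
from scipy.fftpack import dct, idct

T, K = 60, 8                                   # frames per batch, basis size
t = np.linspace(0, 2 * np.pi, T)
noisy = np.sin(t) + 0.1 * np.random.randn(T)   # one noisy joint coordinate

coeffs = dct(noisy, norm='ortho')
coeffs[K:] = 0.0                               # keep K smooth basis functions
smooth = idct(coeffs, norm='ortho')            # trajectory in the subspace
print(np.abs(smooth - np.sin(t)).mean())       # closer to the true motion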
Export
BibTeX
@article{Xu_ToG2018, TITLE = {{MonoPerfCap}: Human Performance Capture from Monocular Video}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3181973}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, ABSTRACT = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {37}, NUMBER = {2}, EID = {27}, }
Endnote
%0 Journal Article %A Xu, Weipeng %A Chatterjee, Avishek %A Zollhöfer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0001-E20E-1 %R 10.1145/3181973 %7 2017 %D 2018 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR %J ACM Transactions on Graphics %V 37 %N 2 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2018b. Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera. http://arxiv.org/abs/1803.05959.
(arXiv: 1803.05959)
Abstract
We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN-based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.
Export
BibTeX
@online{Xu_arXiv1803.05959, TITLE = {{Mo2Cap2}: Real-time Mobile {3D} Motion Capture with a Cap-mounted Fisheye Camera}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Fua, Pascal and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1803.05959}, EPRINT = {1803.05959}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines.}, }
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollhöfer, Michael %A Rhodin, Helge %A Fua, Pascal %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mo2Cap2: Real-time Mobile 3D Motion Capture with a Cap-mounted Fisheye Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0001-3C65-B %U http://arxiv.org/abs/1803.05959 %D 2018 %X We propose the first real-time approach for the egocentric estimation of 3D human body pose in a wide range of unconstrained everyday activities. This setting has a unique set of challenges, such as mobility of the hardware setup, and robustness to long capture sessions with fast recovery from tracking failures. We tackle these challenges based on a novel lightweight setup that converts a standard baseball cap to a device for high-quality pose estimation based on a single cap-mounted fisheye camera. From the captured egocentric live stream, our CNN based 3D pose estimation approach runs at 60Hz on a consumer-level GPU. In addition to the novel hardware setup, our other main contributions are: 1) a large ground truth training corpus of top-down fisheye images and 2) a novel disentangled 3D pose estimation approach that takes the unique properties of the egocentric viewpoint into account. As shown by our evaluation, we achieve lower 3D joint error as well as better 2D overlay than the existing baselines. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Wolski, K., Giunchi, D., Ye, N., et al. 2018. Dataset and Metrics for Predicting Local Visible Differences. ACM Transactions on Graphics37, 5.
Export
BibTeX
@article{wolski2018dataset, TITLE = {Dataset and Metrics for Predicting Local Visible Differences}, AUTHOR = {Wolski, Krzysztof and Giunchi, Daniele and Ye, Nanyang and Didyk, Piotr and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Steed, Anthony and Mantiuk, Rafa{\l} K.}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3196493}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {37}, NUMBER = {5}, EID = {172}, }
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Giunchi, Daniele %A Ye, Nanyang %A Didyk, Piotr %A Myszkowski, Karol %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Steed, Anthony %A Mantiuk, Rafał K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Dataset and Metrics for Predicting Local Visible Differences : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F75-2 %R 10.1145/3196493 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 5 %Z sequence number: 172 %I ACM %C New York, NY %@ false
Winter, M., Mlakar, D., Zayer, R., Seidel, H.-P., and Steinberger, M. 2018. faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU. The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018), IEEE.
Export
BibTeX
@inproceedings{Winter:2018:FHP, TITLE = {{faimGraph}: {H}igh Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the {GPU}}, AUTHOR = {Winter, Martin and Mlakar, Daniel and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISBN = {978-1-5386-8384-2}, URL = {http://conferences.computer.org/sc/2018/#!/home}, PUBLISHER = {IEEE}, YEAR = {2018}, BOOKTITLE = {The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC 2018)}, PAGES = {754--766}, ADDRESS = {Dallas, TX, USA}, }
Endnote
%0 Conference Proceedings %A Winter, Martin %A Mlakar, Daniel %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T faimGraph: High Performance Management of Fully-Dynamic Graphs Under Tight Memory Constraints on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E6-8 %D 2018 %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %Z date of event: 2018-11-11 - 2018-11-16 %C Dallas, TX, USA %B The International Conference for High Performance Computing, Networking, Storage, and Analysis %P 754 - 766 %I IEEE %@ 978-1-5386-8384-2
Tewari, A., Bernard, F., Garrido, P., et al. 2018. FML: Face Model Learning from Videos. http://arxiv.org/abs/1812.07603.
(arXiv: 1812.07603)
Abstract
Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.
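The multi-frame consistency loss is the key ingredient here. The snippet below is a minimal sketch under our own assumptions, not the paper's exact formulation: it simply penalizes disagreement between the identity codes a network predicts for different frames of the same subject, which is the effect the abstract describes.

import torch

def multi_frame_consistency(codes):
    # codes: (F, D) identity codes predicted for F frames of one subject.
    mean_code = codes.mean(dim=0, keepdim=True)
    return ((codes - mean_code) ** 2).mean()

codes = torch.randn(4, 128, requires_grad=True)   # e.g. 4 frames, 128-D codes
loss = multi_frame_consistency(codes)             # usable as a training term
loss.backward()
print(float(loss))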
Export
BibTeX
@online{tewari2018fml, TITLE = {{FML}: {Face Model Learning from Videos}}, AUTHOR = {Tewari, Ayush and Bernard, Florian and Garrido, Pablo and Bharaj, Gaurav and Elgharib, Mohamed and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1812.07603}, EPRINT = {1812.07603}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction.}, }
Endnote
%0 Report %A Tewari, Ayush %A Bernard, Florian %A Garrido, Pablo %A Bharaj, Gaurav %A Elgharib, Mohamed %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T FML: Face Model Learning from Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0002-EF79-A %U http://arxiv.org/abs/1812.07603 %D 2018 %X Monocular image-based 3D reconstruction of faces is a long-standing problem in computer vision. Since image data is a 2D projection of a 3D face, the resulting depth ambiguity makes the problem ill-posed. Most existing methods rely on data-driven priors that are built from limited 3D face scans. In contrast, we propose multi-frame video-based self-supervised training of a deep network that (i) learns a face identity model both in shape and appearance while (ii) jointly learning to reconstruct 3D faces. Our face model is learned using only corpora of in-the-wild video clips collected from the Internet. This virtually endless source of training data enables learning of a highly general 3D face model. In order to achieve this, we propose a novel multi-frame consistency loss that ensures consistent shape and appearance across multiple frames of a subject's face, thus minimizing depth ambiguity. At test time we can use an arbitrary number of frames, so that we can perform both monocular as well as multi-frame reconstruction. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %U https://www.youtube.com/watch?v=SG2BwxCw0lQ
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2018. An Intuitive Control Space for Material Appearance. http://arxiv.org/abs/1806.04950.
(arXiv: 1806.04950)
Abstract
Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.
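The functional mapping from perceptual attributes to a PCA-based BRDF representation can be sketched with off-the-shelf radial basis functions. Everything in the snippet (attribute count, dimensions, data) is invented for illustration; the authors' released code and dataset are the authoritative reference.

import numpy as np
from scipy.interpolate import RBFInterpolator

rng = np.random.default_rng(0)
ratings = rng.uniform(0, 1, size=(400, 3))   # stand-in perceptual ratings
pca_coeffs = rng.normal(size=(400, 5))       # stand-in 5-D PCA BRDF coords

f = RBFInterpolator(ratings, pca_coeffs)     # attributes -> BRDF subspace
edit = np.array([[0.2, 0.5, 0.5],            # a material, and the same
                 [0.9, 0.5, 0.5]])           # material with one attribute
print(f(edit))                               # pushed up: two edited BRDFs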
Export
BibTeX
@online{Serrano_arXiv1806.04950, TITLE = {An Intuitive Control Space for Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.04950}, EPRINT = {1806.04950}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.}, }
Endnote
%0 Report %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/21.11116/0000-0002-151E-6 %U http://arxiv.org/abs/1806.04950 %D 2018 %X Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction. %K Computer Science, Graphics, cs.GR
Myszkowski, K., Tursun, O.T., Kellnhofer, P., et al. 2018. Perceptual Display: Apparent Enhancement of Scene Detail and Depth. Electronic Imaging (Proc. HVEI 2018), SPIE/IS&T.
(Keynote Talk)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
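Transformation entropy, the complexity measure proposed here, can be illustrated with a deliberately simplified stand-in: instead of fitting local homographies, the sketch below quantizes per-patch mean translations of a flow field and takes the Shannon entropy of their distribution. A globally coherent flow scores low; a spatially incoherent one scores high.

import numpy as np

def transformation_entropy(flow, patch=8, bins=16):
    h, w, _ = flow.shape
    # Mean translation per patch as a crude local transformation estimate.
    blocks = flow[:h - h % patch, :w - w % patch].reshape(
        h // patch, patch, w // patch, patch, 2).mean(axis=(1, 3))
    hist, _ = np.histogramdd(blocks.reshape(-1, 2), bins=bins)
    p = hist.ravel() / hist.sum()
    p = p[p > 0]
    return -(p * np.log2(p)).sum()

coherent = np.ones((64, 64, 2))            # one global translation
incoherent = np.random.randn(64, 64, 2)    # spatially incoherent flow
print(transformation_entropy(coherent))    # ~0 bits
print(transformation_entropy(incoherent))  # several bits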
Export
BibTeX
@inproceedings{Myszkowski2018Perceptual, TITLE = {Perceptual Display: Apparent Enhancement of Scene Detail and Depth}, AUTHOR = {Myszkowski, Karol and Tursun, Okan Tarhan and Kellnhofer, Petr and Templin, Krzysztof and Arabadzhiyska, Elena and Didyk, Piotr and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {2470-1173}, DOI = {10.2352/ISSN.2470-1173.2018.14.HVEI-501}, PUBLISHER = {SPIE/IS\&T}, YEAR = {2018}, ABSTRACT = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.}, BOOKTITLE = {Human Vision and Electronic Imaging (HVEI 2018)}, PAGES = {1--10}, EID = {501}, JOURNAL = {Electronic Imaging (Proc. HVEI)}, VOLUME = {2018}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tursun, Okan Tarhan %A Kellnhofer, Petr %A Templin, Krzysztof %A Arabadzhiyska, Elena %A Didyk, Piotr %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Display: Apparent Enhancement of Scene Detail and Depth : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F64-5 %R 10.2352/ISSN.2470-1173.2018.14.HVEI-501 %D 2018 %B Human Vision and Electronic Imaging %Z date of event: 2018-01-28 - 2018-02-02 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. %B Human Vision and Electronic Imaging %P 1 - 10 %Z sequence number: 501 %I SPIE/IS&T %J Electronic Imaging %V 2018 %@ false
Mlakar, D., Winter, M., Seidel, H.-P., Steinberger, M., and Zayer, R. 2018. AlSub: Fully Parallel and Modular Subdivision. http://arxiv.org/abs/1809.06047.
(arXiv: 1809.06047)
Abstract
In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.
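The central move, expressing a subdivision step as sparse linear algebra, is easy to demonstrate on a much simpler scheme than Loop or Catmull-Clark. The sketch below is our own construction: one Chaikin corner-cutting step on a closed polyline is encoded as a sparse matrix S, so refinement is just the product S @ V, the structure the paper exploits for GPU execution.

import numpy as np
import scipy.sparse as sp

def chaikin_matrix(n):
    rows, cols, vals = [], [], []
    for i in range(n):                 # edge (i, i+1 mod n) -> two points
        j = (i + 1) % n
        rows += [2 * i, 2 * i, 2 * i + 1, 2 * i + 1]
        cols += [i, j, i, j]
        vals += [0.75, 0.25, 0.25, 0.75]
    return sp.csr_matrix((vals, (rows, cols)), shape=(2 * n, n))

V = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])   # a square
V = chaikin_matrix(len(V)) @ V          # one subdivision step
V = chaikin_matrix(len(V)) @ V          # and another
print(V.shape)                          # (16, 2): a rounded square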
Export
BibTeX
@online{Mlakar_arXiv1809.06047, TITLE = {{AlSub}: {Fully Parallel and Modular Subdivision}}, AUTHOR = {Mlakar, Daniel and Winter, Martin and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1809.06047}, EPRINT = {1809.06047}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing.}, }
Endnote
%0 Report %A Mlakar, Daniel %A Winter, Martin %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T AlSub: Fully Parallel and Modular Subdivision : %G eng %U http://hdl.handle.net/21.11116/0000-0002-E5E2-C %U http://arxiv.org/abs/1809.06047 %D 2018 %X In recent years, mesh subdivision---the process of forging smooth free-form surfaces from coarse polygonal meshes---has become an indispensable production instrument. Although subdivision performance is crucial during simulation, animation and rendering, state-of-the-art approaches still rely on serial implementations for complex parts of the subdivision process. Therefore, they often fail to harness the power of modern parallel devices, like the graphics processing unit (GPU), for large parts of the algorithm and must resort to time-consuming serial preprocessing. In this paper, we show that a complete parallelization of the subdivision process for modern architectures is possible. Building on sparse matrix linear algebra, we show how to structure the complete subdivision process into a sequence of algebra operations. By restructuring and grouping these operations, we adapt the process for different use cases, such as regular subdivision of dynamic meshes, uniform subdivision for immutable topology, and feature-adaptive subdivision for efficient rendering of animated models. As the same machinery is used for all use cases, identical subdivision results are achieved in all parts of the production pipeline. As a second contribution, we show how these linear algebra formulations can effectively be translated into efficient GPU kernels. Applying our strategies to $\sqrt{3}$, Loop and Catmull-Clark subdivision shows significant speedups of our approach compared to state-of-the-art solutions, while we completely avoid serial preprocessing. %K Computer Science, Graphics, cs.GR,Computer Science, Distributed, Parallel, and Cluster Computing, cs.DC
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018a. LIME: Live Intrinsic Material Estimation. http://arxiv.org/abs/1801.01075.
(arXiv: 1801.01075)
Abstract
We present the first end-to-end approach for real-time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill-posed inverse rendering problem using recent advances in image-to-image translation techniques based on deep convolutional encoder-decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow learning the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real-world image formation and obtains intermediate results even during run time. The estimation of material parameters at real-time frame rates enables exciting mixed reality applications, such as seamless illumination-consistent integration of virtual objects into real-world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.
Export
BibTeX
@online{Meka_arXiv1801.01075, TITLE = {LIME: {L}ive Intrinsic Material Estimation}, AUTHOR = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian}, URL = {http://arxiv.org/abs/1801.01075}, EPRINT = {1801.01075}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {We present the first end-to-end approach for real-time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill-posed inverse rendering problem using recent advances in image-to-image translation techniques based on deep convolutional encoder-decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real-world image formation and obtains intermediate results even during run time. The estimation of material parameters at real-time frame rates enables exciting mixed reality applications, such as seamless illumination-consistent integration of virtual objects into real-world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation.}, }
Endnote
%0 Report %A Meka, Abhimitra %A Maximov, Maxim %A Zollhöfer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society D2 External Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %U http://hdl.handle.net/21.11116/0000-0001-40D9-2 %U http://arxiv.org/abs/1801.01075 %D 2018 %X We present the first end-to-end approach for real-time material estimation for general object shapes with uniform material that only requires a single color image as input. In addition to Lambertian surface properties, our approach fully automatically computes the specular albedo, material shininess, and a foreground segmentation. We tackle this challenging and ill-posed inverse rendering problem using recent advances in image-to-image translation techniques based on deep convolutional encoder-decoder architectures. The underlying core representations of our approach are specular shading, diffuse shading and mirror images, which allow to learn the effective and accurate separation of diffuse and specular albedo. In addition, we propose a novel highly efficient perceptual rendering loss that mimics real-world image formation and obtains intermediate results even during run time. The estimation of material parameters at real-time frame rates enables exciting mixed reality applications, such as seamless illumination-consistent integration of virtual objects into real-world scenes, and virtual material cloning. We demonstrate our approach in a live setup, compare it to the state of the art, and demonstrate its effectiveness through quantitative and qualitative evaluation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Meka, A., Maximov, M., Zollhöfer, M., et al. 2018b. LIME: Live Intrinsic Material Estimation. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018), IEEE.
Export
BibTeX
@inproceedings{Meka:2018, TITLE = {{LIME}: {L}ive Intrinsic Material Estimation}, AUTHOR = {Meka, Abhimitra and Maximov, Maxim and Zollh{\"o}fer, Michael and Chatterjee, Avishek and Seidel, Hans-Peter and Richardt, Christian and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5386-6420-9}, DOI = {10.1109/CVPR.2018.00661}, PUBLISHER = {IEEE}, YEAR = {2018}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2018)}, PAGES = {6315--6324}, ADDRESS = {Salt Lake City, UT, USA}, }
Endnote
%0 Conference Proceedings %A Meka, Abhimitra %A Maximov, Maxim %A Zollhöfer, Michael %A Chatterjee, Avishek %A Seidel, Hans-Peter %A Richardt, Christian %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T LIME: Live Intrinsic Material Estimation : %G eng %U http://hdl.handle.net/21.11116/0000-0002-F391-7 %R 10.1109/CVPR.2018.00661 %D 2018 %B 31st IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2018-06-18 - 2018-06-22 %C Salt Lake City, UT, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6315 - 6324 %I IEEE %@ 978-1-5386-6420-9 %U http://gvv.mpi-inf.mpg.de/projects/LIME/
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2018a. End-to-end Sampling Patterns. http://arxiv.org/abs/1806.06710.
(arXiv: 1806.06710)
Abstract
Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.
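The broad idea, optimizing a sample pattern by back-propagating a differentiable property loss, can be shown in toy form. The loss below, a plain pairwise repulsion that spreads 2D points toward more even spacing, is our own choice for illustration and far cruder than the paper's RBF-kernel machinery.

import torch

pts = torch.rand(64, 2, requires_grad=True)       # 64 samples in [0, 1]^2
opt = torch.optim.Adam([pts], lr=1e-2)
off_diag = ~torch.eye(64, dtype=torch.bool)       # ignore self-pairs
for _ in range(200):
    opt.zero_grad()
    diff = pts[:, None, :] - pts[None, :, :]
    d2 = (diff ** 2).sum(-1)                      # squared pair distances
    loss = (1.0 / (d2[off_diag] + 1e-4)).mean()   # repulsion spreads points
    loss.backward()
    opt.step()
    with torch.no_grad():
        pts.clamp_(0.0, 1.0)                      # keep pattern in domain
print(pts.min().item(), pts.max().item())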
Export
BibTeX
@online{Leimkuehler_arXiv1806.06710, TITLE = {End-to-end Sampling Patterns}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.06710}, EPRINT = {1806.06710}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.}, }
Endnote
%0 Report %A Leimkühler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T End-to-end Sampling Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1376-4 %U http://arxiv.org/abs/1806.06710 %D 2018 %X Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties. %K Computer Science, Graphics, cs.GR
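Code sketch
The abstract above describes finding sampling methods by back-propagating user-prescribed losses through a deep learning framework. The following is a minimal illustrative sketch of that idea, not the authors' toolkit: it assumes PyTorch and optimizes a 2D point pattern under a simple differentiable repulsion loss, in the place where a spectral or discrepancy loss would go.

import torch

n = 256
pts = torch.nn.Parameter(torch.rand(n, 2))       # random initial pattern in [0,1)^2
opt = torch.optim.Adam([pts], lr=1e-3)

for step in range(2000):
    opt.zero_grad()
    diff = pts.unsqueeze(0) - pts.unsqueeze(1)   # (n, n, 2) pairwise differences
    d2 = (diff ** 2).sum(dim=-1)                 # squared pairwise distances
    d2 = d2 + torch.eye(n) * 1e9                 # mask out self-pairs
    # Gaussian repulsion of close pairs pushes the pattern toward blue noise;
    # a user-prescribed spectral or discrepancy loss would replace this term.
    loss = torch.exp(-d2 / 0.05 ** 2).sum()
    loss.backward()
    opt.step()
    with torch.no_grad():
        pts.data = pts.data.remainder(1.0)       # keep points in the unit square (torus)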
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2018b. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. IEEE Transactions on Visualization and Computer Graphics 24, 6.
Export
BibTeX
@article{Leimkuehler2018, TITLE = {Perceptual real-time {2D}-to-{3D} conversion using cue fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2017.2703612}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {24}, NUMBER = {6}, PAGES = {2037--2050}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/21.11116/0000-0001-409A-9 %R 10.1109/TVCG.2017.2703612 %7 2018 %D 2018 %J IEEE Transactions on Visualization and Computer Graphics %V 24 %N 6 %& 2037 %P 2037 - 2050 %I IEEE Computer Society %C New York, NY %@ false
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2018c. Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2018) 37, 4.
Export
BibTeX
@article{LeimkuehlerSIGGRAPH2018, TITLE = {Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction}, AUTHOR = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3197517.3201379}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {37}, NUMBER = {4}, PAGES = {1--11}, EID = {55}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2018}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Laplacian Kernel Splatting for Efficient Depth-of-field and Motion Blur Synthesis or Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0002-0630-1 %R 10.1145/3197517.3201379 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 4 %& 1 %P 1 - 11 %Z sequence number: 55 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2018 %O ACM SIGGRAPH 2018 Vancouver, Canada, 12 - 16 August
Golla, B., Seidel, H.-P., and Chen, R. 2018. Piecewise Linear Mapping Optimization Based on the Complex View. Computer Graphics Forum (Proc. Pacific Graphics 2018) 37, 7.
Export
BibTeX
@article{Golla_PG2018, TITLE = {Piecewise Linear Mapping Optimization Based on the Complex View}, AUTHOR = {Golla, Bj{\"o}rn and Seidel, Hans-Peter and Chen, Renjie}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13563}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2018}, DATE = {2018}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {37}, NUMBER = {7}, PAGES = {233--243}, BOOKTITLE = {The 26th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2018)}, }
Endnote
%0 Journal Article %A Golla, Björn %A Seidel, Hans-Peter %A Chen, Renjie %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Piecewise Linear Mapping Optimization Based on the Complex View : %G eng %U http://hdl.handle.net/21.11116/0000-0002-72CD-7 %R 10.1111/cgf.13563 %7 2018 %D 2018 %J Computer Graphics Forum %V 37 %N 7 %& 233 %P 233 - 243 %I Wiley-Blackwell %C Oxford, UK %@ false %B The 26th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2018 PG 2018 Hong Kong, 8-11 October 2018
Beigpour, S., Shekhar, S., Mansouryar, M., Myszkowski, K., and Seidel, H.-P. 2018. Light-Field Appearance Editing Based on Intrinsic Decomposition. Journal of Perceptual Imaging 1, 1.
Export
BibTeX
@article{Beigpour2018, TITLE = {Light-Field Appearance Editing Based on Intrinsic Decomposition}, AUTHOR = {Beigpour, Shida and Shekhar, Sumit and Mansouryar, Mohsen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2352/J.Percept.Imaging.2018.1.1.010502}, YEAR = {2018}, JOURNAL = {Journal of Perceptual Imaging}, VOLUME = {1}, NUMBER = {1}, PAGES = {1--15}, EID = {10502}, }
Endnote
%0 Journal Article %A Beigpour, Shida %A Shekhar, Sumit %A Mansouryar, Mohsen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Light-Field Appearance Editing Based on Intrinsic Decomposition : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F88-C %R 10.2352/J.Percept.Imaging.2018.1.1.010502 %7 2018 %D 2018 %J Journal of Perceptual Imaging %O JPI %V 1 %N 1 %& 1 %P 1 - 15 %Z sequence number: 10502
2017
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017a. Sparse Matrix Assembly on the GPU Through Multiplication Patterns. IEEE High Performance Extreme Computing Conference (HPEC 2017), IEEE.
Export
BibTeX
@inproceedings{Zayer_HPEC2017, TITLE = {Sparse Matrix Assembly on the {GPU} Through Multiplication Patterns}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5386-3472-1}, DOI = {10.1109/HPEC.2017.8091057}, PUBLISHER = {IEEE}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2017)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Sparse Matrix Assembly on the GPU Through Multiplication Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B33-5 %R 10.1109/HPEC.2017.8091057 %D 2017 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2017-09-12 - 2017-09-14 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5386-3472-1
Zayer, R., Steinberger, M., and Seidel, H.-P. 2017b. A GPU-adapted Structure for Unstructured Grids. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Export
BibTeX
@article{Zayer2017, TITLE = {A {GPU}-adapted Structure for Unstructured Grids}, AUTHOR = {Zayer, Rhaleb and Steinberger, Markus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13144}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {495--507}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Zayer, Rhaleb %A Steinberger, Markus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A GPU-adapted Structure for Unstructured Grids : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5A05-7 %R 10.1111/cgf.13144 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 495 %P 495 - 507 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Xu, W., Chatterjee, A., Zollhöfer, M., et al. 2017. MonoPerfCap: Human Performance Capture from Monocular Video. http://arxiv.org/abs/1708.02136.
(arXiv: 1708.02136)
Abstract
We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges with a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network in a batch-based pose estimation strategy. Joint recovery of per-batch motion makes it possible to resolve the ambiguities of the monocular reconstruction problem based on a low-dimensional trajectory subspace. In addition, we propose refining the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free-viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness, and the scene complexity that can be handled.
Export
BibTeX
@online{Xu2017, TITLE = {{MonoPerfCap}: Human Performance Capture from Monocular Video}, AUTHOR = {Xu, Weipeng and Chatterjee, Avishek and Zollh{\"o}fer, Michael and Rhodin, Helge and Mehta, Dushyant and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1708.02136}, EPRINT = {1708.02136}, EPRINTTYPE = {arXiv}, YEAR = {2017}, ABSTRACT = {We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled.}, }
Endnote
%0 Report %A Xu, Weipeng %A Chatterjee, Avishek %A Zollhöfer, Michael %A Rhodin, Helge %A Mehta, Dushyant %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MonoPerfCap: Human Performance Capture from Monocular Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002E-05C2-9 %U http://arxiv.org/abs/1708.02136 %D 2017 %X We present the first marker-less approach for temporally coherent 3D performance capture of a human with general clothing from monocular video. Our approach reconstructs articulated human skeleton motion as well as medium-scale non-rigid surface deformations in general scenes. Human performance capture is a challenging problem due to the large range of articulation, potentially fast motion, and considerable non-rigid deformations, even from multi-view data. Reconstruction from monocular video alone is drastically more challenging, since strong occlusions and the inherent depth ambiguity lead to a highly ill-posed reconstruction problem. We tackle these challenges by a novel approach that employs sparse 2D and 3D human pose detections from a convolutional neural network using a batch-based pose estimation strategy. Joint recovery of per-batch motion allows to resolve the ambiguities of the monocular reconstruction problem based on a low dimensional trajectory subspace. In addition, we propose refinement of the surface geometry based on fully automatically extracted silhouettes to enable medium-scale non-rigid alignment. We demonstrate state-of-the-art performance capture results that enable exciting applications such as video editing and free viewpoint video, previously infeasible from monocular video. Our qualitative and quantitative evaluation demonstrates that our approach significantly outperforms previous monocular methods in terms of accuracy, robustness and scene complexity that can be handled. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
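Code sketch
The MonoPerfCap abstract mentions resolving monocular depth ambiguity by recovering per-batch motion in a low-dimensional trajectory subspace. The sketch below illustrates only the general principle, under assumed details: it projects a noisy per-joint coordinate trajectory onto a small DCT basis, a common trajectory-space choice; the paper's actual subspace and optimization differ.

import numpy as np

T, K = 100, 8                                    # frames per batch, basis size
t = np.arange(T)
basis = np.cos(np.pi * np.outer(t + 0.5, np.arange(K)) / T)  # T x K DCT-II basis

noisy = np.sin(t / 10.0) + 0.1 * np.random.randn(T)          # toy 1D joint coordinate
coeff, *_ = np.linalg.lstsq(basis, noisy, rcond=None)        # least-squares fit
smooth = basis @ coeff                           # trajectory constrained to the subspace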
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2017. Stream Line–Based Pattern Search in Flows. Computer Graphics Forum 36, 8.
Export
BibTeX
@article{Wang:Esturo:Seidel:Weinkauf2016, TITLE = {Stream Line--Based Pattern Search in Flows}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12990}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum}, VOLUME = {36}, NUMBER = {8}, PAGES = {7--18}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stream Line–Based Pattern Search in Flows : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-4301-A %R 10.1111/cgf.12990 %7 2016 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 7 %P 7 - 18 %I Blackwell-Wiley %C Oxford %@ false
Steinberger, M., Zayer, R., and Seidel, H.-P. 2017. Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{SteinbergerICS2017, TITLE = {Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the {GPU}}, AUTHOR = {Steinberger, Markus and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-5020-4}, DOI = {10.1145/3079079.3079086}, PUBLISHER = {ACM}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {ICS 2017, International Conference on Supercomputing}, PAGES = {1--11}, EID = {13}, ADDRESS = {Chicago, IL, USA}, }
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Globally Homogeneous, Locally Adaptive Sparse Matrix-Vector Multiplication on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D71-2 %R 10.1145/3079079.3079086 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-14 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %P 1 - 11 %Z sequence number: 13 %I ACM %@ 978-1-4503-5020-4
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2017. Fast Similarity Search in Scalar Fields using Merging Histograms. In: Topological Methods in Data Analysis and Visualization IV. Springer, Cham.
Export
BibTeX
@incollection{Saikia_Seidel_Weinkauf2017, TITLE = {Fast Similarity Search in Scalar Fields using Merging Histograms}, AUTHOR = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISBN = {978-3-319-44682-0}, DOI = {10.1007/978-3-319-44684-4_7}, PUBLISHER = {Springer}, ADDRESS = {Cham}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {Topological Methods in Data Analysis and Visualization IV}, EDITOR = {Carr, Hamish and Garth, Christoph and Weinkauf, Tino}, PAGES = {121--134}, SERIES = {Mathematics and Visualization}, }
Endnote
%0 Book Section %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Fast Similarity Search in Scalar Fields using Merging Histograms : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-772A-0 %R 10.1007/978-3-319-44684-4_7 %D 2017 %B Topological Methods in Data Analysis and Visualization IV %E Carr, Hamish; Garth, Christoph; Weinkauf, Tino %P 121 - 134 %I Springer %C Cham %@ 978-3-319-44682-0 %S Mathematics and Visualization
Nalbach, O., Seidel, H.-P., and Ritschel, T. 2017a. Practical Capture and Reproduction of Phosphorescent Appearance. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Export
BibTeX
@article{Nalbach2017, TITLE = {Practical Capture and Reproduction of Phosphorescent Appearance}, AUTHOR = {Nalbach, Oliver and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13136}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {409--420}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Nalbach, Oliver %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Practical Capture and Reproduction of Phosphorescent Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A53-9 %R 10.1111/cgf.13136 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 409 %P 409 - 420 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2017b. Deep Shading: Convolutional Neural Networks for Screen Space Shading. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Export
BibTeX
@article{NalbachEGSR2017, TITLE = {Deep Shading: {C}onvolutional Neural Networks for Screen Space Shading}, AUTHOR = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13225}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {36}, NUMBER = {4}, PAGES = {65--78}, BOOKTITLE = {Eurographics Symposium on Rendering 2017}, EDITOR = {Zwicker, Matthias and Sander, Pedro}, }
Endnote
%0 Journal Article %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD86-6 %R 10.1111/cgf.13225 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 65 %P 65 - 78 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017a. VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera. http://arxiv.org/abs/1705.01583.
(arXiv: 1705.01583)
Abstract
We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control; thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e., it works for outdoor scenes, community videos, and low-quality commodity RGB cameras.
Export
BibTeX
@online{MehtaArXiv2017, TITLE = {{VNect}: Real-time {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian}, URL = {http://arxiv.org/abs/1705.01583}, DOI = {10.1145/3072959.3073596}, EPRINT = {1705.01583}, EPRINTTYPE = {arXiv}, YEAR = {2017}, ABSTRACT = {We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras.}, }
Endnote
%0 Report %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-time 3D Human Pose Estimation with a Single RGB Camera : %U http://hdl.handle.net/11858/00-001M-0000-002D-7D78-3 %R 10.1145/3072959.3073596 %U http://arxiv.org/abs/1705.01583 %D 2017 %X We present the first real-time method to capture the full global 3D skeletal pose of a human in a stable, temporally consistent manner using a single RGB camera. Our method combines a new convolutional neural network (CNN) based pose regressor with kinematic skeleton fitting. Our novel fully-convolutional pose formulation regresses 2D and 3D joint positions jointly in real time and does not require tightly cropped input frames. A real-time kinematic skeleton fitting method uses the CNN output to yield temporally stable 3D global pose reconstructions on the basis of a coherent kinematic skeleton. This makes our approach the first monocular RGB method usable in real-time applications such as 3D character control---thus far, the only monocular methods for such applications employed specialized RGB-D cameras. Our method's accuracy is quantitatively on par with the best offline 3D monocular RGB pose estimation methods. Our results are qualitatively comparable to, and sometimes better than, results from monocular RGB-D approaches, such as the Kinect. However, we show that our approach is more broadly applicable than RGB-D solutions, i.e. it works for outdoor scenes, community videos, and low quality commodity RGB cameras. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
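Code sketch
The VNect abstract describes a fully convolutional formulation that regresses 2D and 3D joint positions jointly. Below is a hedged sketch of one way such outputs can be read out, assuming the network emits a per-joint 2D heatmap plus x/y/z location maps; the tensor shapes are illustrative, not the published architecture.

import numpy as np

H = W = 64
heatmap = np.random.rand(H, W)                   # 2D confidence for one joint
loc_x, loc_y, loc_z = (np.random.randn(H, W) for _ in range(3))

u, v = np.unravel_index(np.argmax(heatmap), heatmap.shape)    # 2D detection
joint_3d = np.array([loc_x[u, v], loc_y[u, v], loc_z[u, v]])  # root-relative 3D position

The kinematic skeleton fitting stage would then take such per-joint estimates and enforce a coherent skeleton over time.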
Mehta, D., Sridhar, S., Sotnychenko, O., et al. 2017b. VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{MehtaSIGGRAPH2017, TITLE = {{VNect}: {R}eal-Time {3D} Human Pose Estimation With a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sridhar, Srinath and Sotnychenko, Oleksandr and Rhodin, Helge and Shafiei, Mohammad and Seidel, Hans-Peter and Xu, Weipeng and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3072959.3073596}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {36}, NUMBER = {4}, PAGES = {1--14}, EID = {44}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2017}, }
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sridhar, Srinath %A Sotnychenko, Oleksandr %A Rhodin, Helge %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Xu, Weipeng %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T VNect: Real-Time 3D Human Pose Estimation With a Single RGB Camera : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D95-0 %R 10.1145/3072959.3073596 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 44 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Leimkühler, T., Seidel, H.-P., and Ritschel, T. 2017. Minimal Warping: Planning Incremental Novel-view Synthesis. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2017) 36, 4.
Export
BibTeX
@article{LeimkuehlerEGSR2017, TITLE = {Minimal Warping: {P}lanning Incremental Novel-view Synthesis}, AUTHOR = {Leimk{\"u}hler, Thomas and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13219}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {36}, NUMBER = {4}, PAGES = {1--14}, BOOKTITLE = {Eurographics Symposium on Rendering 2017}, EDITOR = {Zwicker, Matthias and Sander, Pedro}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Minimal Warping: Planning Incremental Novel-view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-CD7C-D %R 10.1111/cgf.13219 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 4 %& 1 %P 1 - 14 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2017 %O Eurographics Symposium on Rendering 2017 EGSR 2017 Helsinki, Finland, 19-21 June 2017
Kol, T.R., Klehm, O., Seidel, H.-P., and Eisemann, E. 2017. Expressive Single Scattering for Light Shaft Stylization. IEEE Transactions on Visualization and Computer Graphics 23, 7.
Export
BibTeX
@article{kol2016expressive, TITLE = {Expressive Single Scattering for Light Shaft Stylization}, AUTHOR = {Kol, Timothy R. and Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2016.2554114}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {23}, NUMBER = {7}, PAGES = {1753--1766}, }
Endnote
%0 Journal Article %A Kol, Timothy R. %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Expressive Single Scattering for Light Shaft Stylization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64E7-2 %R 10.1109/TVCG.2016.2554114 %7 2016-04-14 %D 2017 %J IEEE Transactions on Visualization and Computer Graphics %V 23 %N 7 %& 1753 %P 1753 - 1766 %I IEEE Computer Society %C New York, NY %@ false
Kerbl, B., Kenzel, M., Schmalstieg, D., Seidel, H.-P., and Steinberger, M. 2017. Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU. Computer Graphics Forum 36, 8.
Export
BibTeX
@article{Seidel_Steinberger2016, TITLE = {Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the {GPU}}, AUTHOR = {Kerbl, Bernhard and Kenzel, Michael and Schmalstieg, Dieter and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13075}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum}, VOLUME = {36}, NUMBER = {8}, PAGES = {232--246}, }
Endnote
%0 Journal Article %A Kerbl, Bernhard %A Kenzel, Michael %A Schmalstieg, Dieter %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Hierarchical Bucket Queuing for Fine-Grained Priority Scheduling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-1823-8 %R 10.1111/cgf.13075 %7 2016-12-05 %D 2017 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 36 %N 8 %& 232 %P 232 - 246 %I Blackwell-Wiley %C Oxford %@ false
Jiang, C., Tang, C., Seidel, H.-P., and Wonka, P. 2017. Design and Volume Optimization of Space Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{JiangSIGGRAPH2017, TITLE = {Design and Volume Optimization of Space Structures}, AUTHOR = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Wonka, Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3072959.3073619}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {36}, NUMBER = {4}, PAGES = {1--14}, EID = {159}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2017}, }
Endnote
%0 Journal Article %A Jiang, Caigui %A Tang, Chengcheng %A Seidel, Hans-Peter %A Wonka, Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Design and Volume Optimization of Space Structures : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D8E-2 %R 10.1145/3072959.3073619 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 14 %Z sequence number: 159 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Haubenwallner, K., Seidel, H.-P., and Steinberger, M. 2017. ShapeGenetics: Using Genetic Algorithms for Procedural Modeling. Computer Graphics Forum (Proc. EUROGRAPHICS 2017) 36, 2.
Export
BibTeX
@article{haubenwallner2017shapegenetics, TITLE = {{ShapeGenetics}: {U}sing Genetic Algorithms for Procedural Modeling}, AUTHOR = {Haubenwallner, Karl and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13120}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {36}, NUMBER = {2}, PAGES = {213--223}, BOOKTITLE = {The European Association for Computer Graphics 38th Annual Conference (EUROGRAPHICS 2017)}, }
Endnote
%0 Journal Article %A Haubenwallner, Karl %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T ShapeGenetics: Using Genetic Algorithms for Procedural Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-5C69-8 %R 10.1111/cgf.13120 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 213 %P 213 - 223 %I Wiley-Blackwell %C Oxford %@ false %B The European Association for Computer Graphics 38th Annual Conference %O EUROGRAPHICS 2017 Lyon, France, 24-28 April 2017 EG 2017
Derler, A., Zayer, R., Seidel, H.-P., and Steinberger, M. 2017. Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU. ICS 2017, International Conference on Supercomputing, ACM.
Export
BibTeX
@inproceedings{DerlerICS2017, TITLE = {Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the {GPU}}, AUTHOR = {Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter and Steinberger, Markus}, LANGUAGE = {eng}, ISBN = {978-1-4503-5020-4}, DOI = {10.1145/3079079.3079085}, PUBLISHER = {ACM}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {ICS 2017, International Conference on Supercomputing}, EID = {7}, ADDRESS = {Chicago, IL, USA}, }
Endnote
%0 Conference Proceedings %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Scheduling for Efficient Hierarchical Sparse Matrix Operations on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D73-D %R 10.1145/3079079.3079085 %D 2017 %B International Conference on Supercomputing %Z date of event: 2017-06-13 - 2017-06-16 %C Chicago, IL, USA %B ICS 2017 %Z sequence number: 7 %I ACM %@ 978-1-4503-5020-4
Arabadzhiyska, E., Tursun, O.T., Myszkowski, K., Seidel, H.-P., and Didyk, P. 2017. Saccade Landing Position Prediction for Gaze-Contingent Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{ArabadzhiyskaSIGGRAPH2017, TITLE = {Saccade Landing Position Prediction for Gaze-Contingent Rendering}, AUTHOR = {Arabadzhiyska, Elena and Tursun, Okan Tarhan and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3072959.3073642}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {36}, NUMBER = {4}, PAGES = {1--12}, EID = {50}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2017}, }
Endnote
%0 Journal Article %A Arabadzhiyska, Elena %A Tursun, Okan Tarhan %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Saccade Landing Position Prediction for Gaze-Contingent Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D82-9 %R 10.1145/3072959.3073642 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 12 %Z sequence number: 50 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017a. Towards a Quality Metric for Dense Light Fields. http://arxiv.org/abs/1704.07576.
(arXiv: 1704.07576)
Abstract
Light fields have become a popular representation of three-dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores, which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in the light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For the more complex task of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.
Export
BibTeX
@online{AdhikarlaArXiv17, TITLE = {Towards a Quality Metric for Dense Light Fields}, AUTHOR = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} K. and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, URL = {http://arxiv.org/abs/1704.07576}, EPRINT = {1704.07576}, EPRINTTYPE = {arXiv}, YEAR = {2017}, ABSTRACT = {Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light- fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.}, }
Endnote
%0 Report %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafał K. %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %U http://hdl.handle.net/11858/00-001M-0000-002D-2C2C-1 %U http://arxiv.org/abs/1704.07576 %D 2017 %X Light fields become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
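Code sketch
The study above tests how well existing image quality metrics predict light-field quality. As a minimal baseline in that spirit, and explicitly not the paper's proposed methodology, one can apply a standard per-view metric such as PSNR across all views of a light field and average:

import numpy as np

def psnr(ref, dist, peak=1.0):
    mse = np.mean((ref - dist) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)

ref_lf = np.random.rand(9, 9, 64, 64)            # toy 9x9 grid of grayscale views
dist_lf = np.clip(ref_lf + 0.05 * np.random.randn(*ref_lf.shape), 0.0, 1.0)

scores = [psnr(r, d) for r, d in zip(ref_lf.reshape(-1, 64, 64),
                                     dist_lf.reshape(-1, 64, 64))]
print(f"mean PSNR over views: {np.mean(scores):.2f} dB")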
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017b. Towards a Quality Metric for Dense Light Fields. 30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017), IEEE Computer Society.
Export
BibTeX
@inproceedings{Vamsi2017, TITLE = {Towards a Quality Metric for Dense Light Fields}, AUTHOR = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISBN = {978-1-5386-0458-8}, DOI = {10.1109/CVPR.2017.396}, PUBLISHER = {IEEE Computer Society}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017)}, PAGES = {3720--3729}, ADDRESS = {Honolulu, HI, USA}, }
Endnote
%0 Conference Proceedings %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E476-3 %R 10.1109/CVPR.2017.396 %D 2017 %B 30th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2017-07-21 - 2017-07-26 %C Honolulu, HI, USA %B 30th IEEE Conference on Computer Vision and Pattern Recognition %P 3720 - 3729 %I IEEE Computer Society %@ 978-1-5386-0458-8
2016
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2016. Multi-field Pattern Matching Based on Sparse Feature Sampling. IEEE Transactions on Visualization and Computer Graphics 22, 1.
Export
BibTeX
@article{Wang2015, TITLE = {Multi-field Pattern Matching Based on Sparse Feature Sampling}, AUTHOR = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2015.2467292}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {22}, NUMBER = {1}, PAGES = {807--816}, }
Endnote
%0 Journal Article %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Multi-field Pattern Matching Based on Sparse Feature Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-1A76-6 %R 10.1109/TVCG.2015.2467292 %7 2015 %D 2016 %J IEEE Transactions on Visualization and Computer Graphics %V 22 %N 1 %& 807 %P 807 - 816 %I IEEE Computer Society %C New York, NY %@ false
Von Radziewsky, P., Eisemann, E., Seidel, H.-P., and Hildebrandt, K. 2016. Optimized Subspaces for Deformation-based Modeling and Shape Interpolation. Computers and Graphics (Proc. SMI 2016) 58.
Export
BibTeX
@article{Radziewsky2016, TITLE = {Optimized Subspaces for Deformation-based Modeling and Shape Interpolation}, AUTHOR = {von Radziewsky, Philipp and Eisemann, Elmar and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2016.05.016}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computers and Graphics (Proc. SMI)}, VOLUME = {58}, PAGES = {128--138}, BOOKTITLE = {Shape Modeling International 2016 (SMI 2016)}, }
Endnote
%0 Journal Article %A von Radziewsky, Philipp %A Eisemann, Elmar %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Optimized Subspaces for Deformation-based Modeling and Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0144-0 %R 10.1016/j.cag.2016.05.016 %7 2016 %D 2016 %J Computers and Graphics %V 58 %& 128 %P 128 - 138 %I Elsevier %C Amsterdam %@ false %B Shape Modeling International 2016 %O SMI 2016
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Emulating Displays with Continuously Varying Frame Rates. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
Export
BibTeX
@article{TemplinSIGGRAPH2016, TITLE = {Emulating Displays with Continuously Varying Frame Rates}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, PAGES = {1--11}, EID = {67}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Emulating Displays with Continuously Varying Frame Rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-018D-E %R 10.1145/2897824.2925879 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %& 1 %P 1 - 11 %Z sequence number: 67 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Steinberger, M., Derler, A., Zayer, R., and Seidel, H.-P. 2016. How Naive is Naive SpMV on the GPU? IEEE High Performance Extreme Computing Conference (HPEC 2016), IEEE.
Export
BibTeX
@inproceedings{SteinbergerHPEC2016, TITLE = {How naive is naive {SpMV} on the {GPU}?}, AUTHOR = {Steinberger, Markus and Derler, Andreas and Zayer, Rhaleb and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-5090-3525-0}, DOI = {10.1109/HPEC.2016.7761634}, PUBLISHER = {IEEE}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {IEEE High Performance Extreme Computing Conference (HPEC 2016)}, PAGES = {1--8}, ADDRESS = {Waltham, MA, USA}, }
Endnote
%0 Conference Proceedings %A Steinberger, Markus %A Derler, Andreas %A Zayer, Rhaleb %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T How Naive is Naive SpMV on the GPU? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98A5-F %R 10.1109/HPEC.2016.7761634 %D 2016 %B IEEE High Performance Extreme Computing Conference %Z date of event: 2016-09-13 - 2016-09-15 %C Waltham, MA, USA %B IEEE High Performance Extreme Computing Conference %P 1 - 8 %I IEEE %@ 978-1-5090-3525-0
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016a. Intuitive Editing of Material Appearance. ACM SIGGRAPH 2016 Posters.
Export
BibTeX
@inproceedings{SerranoSIGGRAPH2016, TITLE = {Intuitive Editing of Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISBN = {978-1-4503-4371-8}, DOI = {10.1145/2945078.2945141}, PUBLISHER = {ACM}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {ACM SIGGRAPH 2016 Posters}, PAGES = {1--2}, EID = {63}, ADDRESS = {Anaheim, CA, USA}, }
Endnote
%0 Generic %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Intuitive Editing of Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0170-C %R 10.1145/2945078.2945141 %D 2016 %Z name of event: 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %Z place of event: Anaheim, CA, USA %B ACM SIGGRAPH 2016 Posters %P 1 - 2 %Z sequence number: 63 %@ 978-1-4503-4371-8
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016b. An Intuitive Control Space for Material Appearance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Serrano_MaterialAppearance_2016, TITLE = {An Intuitive Control Space for Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980242}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {186}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B8-9 %R 10.1145/2980179.2980242 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 186 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Robertini, N., Casas, D., Rhodin, H., Seidel, H.-P., and Theobalt, C. 2016. Model-Based Outdoor Performance Capture. Fourth International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Robertini:2016, TITLE = {Model-Based Outdoor Performance Capture}, AUTHOR = {Robertini, Nadia and Casas, Dan and Rhodin, Helge and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-5090-5407-7}, URL = {http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/}, DOI = {10.1109/3DV.2016.25}, PUBLISHER = {IEEE Computer Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Fourth International Conference on 3D Vision}, PAGES = {166--175}, ADDRESS = {Stanford, CA, USA}, }
Endnote
%0 Conference Proceedings %A Robertini, Nadia %A Casas, Dan %A Rhodin, Helge %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Model-Based Outdoor Performance Capture : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-4A6D-2 %R 10.1109/3DV.2016.25 %U http://gvv.mpi-inf.mpg.de/projects/OutdoorPerfcap/ %D 2016 %B Fourth International Conference on 3D Vision %Z date of event: 2016-10-25 - 2016-10-28 %C Stanford, CA, USA %B Fourth International Conference on 3D Vision %P 166 - 175 %I IEEE Computer Society %@ 978-1-5090-5407-7
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016a. A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation. http://arxiv.org/abs/1602.03725.
(arXiv: 1602.03725)
Abstract
Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates whether a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient-to-optimize pose-similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution, which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.
Export
BibTeX
@online{Rhodin2016arXiv1602.03725, TITLE = {A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1602.03725}, EPRINT = {1602.03725}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model with Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9875-C %U http://arxiv.org/abs/1602.03725 %D 2016 %X Generative reconstruction methods compute the 3D configuration (such as pose and/or geometry) of a shape by optimizing the overlap of the projected 3D shape model with images. Proper handling of occlusions is a big challenge, since the visibility function that indicates if a surface point is seen from a camera can often not be formulated in closed form, and is in general discrete and non-differentiable at occlusion boundaries. We present a new scene representation that enables an analytically differentiable closed-form formulation of surface visibility. In contrast to previous methods, this yields smooth, analytically differentiable, and efficient to optimize pose similarity energies with rigorous occlusion handling, fewer local minima, and experimentally verified improved convergence of numerical optimization. The underlying idea is a new image formation model that represents opaque objects by a translucent medium with a smooth Gaussian density distribution which turns visibility into a smooth phenomenon. We demonstrate the advantages of our versatile scene model in several generative pose estimation problems, namely marker-less multi-object pose estimation, marker-less human motion capture with few cameras, and image-based 3D geometry estimation. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
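The abstract above turns hard visibility into a smooth, differentiable quantity by modeling opaque shapes as translucent Gaussian density. As a hedged illustration (a minimal sketch, not the authors' implementation): for an isotropic 3D Gaussian, the line integral of density along a ray has a closed form, so the optical depth, and hence the transmittance exp(-depth), is smooth in all scene parameters. Integrating over the whole line and using a single blob are simplifying assumptions; the paper accumulates many blobs per object and integrates only up to the receiving point.

import numpy as np

def gaussian_ray_integral(o, d, mu, sigma, c):
    # Closed-form integral of c * exp(-||x - mu||^2 / (2 sigma^2)) along
    # the ray x(t) = o + t*d over t in (-inf, inf); d must be unit length.
    om = mu - o
    t_closest = np.dot(om, d)                 # ray parameter of closest approach
    dist2 = np.dot(om, om) - t_closest**2     # squared distance ray <-> center
    return c * sigma * np.sqrt(2.0 * np.pi) * np.exp(-dist2 / (2.0 * sigma**2))

def transmittance(o, d, gaussians):
    # Smooth visibility: exp(-optical depth) through a sum of Gaussian blobs.
    tau = sum(gaussian_ray_integral(o, d, mu, s, c) for mu, s, c in gaussians)
    return np.exp(-tau)

# Illustrative values: one blob of density 4 and radius 0.3 in front of the camera.
o = np.array([0.0, 0.0, 0.0])
d = np.array([0.0, 0.0, 1.0])
print(transmittance(o, d, [(np.array([0.0, 0.0, 2.0]), 0.3, 4.0)]))

Because every operation above is smooth, gradients of this visibility with respect to blob positions exist everywhere, which is what removes the discontinuity at occlusion boundaries.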
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016b. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. http://arxiv.org/abs/1607.08659.
(arXiv: 1607.08659)
Abstract
Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.
Export
BibTeX
@online{Rhodin2016arXiv1607.08659, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1607.08659}, EPRINT = {1607.08659}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation -- skeleton, volumetric shape, appearance, and optionally a body surface -- and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way.}, }
Endnote
%0 Report %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9883-C %U http://arxiv.org/abs/1607.08659 %D 2016 %X Markerless motion capture algorithms require a 3D body with properly personalized skeleton dimension and/or body shape and appearance to successfully track a person. Unfortunately, many tracking methods consider model personalization a different problem and use manual or semi-automatic model initialization, which greatly reduces applicability. In this paper, we propose a fully automatic algorithm that jointly creates a rigged actor model commonly used for animation - skeleton, volumetric shape, appearance, and optionally a body surface - and estimates the actor's motion from multi-view video input only. The approach is rigorously designed to work on footage of general outdoor scenes recorded with very few cameras and without background subtraction. Our method uses a new image formation model with analytic visibility and analytically differentiable alignment energy. For reconstruction, 3D body shape is approximated as Gaussian density field. For pose and shape estimation, we minimize a new edge-based alignment energy inspired by volume raycasting in an absorbing medium. We further propose a new statistical human body model that represents the body surface, volumetric Gaussian density, as well as variability in skeleton shape. Given any multi-view sequence, our method jointly optimizes the pose and shape parameters of this model fully automatically in a spatiotemporal way. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
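As a hedged companion to the abstract above, the toy Python energy below illustrates the flavor of an edge-based alignment term: render the smooth silhouette (opacity) of the Gaussian body model per pixel, and reward its spatial gradient for landing on strong image edges. The function name, buffer layout, and exact form of the energy are illustrative assumptions, not the paper's formulation.

import numpy as np

def contour_alignment_energy(tau_image, edge_image):
    # tau_image: per-pixel optical depth of the volumetric body model
    # edge_image: per-pixel edge strength extracted from the input photo
    opacity = 1.0 - np.exp(-tau_image)        # smooth model silhouette
    gy, gx = np.gradient(opacity)             # its contour lives in the gradient
    contour_strength = np.sqrt(gx**2 + gy**2)
    # Lower energy when model contours coincide with strong image edges.
    return -np.sum(contour_strength * edge_image)

# Illustrative call on random stand-in buffers.
print(contour_alignment_energy(np.random.rand(32, 32), np.random.rand(32, 32)))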
Rhodin, H., Richardt, C., Casas, D., et al. 2016c. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract). http://arxiv.org/abs/1701.00142.
(arXiv: 1701.00142)
Abstract
Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.
Export
BibTeX
@online{DBLP:journals/corr/RhodinRCISSST17, TITLE = {{EgoCap}: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract)}, AUTHOR = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1701.00142}, EPRINT = {1701.00142}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes.}, }
Endnote
%0 Report %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras (Extended Abstract) : %G eng %U http://hdl.handle.net/21.11116/0000-0000-3B3D-B %U http://arxiv.org/abs/1701.00142 %D 2016 %X Marker-based and marker-less optical skeletal motion-capture methods use an outside-in arrangement of cameras placed around a scene, with viewpoints converging on the center. They often create discomfort by possibly needed marker suits, and their recording volume is severely restricted and often constrained to indoor scenes with controlled backgrounds. We therefore propose a new method for real-time, marker-less and egocentric motion capture which estimates the full-body skeleton pose from a lightweight stereo pair of fisheye cameras that are attached to a helmet or virtual-reality headset. It combines the strength of a new generative pose estimation framework for fisheye views with a ConvNet-based body-part detector trained on a new automatically annotated and augmented dataset. Our inside-in method captures full-body motion in general indoor and outdoor scenes, and also crowded scenes. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
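The EgoCap abstract combines a generative fisheye pose-estimation framework with a ConvNet body-part detector. The schematic Python sketch below shows one way such terms can be combined into a single pose energy; project, detection_maps, and gen_energy are hypothetical placeholders standing in for the fisheye camera model, the per-joint detector heatmaps, and the generative alignment term, respectively.

def combined_pose_energy(pose, project, detection_maps, gen_energy, lam=1.0):
    # Generative term, e.g. a smooth volumetric alignment as sketched earlier.
    e = gen_energy(pose)
    # Discriminative term: each projected joint should land where its
    # ConvNet heatmap fires (heatmap values assumed in [0, 1]).
    for j, heatmap in enumerate(detection_maps):
        u, v = project(pose, j)               # joint j in fisheye pixel coords
        e += lam * (1.0 - heatmap[int(v), int(u)])
    return e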
Rhodin, H., Richardt, C., Casas, D., et al. 2016d. EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{Rhodin2016SGA, TITLE = {{EgoCap}: {E}gocentric Marker-less Motion Capture with Two Fisheye Cameras}, AUTHOR = {Rhodin, Helge and Richardt, Christian and Casas, Dan and Insafutdinov, Eldar and Shafiei, Mohammad and Seidel, Hans-Peter and Schiele, Bernt and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980235}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, PAGES = {1--11}, EID = {162}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Richardt, Christian %A Casas, Dan %A Insafutdinov, Eldar %A Shafiei, Mohammad %A Seidel, Hans-Peter %A Schiele, Bernt %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute University of Bath Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Multimodal Computing, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EgoCap: Egocentric Marker-less Motion Capture with Two Fisheye Cameras : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8321-6 %R 10.1145/2980179.2980235 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 6 %& 1 %P 1 - 11 %Z sequence number: 162 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Rhodin, H., Robertini, N., Casas, D., Richardt, C., Seidel, H.-P., and Theobalt, C. 2016e. General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues. Computer Vision -- ECCV 2016, Springer.
Export
BibTeX
@inproceedings{RhodinECCV2016, TITLE = {General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Casas, Dan and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-3-319-46453-4}, DOI = {10.1007/978-3-319-46454-1_31}, PUBLISHER = {Springer}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Computer Vision -- ECCV 2016}, EDITOR = {Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max}, PAGES = {509--526}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {9909}, ADDRESS = {Amsterdam, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Casas, Dan %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Intel Visual Computing Institute Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T General Automatic Human Shape and Motion Capture Using Volumetric Contour Cues : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-986D-F %R 10.1007/978-3-319-46454-1_31 %D 2016 %B 14th European Conference on Computer Vision %Z date of event: 2016-10-11 - 2016-10-14 %C Amsterdam, The Netherlands %B Computer Vision -- ECCV 2016 %E Leibe, Bastian; Matas, Jiri; Sebe, Nicu; Welling, Max %P 509 - 526 %I Springer %@ 978-3-319-46453-4 %B Lecture Notes in Computer Science %N 9909 %U https://rdcu.be/dLgG6
Reinert, B., Kopf, J., Ritschel, T., Cuervo, E., Chu, D., and Seidel, H.-P. 2016a. Proxy-guided Image-based Rendering for Mobile Devices. Computer Graphics Forum (Proc. Pacific Graphics 2016)35, 7.
Export
BibTeX
@article{ReinertPG2016, TITLE = {Proxy-guided Image-based Rendering for Mobile Devices}, AUTHOR = {Reinert, Bernhard and Kopf, Johannes and Ritschel, Tobias and Cuervo, Eduardo and Chu, David and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13032}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {353--362}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Kopf, Johannes %A Ritschel, Tobias %A Cuervo, Eduardo %A Chu, David %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Proxy-guided Image-based Rendering for Mobile Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DD8-7 %R 10.1111/cgf.13032 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 353 %P 353 - 362 %I Blackwell-Wiley %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications Short Papers Proceedings %O Pacific Graphics 2016 PG 2016
Reinert, B., Ritschel, T., Seidel, H.-P., and Georgiev, I. 2016b. Projective Blue-Noise Sampling. Computer Graphics Forum35, 1.
Export
BibTeX
@article{ReinertCGF2016, TITLE = {Projective Blue-Noise Sampling}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter and Georgiev, Iliyan}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12725}, PUBLISHER = {Wiley}, ADDRESS = {Chichester}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum}, VOLUME = {35}, NUMBER = {1}, PAGES = {285--295}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %A Georgiev, Iliyan %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Projective Blue-Noise Sampling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002A-1A31-D %R 10.1111/cgf.12725 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 1 %& 285 %P 285 - 295 %I Wiley %C Chichester %@ false
Reinert, B., Ritschel, T., and Seidel, H.-P. 2016c. Animated 3D Creatures from Single-view Video by Skeletal Sketching. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{Reinert:2016:AnimatedCreatures, TITLE = {Animated {3D} Creatures from Single-view Video by Skeletal Sketching}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.17}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {133--143}, ADDRESS = {Victoria, BC, Canada}, }
Endnote
%0 Conference Proceedings %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Animated 3D Creatures from Single-view Video by Skeletal Sketching : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-64EC-7 %R 10.20380/GI2016.17 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, BC, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 133 - 143 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Nalbach, O., Arabadzhiyska, E., Mehta, D., Seidel, H.-P., and Ritschel, T. 2016. Deep Shading: Convolutional Neural Networks for Screen-Space Shading. http://arxiv.org/abs/1603.06078.
(arXiv: 1603.06078)
Abstract
In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.
Export
BibTeX
@online{NalbacharXiv2016, TITLE = {Deep Shading: Convolutional Neural Networks for Screen-Space Shading}, AUTHOR = {Nalbach, Oliver and Arabadzhiyska, Elena and Mehta, Dushyant and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1603.06078}, EPRINT = {1603.06078}, EPRINTTYPE = {arXiv}, YEAR = {2016}, ABSTRACT = {In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images.}, }
Endnote
%0 Report %A Nalbach, Oliver %A Arabadzhiyska, Elena %A Mehta, Dushyant %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Shading: Convolutional Neural Networks for Screen-Space Shading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0174-4 %U http://arxiv.org/abs/1603.06078 %D 2016 %X In computer vision, Convolutional Neural Networks (CNNs) have recently achieved new levels of performance for several inverse problems where RGB pixel appearance is mapped to attributes such as positions, normals or reflectance. In computer graphics, screen-space shading has recently increased the visual quality in interactive image synthesis, where per-pixel attributes such as positions, normals or reflectance of a virtual 3D scene are converted into RGB pixel appearance, enabling effects like ambient occlusion, indirect light, scattering, depth-of-field, motion blur, or anti-aliasing. In this paper we consider the diagonal problem: synthesizing appearance from given per-pixel attributes using a CNN. The resulting Deep Shading simulates all screen-space effects as well as arbitrary combinations thereof at competitive quality and speed while not being programmed by human experts but learned from example images. %K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
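Deep Shading maps per-pixel attribute buffers to RGB appearance with a CNN. Below is a deliberately tiny PyTorch stand-in for that mapping; the paper's network is a much deeper U-Net-style architecture trained on rendered ground truth, and the 9-channel buffer layout (position, normal, diffuse albedo) is an assumption made here for illustration.

import torch
import torch.nn as nn

class TinyDeepShading(nn.Module):
    # Fully convolutional: per-pixel attributes in, RGB appearance out.
    def __init__(self, in_channels=9):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_channels, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 3, 3, padding=1), nn.Sigmoid(),  # RGB in [0, 1]
        )

    def forward(self, gbuffer):
        return self.net(gbuffer)

# One illustrative training step against a rendered reference image.
model = TinyDeepShading()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
gbuffer = torch.rand(1, 9, 64, 64)   # stand-in for positions/normals/albedo
target = torch.rand(1, 3, 64, 64)    # stand-in for the reference shading
opt.zero_grad()
loss = nn.functional.mse_loss(model(gbuffer), target)
loss.backward()
opt.step()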
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
(Best Student Paper Award)
Export
BibTeX
@inproceedings{LeimkuehlerGI2016, TITLE = {Perceptual real-time {2D}-to-{3D} conversion using cue fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.02}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {5--12}, ADDRESS = {Victoria, Canada}, }
Endnote
%0 Conference Proceedings %A Leimkühler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-823D-1 %R 10.20380/GI2016.02 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 5 - 12 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Kellnhofer, P., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2016a. GazeStereo3D: Seamless Disparity Manipulations. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016)35, 4.
Export
BibTeX
@article{KellnhoferSIGGRAPH2016, TITLE = {{GazeStereo3D}: {S}eamless Disparity Manipulations}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925866}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, PAGES = {1--13}, EID = {68}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T GazeStereo3D: Seamless Disparity Manipulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0190-4 %R 10.1145/2897824.2925866 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %& 1 %P 1 - 13 %Z sequence number: 68 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016b. Transformation-aware Perceptual Image Metric. Journal of Electronic Imaging25, 5.
Export
BibTeX
@article{Kellnhofer2016jei, TITLE = {Transformation-aware Perceptual Image Metric}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1017-9909}, DOI = {10.1117/1.JEI.25.5.053014}, PUBLISHER = {SPIE}, ADDRESS = {Bellingham, WA}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Journal of Electronic Imaging}, VOLUME = {25}, NUMBER = {5}, PAGES = {1--16}, EID = {053014}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B3-4 %R 10.1117/1.JEI.25.5.053014 %7 2016 %D 2016 %J Journal of Electronic Imaging %V 25 %N 5 %& 1 %P 1 - 16 %Z sequence number: 053014 %I SPIE %C Bellingham, WA %@ false
Kellnhofer, P., Didyk, P., Ritschel, T., Masia, B., Myszkowski, K., and Seidel, H.-P. 2016c. Motion Parallax in Stereo 3D: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{Kellnhofer2016SGA, TITLE = {Motion Parallax in Stereo {3D}: {M}odel and Applications}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Ritschel, Tobias and Masia, Belen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980230}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, PAGES = {1--12}, EID = {176}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Ritschel, Tobias %A Masia, Belen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Parallax in Stereo 3D: Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B6-D %R 10.1145/2980179.2980230 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %& 1 %P 1 - 12 %Z sequence number: 176 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Gryaditskaya, Y., Masia, B., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Gloss Editing in Light Fields. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{jgryadit2016, TITLE = {Gloss Editing in Light Fields}, AUTHOR = {Gryaditskaya, Yulia and Masia, Belen and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-03868-025-3}, DOI = {10.2312/vmv.20161351}, PUBLISHER = {Eurographics Association}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization}, EDITOR = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino}, PAGES = {127--135}, ADDRESS = {Bayreuth, Germany}, }
Endnote
%0 Conference Proceedings %A Gryaditskaya, Yulia %A Masia, Belen %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gloss Editing in Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C5-B %R 10.2312/vmv.20161351 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 127 - 135 %I Eurographics Association %@ 978-3-03868-025-3
Dąbała, Ł., Ziegler, M., Didyk, P., et al. 2016. Efficient Multi-image Correspondences for On-line Light Field Video Processing. Computer Graphics Forum (Proc. Pacific Graphics 2016)35, 7.
Export
BibTeX
@article{DabalaPG2016, TITLE = {Efficient Multi-image Correspondences for On-line Light Field Video Processing}, AUTHOR = {D{\k a}ba{\l}a, {\L}ukasz and Ziegler, Matthias and Didyk, Piotr and Zilly, Frederik and Keinert, Joachim and Myszkowski, Karol and Rokita, Przemyslaw and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13037}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {401--410}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Dąbała, Łukasz %A Ziegler, Matthias %A Didyk, Piotr %A Zilly, Frederik %A Keinert, Joachim %A Myszkowski, Karol %A Rokita, Przemyslaw %A Ritschel, Tobias %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Multi-image Correspondences for On-line Light Field Video Processing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82BA-5 %R 10.1111/cgf.13037 %7 2016 %D 2016 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 35 %N 7 %& 401 %P 401 - 410 %I Blackwell-Wiley %C Oxford %@ false %B The 24th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2016 PG 2016
Boechat, P., Dokter, M., Kenzel, M., Seidel, H.-P., Schmalstieg, D., and Steinberger, M. 2016. Representing and Scheduling Procedural Generation using Operator Graphs. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016)35, 6.
Export
BibTeX
@article{BoaechatSIGGRAPHAsia2016, TITLE = {Representing and Scheduling Procedural Generation using Operator Graphs}, AUTHOR = {Boechat, Pedro and Dokter, Mark and Kenzel, Michael and Seidel, Hans-Peter and Schmalstieg, Dieter and Steinberger, Markus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980227}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, PAGES = {1--12}, EID = {183}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Boechat, Pedro %A Dokter, Mark %A Kenzel, Michael %A Seidel, Hans-Peter %A Schmalstieg, Dieter %A Steinberger, Markus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Representing and Scheduling Procedural Generation using Operator Graphs : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-98BB-0 %R 10.1145/2980179.2980227 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %& 1 %P 1 - 12 %Z sequence number: 183 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
2015
Wang, Z., Seidel, H.-P., and Weinkauf, T. 2015. Hierarchical Hashing for Pattern Search in 3D Vector Fields. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{WangVMV2015, TITLE = {Hierarchical Hashing for Pattern Search in {3D} Vector Fields}, AUTHOR = {Wang, Zhongjie and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISBN = {978-3-905674-95-8}, DOI = {10.2312/vmv.20151256}, PUBLISHER = {Eurographics Association}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization}, EDITOR = {Bommes, David and Ritschel, Tobias and Schultz, Thomas}, PAGES = {41--48}, ADDRESS = {Aachen, Germany}, }
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Hierarchical Hashing for Pattern Search in 3D Vector Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F760-4 %R 10.2312/vmv.20151256 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-10 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 41 - 48 %I Eurographics Association %@ 978-3-905674-95-8
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Real-time Nonlinear Shape Interpolation. ACM Transactions on Graphics34, 3.
Export
BibTeX
@article{Tycowicz2015, TITLE = {Real-time Nonlinear Shape Interpolation}, AUTHOR = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2729972}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2015}, DATE = {2015}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {34}, NUMBER = {3}, PAGES = {1--10}, EID = {34}, }
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Nonlinear Shape Interpolation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D65-9 %R 10.1145/2729972 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 3 %& 1 %P 1 - 10 %Z sequence number: 34 %I Association for Computing Machinery %C New York, NY %@ false
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2015. Animating Articulated Characters Using Wiggly Splines. Proceedings SCA 2015, ACM.
Export
BibTeX
@inproceedings{SchulzSCA2015, TITLE = {Animating Articulated Characters Using Wiggly Splines}, AUTHOR = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISBN = {978-1-4503-3496-9}, DOI = {10.1145/2786784.2786799}, PUBLISHER = {ACM}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {Proceedings SCA 2015}, PAGES = {101--109}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Animating Articulated Characters Using Wiggly Splines : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8EA3-0 %R 10.1145/2786784.2786799 %D 2015 %B 14th ACM SIGGRAPH / Eurographics Symposium on Computer Animation %Z date of event: 2015-08-07 - 2015-08-09 %C Los Angeles, CA, USA %B Proceedings SCA 2015 %P 101 - 109 %I ACM %@ 978-1-4503-3496-9
Rhodin, H., Robertini, N., Richardt, C., Seidel, H.-P., and Theobalt, C. 2015a. A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation. ICCV 2015, IEEE International Conference on Computer Vision, IEEE.
Export
BibTeX
@inproceedings{RhodinICCV2015, TITLE = {A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation}, AUTHOR = {Rhodin, Helge and Robertini, Nadia and Richardt, Christian and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4673-8390-5}, DOI = {10.1109/ICCV.2015.94}, PUBLISHER = {IEEE}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {ICCV 2015, IEEE International Conference on Computer Vision}, PAGES = {765--773}, ADDRESS = {Santiago, Chile}, }
Endnote
%0 Conference Proceedings %A Rhodin, Helge %A Robertini, Nadia %A Richardt, Christian %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Versatile Scene Model With Differentiable Visibility Applied to Generative Pose Estimation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-52DC-4 %R 10.1109/ICCV.2015.94 %D 2015 %B IEEE International Conference on Computer Vision %Z date of event: 2015-12-07 - 2015-12-13 %C Santiago, Chile %B ICCV 2015 %P 765 - 773 %I IEEE %@ 978-1-4673-8390-5 %U http://www.cv-foundation.org/openaccess/content_iccv_2015/html/Rhodin_A_Versatile_Scene_ICCV_2015_paper.html
Rhodin, H., Tompkin, J., Kim, K.I., et al. 2015b. Generalizing Wave Gestures from Sparse Examples for Real-time Character Control. ACM Transactions on Graphics34, 6.
Export
BibTeX
@article{DBLP:journals/tog/RhodinTKAPST15, TITLE = {Generalizing Wave Gestures from Sparse Examples for Real-time Character Control}, AUTHOR = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and de Aguiar, Edilson and Pfister, Hanspeter and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2816795.2818082}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2015}, DATE = {2015}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {34}, NUMBER = {6}, PAGES = {181:1--181:12}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A de Aguiar, Edilson %A Pfister, Hanspeter %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generalizing Wave Gestures from Sparse Examples for Real-time Character Control : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7285-A %R 10.1145/2816795.2818082 %D 2015 %J ACM Transactions on Graphics %V 34 %N 6 %& 181:1 %P 181:1 - 181:12 %I Association for Computing Machinery %C New York, NY %@ false
Rhodin, H., Tompkin, J., Kim, K.I., et al. 2015c. Generalizing Wave Gestures from Sparse Examples for Real-time Character Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2015)34, 6.
Export
BibTeX
@article{RhodinSAP2015, TITLE = {Generalizing Wave Gestures from Sparse Examples for Real-time Character Control}, AUTHOR = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and de Aguiar, Edilson and Pfister, Hanspeter and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2816795.2818082}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2015}, DATE = {2015}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {34}, NUMBER = {6}, PAGES = {1--12}, EID = {181}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2015}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A de Aguiar, Edilson %A Pfister, Hanspeter %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generalizing Wave Gestures from Sparse Examples for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-2476-8 %R 10.1145/2816795.2818082 %7 2015 %D 2015 %J ACM Transactions on Graphics %O TOG %V 34 %N 6 %& 1 %P 1 - 12 %Z sequence number: 181 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2015 %O ACM SIGGRAPH Asia 2015 Kobe, Japan
Nguyen, C., Ritschel, T., and Seidel, H.-P. 2015a. Data-driven Color Manifolds. ACM Transactions on Graphics34, 2.
Export
BibTeX
@article{NguyenTOG2015, TITLE = {Data-driven Color Manifolds}, AUTHOR = {Nguyen, Chuong and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2699645}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2015}, DATE = {2015}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {34}, NUMBER = {2}, PAGES = {1--9}, EID = {20}, }
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Data-driven Color Manifolds : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-680A-D %R 10.1145/2699645 %7 2015 %D 2015 %J ACM Transactions on Graphics %V 34 %N 2 %& 1 %P 1 - 9 %Z sequence number: 20 %I Association for Computing Machinery %C New York, NY %@ false
Nguyen, C., Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015b. Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections. Computer Graphics Forum (Proc. EUROGRAPHICS 2015)34, 2.
Export
BibTeX
@article{NguyenEG2015, TITLE = {Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections}, AUTHOR = {Nguyen, Chuong and Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12548}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {34}, NUMBER = {2}, PAGES = {143--154}, BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)}, }
Endnote
%0 Journal Article %A Nguyen, Chuong %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Guiding Image Manipulations Using Shape-appearance Subspaces from Co-alignment of Image Collections : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D6A-0 %R 10.1111/cgf.12548 %7 2015 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 2 %& 143 %P 143 - 154 %I Blackwell-Wiley %C Oxford %@ false %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th – 8th May 2015, Kongresshaus in Zürich, Switzerland EG 2015
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2015. The Bounced Z-buffer for Indirect Visibility. VMV 2015 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{NalbachVMV2015, TITLE = {The Bounced {Z}-buffer for Indirect Visibility}, AUTHOR = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905674-95-8}, DOI = {10.2312/vmv.20151261}, PUBLISHER = {Eurographics Association}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {VMV 2015 Vision, Modeling and Visualization}, EDITOR = {Bommes, David and Ritschel, Tobias and Schultz, Thomas}, PAGES = {79--86}, ADDRESS = {Aachen, Germany}, }
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T The Bounced Z-buffer for Indirect Visibility : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-F762-F %R 10.2312/vmv.20151261 %D 2015 %B 20th International Symposium on Vision, Modeling and Visualization %Z date of event: 2015-10-07 - 2015-10-10 %C Aachen, Germany %B VMV 2015 Vision, Modeling and Visualization %E Bommes, David; Ritschel, Tobias; Schultz, Thomas %P 79 - 86 %I Eurographics Association %@ 978-3-905674-95-8
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2015. High Dynamic Range Imaging. In: Wiley Encyclopedia of Electrical and Electronics Engineering. Wiley, New York, NY.
Export
BibTeX
@incollection{MantiukEncyclopedia2015, TITLE = {High Dynamic Range Imaging}, AUTHOR = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1002/047134608X.W8265}, PUBLISHER = {Wiley}, ADDRESS = {New York, NY}, YEAR = {2015}, BOOKTITLE = {Wiley Encyclopedia of Electrical and Electronics Engineering}, EDITOR = {Webster, John G.}, PAGES = {1--42}, }
Endnote
%0 Book Section %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A376-B %R 10.1002/047134608X.W8265 %D 2015 %8 15.06.2015 %B Wiley Encyclopedia of Electrical and Electronics Engineering %E Webster, John G. %P 1 - 42 %I Wiley %C New York, NY
Li, C., Wand, M., Wu, X., and Seidel, H.-P. 2015. Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis. International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Li3DV2015, TITLE = {Approximate {3D} Partial Symmetry Detection Using Co-occurrence Analysis}, AUTHOR = {Li, Chuan and Wand, Michael and Wu, Xiaokun and Seidel, Hans-Peter}, ISBN = {978-1-4673-8333-2}, DOI = {10.1109/3DV.2015.55}, PUBLISHER = {IEEE}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {International Conference on 3D Vision}, EDITOR = {Brown, Michael and Kosecka, Jana and Theobalt, Christian}, PAGES = {425--433}, ADDRESS = {Lyon, France}, }
Endnote
%0 Conference Proceedings %A Li, Chuan %A Wand, Michael %A Wu, Xiaokun %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Approximate 3D Partial Symmetry Detection Using Co-occurrence Analysis : %U http://hdl.handle.net/11858/00-001M-0000-002B-34D8-0 %R 10.1109/3DV.2015.55 %D 2015 %B International Conference on 3D Vision %Z date of event: 2015-10-19 - 2015-10-22 %C Lyon, France %B International Conference on 3D Vision %E Brown, Michael; Kosecka, Jana; Theobalt, Christian %P 425 - 433 %I IEEE %@ 978-1-4673-8333-2
Klehm, O., Kol, T.R., Seidel, H.-P., and Eisemann, E. 2015. Stylized Scattering via Transfer Functions and Occluder Manipulation. Graphics Interface 2015, Graphics Interface Conference 2015, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{KlehmGI2015, TITLE = {Stylized Scattering via Transfer Functions and Occluder Manipulation}, AUTHOR = {Klehm, Oliver and Kol, Timothy R. and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISBN = {978-0-9947868-0-7}, DOI = {10.20380/GI2015.15}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {Graphics Interface 2015, Graphics Interface Conference 2015}, EDITOR = {Zhang, Hao Richard and Tang, Tony}, PAGES = {115--121}, ADDRESS = {Halifax, Canada}, }
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Kol, Timothy R. %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stylized Scattering via Transfer Functions and Occluder Manipulation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-D415-8 %R 10.20380/GI2015.15 %D 2015 %B Graphics Interface Conference 2015 %Z date of event: 2015-06-03 - 2015-06-05 %C Halifax, Canada %B Graphics Interface 2015 %E Zhang, Hao Richard; Tang, Tony %P 115 - 121 %I Canadian Information Processing Society %@ 978-0-9947868-0-7
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015a. A Transformation-aware Perceptual Image Metric. Human Vision and Electronic Imaging XX (HVEI 2015), SPIE/IS&T.
(Best Student Paper Award)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Kellnhofer2015, TITLE = {A Transformation-aware Perceptual Image Metric}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {9781628414844}, DOI = {10.1117/12.2076754}, PUBLISHER = {SPIE/IS\&T}, YEAR = {2015}, DATE = {2015}, ABSTRACT = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.}, BOOKTITLE = {Human Vision and Electronic Imaging XX (HVEI 2015)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib}, EID = {939408}, SERIES = {Proceedings of SPIE}, VOLUME = {9394}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-544A-4 %R 10.1117/12.2076754 %D 2015 %B Human Vision and Electronic Imaging XX %Z date of event: 2015-02-08 - 2015-02-12 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. %B Human Vision and Electronic Imaging XX %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %Z sequence number: 939408 %I SPIE/IS&T %@ 9781628414844 %B Proceedings of SPIE %N 9394
Kellnhofer, P., Leimkühler, T., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015b. What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? Proceedings SAP 2015, ACM.
(Best Presentation Award)
Export
BibTeX
@inproceedings{Kellnhofer2015SAP, TITLE = {What Makes {2D}-to-{3D} Stereo Conversion Perceptually Plausible?}, AUTHOR = {Kellnhofer, Petr and Leimk{\"u}hler, Thomas and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, ISBN = {978-1-4503-3812-7}, DOI = {10.1145/2804408.2804409}, PUBLISHER = {ACM}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {Proceedings SAP 2015}, PAGES = {59--66}, ADDRESS = {T{\"u}bingen, Germany}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Leimk&#252;hler, Thomas %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? : %U http://hdl.handle.net/11858/00-001M-0000-0029-2460-7 %R 10.1145/2804408.2804409 %D 2015 %B ACM SIGGRAPH Symposium on Applied Perception %Z date of event: 2015-09-13 - 2015-09-14 %C T&#252;bingen, Germany %B Proceedings SAP 2015 %P 59 - 66 %I ACM %@ 978-1-4503-3812-7 %U http://resources.mpi-inf.mpg.de/StereoCueFusion/WhatMakes3D/
Kellnhofer, P., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2015c. Modeling Luminance Perception at Absolute Threshold. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015)34, 4.
Export
BibTeX
@article{Kellnhofer2015a, TITLE = {Modeling Luminance Perception at Absolute Threshold}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12687}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {34}, NUMBER = {4}, PAGES = {155--164}, BOOKTITLE = {Eurographics Symposium on Rendering 2015}, EDITOR = {Lehtinen, Jaakko and Nowrouzezahrai, Derek}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling Luminance Perception at Absolute Threshold : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E8D-4 %R 10.1111/cgf.12687 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 155 %P 155 - 164 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Jain, A., Chen, C., Thormählen, T., Metaxas, D., and Seidel, H.-P. 2015. Multi-layer Stencil Creation from Images. Computers and Graphics48.
Export
BibTeX
@article{JainMulti-layer2015, TITLE = {Multi-layer Stencil Creation from Images}, AUTHOR = {Jain, Arjun and Chen, Chao and Thorm{\"a}hlen, Thorsten and Metaxas, Dimitris and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2015.02.003}, PUBLISHER = {Pergamon}, ADDRESS = {New York, NY}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computers and Graphics}, VOLUME = {48}, PAGES = {11--22}, }
Endnote
%0 Journal Article %A Jain, Arjun %A Chen, Chao %A Thorm&#228;hlen, Thorsten %A Metaxas, Dimitris %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Multi-layer Stencil Creation from Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0027-9C34-A %R 10.1016/j.cag.2015.02.003 %7 2015-02-26 %D 2015 %J Computers and Graphics %V 48 %& 11 %P 11 - 22 %I Pergamon %C New York, NY %@ false
Herzog, R., Mewes, D., Wand, M., Guibas, L., and Seidel, H.-P. 2015. LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes. Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing 2015)34, 5.
Export
BibTeX
@article{HerzogSGP2015, TITLE = {{LeSSS}: {L}earned {S}hared {S}emantic {S}paces for Relating Multi-Modal Representations of {3D} Shapes}, AUTHOR = {Herzog, Robert and Mewes, Daniel and Wand, Michael and Guibas, Leonidas and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12703}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Chichester}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Geometric Processing)}, VOLUME = {34}, NUMBER = {5}, PAGES = {141--151}, BOOKTITLE = {Symposium on Geometry Processing 2015 (Eurographics Symposium on Geometric Processing 2015)}, EDITOR = {Ben-Chen, Mirela and Liu, Ligang}, }
Endnote
%0 Journal Article %A Herzog, Robert %A Mewes, Daniel %A Wand, Michael %A Guibas, Leonidas %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T LeSSS: Learned Shared Semantic Spaces for Relating Multi-modal Representations of 3D Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E9A-6 %R 10.1111/cgf.12703 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 5 %& 141 %P 141 - 151 %I Wiley-Blackwell %C Chichester %@ false %B Symposium on Geometry Processing 2015 %O Graz, Austria, July 6 - 8, 2015 SGP 2015 Eurographics Symposium on Geometric Processing 2015
Gryaditskaya, Y., Pouli, T., Reinhard, E., Myszkowski, K., and Seidel, H.-P. 2015. Motion Aware Exposure Bracketing for HDR Video. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015)34, 4.
Export
BibTeX
@article{Gryaditskaya2015, TITLE = {Motion Aware Exposure Bracketing for {HDR} Video}, AUTHOR = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12684}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {34}, NUMBER = {4}, PAGES = {119--130}, BOOKTITLE = {Eurographics Symposium on Rendering 2015}, EDITOR = {Lehtinen, Jaakko and Nowrouzezahrai, Derek}, }
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Aware Exposure Bracketing for HDR Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-15D2-B %R 10.1111/cgf.12684 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 119 %P 119 - 130 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Brandt, C., Seidel, H.-P., and Hildebrandt, K. 2015. Optimal Spline Approximation via ℓ₀-Minimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2015)34, 2.
Export
BibTeX
@article{Brandt2015, TITLE = {Optimal Spline Approximation via $\ell_0$-Minimization}, AUTHOR = {Brandt, Christopher and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12589}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {34}, NUMBER = {2}, PAGES = {617--626}, BOOKTITLE = {The 36th Annual Conference of the European Association of Computer Graphics (EUROGRAPHICS 2015)}, }
Endnote
%0 Journal Article %A Brandt, Christopher %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimal Spline Approximation via &#8467;&#8320;-Minimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D67-5 %R 10.1111/cgf.12589 %7 2015 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 2 %& 617 %P 617 - 626 %I Blackwell-Wiley %C Oxford %@ false %B The 36th Annual Conference of the European Association of Computer Graphics %O EUROGRAPHICS 2015 4th - 8th May 2015, Kongresshaus in Z&#252;rich, Switzerland
Arpa, S., Ritschel, T., Myszkowski, K., Çapin, T., and Seidel, H.-P. 2015. Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image. Computer Graphics Forum34, 1.
Export
BibTeX
@article{arpa2014purkinje, TITLE = {Purkinje Images: {Conveying} Different Content for Different Luminance Adaptations in a Single Image}, AUTHOR = {Arpa, Sami and Ritschel, Tobias and Myszkowski, Karol and {\c C}apin, Tolga and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12463}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum}, VOLUME = {34}, NUMBER = {1}, PAGES = {116--126}, }
Endnote
%0 Journal Article %A Arpa, Sami %A Ritschel, Tobias %A Myszkowski, Karol %A &#199;apin, Tolga %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D0B-6 %R 10.1111/cgf.12463 %7 2014-10-18 %D 2015 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 34 %N 1 %& 116 %P 116 - 126 %I Blackwell-Wiley %C Oxford %@ false
2014
Wu, X., Li, C., Wand, M., Hildebrandt, K., Jansen, S., and Seidel, H.-P. 2014a. 3D Model Retargeting Using Offset Statistics. 2nd International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Wu2014a, TITLE = {{3D} Model Retargeting Using Offset Statistics}, AUTHOR = {Wu, Xiaokun and Li, Chuan and Wand, Michael and Hildebrandt, Klaus and Jansen, Silke and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4799-7000-1}, DOI = {10.1109/3DV.2014.74}, PUBLISHER = {IEEE}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {2nd International Conference on 3D Vision}, PAGES = {353--360}, ADDRESS = {Tokyo, Japan}, }
Endnote
%0 Conference Proceedings %A Wu, Xiaokun %A Li, Chuan %A Wand, Michael %A Hildebrandt, Klaus %A Jansen, Silke %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Model Retargeting Using Offset Statistics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D63-D %R 10.1109/3DV.2014.74 %D 2014 %B 2nd International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 2nd International Conference on 3D Vision %P 353 - 360 %I IEEE %@ 978-1-4799-7000-1
Wu, X., Wand, M., Hildebrandt, K., Kohli, P., and Seidel, H.-P. 2014b. Real-time Symmetry-preserving Deformation. Computer Graphics Forum (Proc. Pacific Graphics 2014)33, 7.
Export
BibTeX
@article{Wu2014, TITLE = {Real-time Symmetry-preserving Deformation}, AUTHOR = {Wu, Xiaokun and Wand, Michael and Hildebrandt, Klaus and Kohli, Pushmeet and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12491}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {229--238}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Wu, Xiaokun %A Wand, Michael %A Hildebrandt, Klaus %A Kohli, Pushmeet %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Symmetry-preserving Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D08-5 %R 10.1111/cgf.12491 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 229 %P 229 - 238 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Wang, Z., Martinez Esturo, J., Seidel, H.-P., and Weinkauf, T. 2014. Pattern Search in Flows based on Similarity of Stream Line Segments. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Abstract
We propose a method that allows users to define flow features in the form of patterns represented as sparse sets of stream line segments. Our approach finds similar occurrences in the same or other time steps. Related approaches define patterns using dense, local stencils or support only single segments. Our patterns are defined sparsely and can have a significant extent, i.e., they are integration-based and not local. This allows for a greater flexibility in defining features of interest. Similarity is measured using intrinsic curve properties only, which enables invariance to location, orientation, and scale. Our method starts with splitting stream lines using globally-consistent segmentation criteria. It strives to maintain the visually apparent features of the flow as a collection of stream line segments. Most importantly, it provides similar segmentations for similar flow structures. For user-defined patterns of curve segments, our algorithm finds similar ones that are invariant to similarity transformations. We showcase the utility of our method using different 2D and 3D flow fields.
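One concrete reading of "intrinsic curve properties" is curvature sampled along arc length. The Python sketch below (an illustration under that assumption, not the authors' descriptor) resamples a stream line segment uniformly by arc length and returns a length-normalized curvature signature, which is invariant to translation, rotation, and scale; the sample count and the distance function are assumptions.

import numpy as np

def curvature_descriptor(points, n=32):
    # Arc-length resampling followed by curvature estimation. Curvature is
    # intrinsic (translation- and rotation-invariant); multiplying by the
    # total length makes the descriptor dimensionless, i.e. scale-invariant.
    pts = np.asarray(points, dtype=float)
    if pts.shape[1] == 2:                      # lift 2D stream lines to 3D
        pts = np.hstack([pts, np.zeros((len(pts), 1))])
    seg = np.linalg.norm(np.diff(pts, axis=0), axis=1)
    s = np.concatenate([[0.0], np.cumsum(seg)])
    u = np.linspace(0.0, s[-1], n)
    res = np.stack([np.interp(u, s, pts[:, d]) for d in range(3)], axis=1)
    d1 = np.gradient(res, axis=0)
    d2 = np.gradient(d1, axis=0)
    # Curvature of a space curve: |gamma' x gamma''| / |gamma'|^3
    num = np.linalg.norm(np.cross(d1, d2), axis=1)
    kappa = num / np.maximum(np.linalg.norm(d1, axis=1) ** 3, 1e-12)
    return kappa * s[-1]

def segment_distance(desc_a, desc_b):
    # Smaller means more similar; both descriptors must use the same n.
    return float(np.linalg.norm(desc_a - desc_b)) / len(desc_a)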
Export
BibTeX
@inproceedings{wang14, TITLE = {Pattern Search in Flows based on Similarity of Stream Line Segments}, AUTHOR = {Wang, Zhongjie and Martinez Esturo, Janick and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, DOI = {10.2312/vmv.20141272}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014-10}, ABSTRACT = {We propose a method that allows users to define flow features in form<br> of patterns represented as sparse sets of stream line segments. Our<br> approach finds similar occurrences in the same or other time steps.<br> Related approaches define patterns using dense, local stencils or<br> support only single segments. Our patterns are defined sparsely and<br> can have a significant extent, i.e., they are integration-based and<br> not local. This allows for a greater flexibility in defining features<br> of interest. Similarity is measured using intrinsic curve properties<br> only, which enables invariance to location, orientation, and scale.<br> Our method starts with splitting stream lines using globally-consistent<br> segmentation criteria. It strives to maintain the visually apparent<br> features of the flow as a collection of stream line segments. Most<br> importantly, it provides similar segmentations for similar flow structures.<br> For user-defined patterns of curve segments, our algorithm finds<br> similar ones that are invariant to similarity transformations. We<br> showcase the utility of our method using different 2D and 3D flow<br> fields.}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, DEBUG = {author: von Landesberger, Tatiana; author: Theisel, Holger; author: Urban, Philipp}, EDITOR = {Bender, Jan and Kuijper, Arjan}, PAGES = {23--30}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Martinez Esturo, Janick %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Pattern Search in Flows based on Similarity of Stream Line Segments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5337-3 %R 10.2312/vmv.20141272 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %X We propose a method that allows users to define flow features in form<br> of patterns represented as sparse sets of stream line segments. Our<br> approach finds similar occurrences in the same or other time steps.<br> Related approaches define patterns using dense, local stencils or<br> support only single segments. Our patterns are defined sparsely and<br> can have a significant extent, i.e., they are integration-based and<br> not local. This allows for a greater flexibility in defining features<br> of interest. Similarity is measured using intrinsic curve properties<br> only, which enables invariance to location, orientation, and scale.<br> Our method starts with splitting stream lines using globally-consistent<br> segmentation criteria. It strives to maintain the visually apparent<br> features of the flow as a collection of stream line segments. Most<br> importantly, it provides similar segmentations for similar flow structures.<br> For user-defined patterns of curve segments, our algorithm finds<br> similar ones that are invariant to similarity transformations. We<br> showcase the utility of our method using different 2D and 3D flow<br> fields. %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 23 - 30 %I Eurographics Association %U http://tinoweinkauf.net/
Vangorp, P., Mantiuk, R., Bazyluk, B., et al. 2014. Depth from HDR: Depth Induction or Increased Realism? SAP 2014, ACM Symposium on Applied Perception, ACM.
Export
BibTeX
@inproceedings{Vangorp2014, TITLE = {Depth from {HDR}: {Depth} Induction or Increased Realism?}, AUTHOR = {Vangorp, Peter and Mantiuk, Rafal and Bazyluk, Bartosz and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Watt, Simon J. and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-3009-1}, DOI = {10.1145/2628257.2628258}, PUBLISHER = {ACM}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {SAP 2014, ACM Symposium on Applied Perception}, EDITOR = {Bailey, Reynold and Kuhl, Scott}, PAGES = {71--78}, ADDRESS = {Vancouver, Canada}, }
Endnote
%0 Conference Proceedings %A Vangorp, Peter %A Mantiuk, Rafal %A Bazyluk, Bartosz %A Myszkowski, Karol %A Mantiuk, Rados&#322;aw %A Watt, Simon J. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Depth from HDR: Depth Induction or Increased Realism? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-34DB-5 %R 10.1145/2628257.2628258 %D 2014 %B ACM Symposium on Applied Perception %Z date of event: 2014-08-08 - 2014-08-09 %C Vancouver, Canada %K binocular disparity, contrast, luminance, stereo 3D %B SAP 2014 %E Bailey, Reynold; Kuhl, Scott %P 71 - 78 %I ACM %@ 978-1-4503-3009-1
Tevs, A., Huang, Q., Wand, M., Seidel, H.-P., and Guibas, L. 2014. Relating Shapes via Geometric Symmetries and Regularities. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014)33, 4.
Export
BibTeX
@article{TevsSIGGRAPH2014, TITLE = {Relating Shapes via Geometric Symmetries and Regularities}, AUTHOR = {Tevs, Art and Huang, Qixing and Wand, Michael and Seidel, Hans-Peter and Guibas, Leonidas}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601220}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--12}, EID = {119}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Tevs, Art %A Huang, Qixing %A Wand, Michael %A Seidel, Hans-Peter %A Guibas, Leonidas %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Relating Shapes via Geometric Symmetries and Regularities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8052-F %F ISI: 000340000100086 %R 10.1145/2601097.2601220 %7 2014-07 %D 2014 %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 12 %Z sequence number: 119 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O Vancouver, BC, Canada ACM SIGGRAPH 2014
Templin, K., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2014a. Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014)33, 4.
Export
BibTeX
@article{Templin:2014:MOE:2601097.2601148, TITLE = {Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601148}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--8}, EID = {145}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE16-9 %R 10.1145/2601097.2601148 %7 2014 %D 2014 %K S3D, binocular, eye&#8208;tracking %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 8 %Z sequence number: 145 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2014b. Perceptually-motivated Stereoscopic Film Grain. Computer Graphics Forum (Proc. Pacific Graphics 2014)33, 7.
Export
BibTeX
@article{Templin2014b, TITLE = {Perceptually-motivated Stereoscopic Film Grain}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12503}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {349--358}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Stereoscopic Film Grain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF2-B %R 10.1111/cgf.12503 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 349 %P 349 - 358 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2014a. Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model. Proceedings of the 2nd International Conference on 3D Vision, IEEE Xplore.
(arXiv: 1602.03860)
Abstract
Real-time marker-less hand tracking is of increasing importance in human-computer interaction. Robust and accurate tracking of arbitrary hand motion is a challenging problem due to the many degrees of freedom, frequent self-occlusions, fast motions, and uniform skin color. In this paper, we propose a new approach that tracks the full skeleton motion of the hand from multiple RGB cameras in real-time. The main contributions include a new generative tracking method which employs an implicit hand shape representation based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is smooth and analytically differentiable, making fast gradient-based pose optimization possible. This shape representation, together with a full perspective projection model, enables more accurate hand modeling than a related baseline method from the literature. Our method achieves better accuracy than previous methods and runs at 25 fps. We show these improvements both qualitatively and quantitatively on publicly available datasets.
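The key property, a closed-form and analytically differentiable model-to-image similarity, can be sketched with isotropic 2D Gaussians. The paper uses anisotropic Gaussians and optimizes skeletal pose parameters; this simplified Python sketch optimizes Gaussian centers directly and is not the authors' implementation.

import numpy as np

def overlap_energy(model_mu, model_sig, img_mu, img_sig):
    # Smooth similarity between two Gaussian mixtures: the integral of the
    # product of every model/image pair has a closed form in 2D, so both the
    # energy and its gradient w.r.t. the model centers are analytic -- the
    # property the abstract exploits for fast gradient-based optimization.
    E, grad = 0.0, np.zeros_like(model_mu)
    for j in range(len(img_mu)):
        s2 = model_sig ** 2 + img_sig[j] ** 2            # (M,)
        diff = model_mu - img_mu[j]                      # (M, 2)
        ov = (2.0 * np.pi * model_sig ** 2 * img_sig[j] ** 2 / s2) \
             * np.exp(-np.sum(diff ** 2, axis=1) / (2.0 * s2))
        E += ov.sum()
        grad += ov[:, None] * (-diff / s2[:, None])      # dE/d(model_mu)
    return E, grad

# One gradient-ascent step on hypothetical model centers:
# mu, sig = np.random.rand(8, 2), np.full(8, 0.1)
# E, g = overlap_energy(mu, sig, mu + 0.05, sig)
# mu += 0.1 * g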
Export
BibTeX
@inproceedings{Sridhar2016arXiv1602.03860, TITLE = {Real-time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model}, AUTHOR = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4799-7000-1}, URL = {http://arxiv.org/abs/1602.03860}, DOI = {10.1109/3DV.2014.37}, EPRINT = {1602.03860}, EPRINTTYPE = {arXiv}, PUBLISHER = {IEEE explore}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Real-time marker-less hand tracking is of increasing importance in<br>human-computer interaction. Robust and accurate tracking of arbitrary hand<br>motion is a challenging problem due to the many degrees of freedom, frequent<br>self-occlusions, fast motions, and uniform skin color. In this paper, we<br>propose a new approach that tracks the full skeleton motion of the hand from<br>multiple RGB cameras in real-time. The main contributions include a new<br>generative tracking method which employs an implicit hand shape representation<br>based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is<br>smooth and analytically differentiable making fast gradient based pose<br>optimization possible. This shape representation, together with a full<br>perspective projection model, enables more accurate hand modeling than a<br>related baseline method from literature. Our method achieves better accuracy<br>than previous methods and runs at 25 fps. We show these improvements both<br>qualitatively and quantitatively on publicly available datasets.<br>}, BOOKTITLE = {Proceedings of the 2nd International Conference on 3D Vision}, PAGES = {319--326}, ADDRESS = {Tokyo, Japan}, }
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-9878-6 %U http://arxiv.org/abs/1602.03860 %R 10.1109/3DV.2014.37 %D 2014 %B 2nd International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %X Real-time marker-less hand tracking is of increasing importance in<br>human-computer interaction. Robust and accurate tracking of arbitrary hand<br>motion is a challenging problem due to the many degrees of freedom, frequent<br>self-occlusions, fast motions, and uniform skin color. In this paper, we<br>propose a new approach that tracks the full skeleton motion of the hand from<br>multiple RGB cameras in real-time. The main contributions include a new<br>generative tracking method which employs an implicit hand shape representation<br>based on Sum of Anisotropic Gaussians (SAG), and a pose fitting energy that is<br>smooth and analytically differentiable making fast gradient based pose<br>optimization possible. This shape representation, together with a full<br>perspective projection model, enables more accurate hand modeling than a<br>related baseline method from literature. Our method achieves better accuracy<br>than previous methods and runs at 25 fps. We show these improvements both<br>qualitatively and quantitatively on publicly available datasets.<br> %K Computer Science, Computer Vision and Pattern Recognition, cs.CV %B Proceedings of the 2nd International Conference on 3D Vision %P 319 - 326 %I IEEE explore %@ 978-1-4799-7000-1
Sridhar, S., Rhodin, H., Seidel, H.-P., Oulasvirta, A., and Theobalt, C. 2014b. Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model. 3DV 2014, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{sridhar2014real, TITLE = {Real-time Hand Tracking Using a Sum of Anisotropic {Gaussians} Model}, AUTHOR = {Sridhar, Srinath and Rhodin, Helge and Seidel, Hans-Peter and Oulasvirta, Antti and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-4799-7001-8}, DOI = {10.1109/3DV.2014.37}, PUBLISHER = {IEEE Computer Society}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {3DV 2014, International Conference on 3D Vision}, PAGES = {319--326}, ADDRESS = {Tokyo, Japan}, }
Endnote
%0 Conference Proceedings %A Sridhar, Srinath %A Rhodin, Helge %A Seidel, Hans-Peter %A Oulasvirta, Antti %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Hand Tracking Using a Sum of Anisotropic Gaussians Model : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-69E9-F %R 10.1109/3DV.2014.37 %D 2014 %B International Conference on 3D Vision %Z date of event: 2014-12-08 - 2014-12-11 %C Tokyo, Japan %B 3DV 2014 %P 319 - 326 %I IEEE Computer Society %@ 978-1-4799-7001-8
Schulze, M., Martinez Esturo, J., Günther, T., et al. 2014. Sets of Globally Optimal Stream Surfaces for Flow Visualization. Computer Graphics Forum (Proc. EuroVis 2014)33, 3.
Export
BibTeX
@article{Schulze2014, TITLE = {Sets of Globally Optimal Stream Surfaces for Flow Visualization}, AUTHOR = {Schulze, Maik and Martinez Esturo, Janick and G{\"u}nther, T. and R{\"o}ssl, Christian and Seidel, Hans-Peter and Weinkauf, Tino and Theisel, Holger}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12356}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. EuroVis)}, VOLUME = {33}, NUMBER = {3}, PAGES = {1--10}, BOOKTITLE = {Eurographics Conference on Visualization (EuroVis 2014)}, EDITOR = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun}, }
Endnote
%0 Journal Article %A Schulze, Maik %A Martinez Esturo, Janick %A G&#252;nther, T. %A R&#246;ssl, Christian %A Seidel, Hans-Peter %A Weinkauf, Tino %A Theisel, Holger %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Sets of Globally Optimal Stream Surfaces for Flow Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-F518-1 %R 10.1111/cgf.12356 %7 2014-07-12 %D 2014 %K Categories and Subject Descriptors (according to ACM CCS), I.3.5 [Computer Graphics]: Computational Geometry and Object Modeling&#8212;Geometric algorithms, languages, and systems %J Computer Graphics Forum %V 33 %N 3 %& 1 %P 1 - 10 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Conference on Visualization %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2014a. Animating Deformable Objects Using Sparse Spacetime Constraints. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014)33, 4.
Export
BibTeX
@article{Schulz2014, TITLE = {Animating Deformable Objects Using Sparse Spacetime Constraints}, AUTHOR = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601156}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--10}, EID = {109}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Animating Deformable Objects Using Sparse Spacetime Constraints : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE18-5 %R 10.1145/2601097.2601156 %7 2014 %D 2014 %K model reduction, optimal control, physically&#8208;based animation, spacetime constraints, wiggly splines %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 10 %Z sequence number: 109 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Schulz, C., von Tycowicz, C., Seidel, H.-P., and Hildebrandt, K. 2014b. Proofs of two Theorems concerning Sparse Spacetime Constraints. https://arxiv.org/abs/1405.1902v1.
(arXiv: 1405.1902)
Abstract
In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable objects using sparse spacetime constraints is introduced. This report contains the proofs of two theorems presented in the paper.
Export
BibTeX
@online{Schulz-et-al_2014, TITLE = {Proofs of two Theorems concerning Sparse Spacetime Constraints}, AUTHOR = {Schulz, Christian and von Tycowicz, Christoph and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/1405.1902v1}, DOI = {10.48550/arXiv.1405.1902}, EPRINT = {1405.1902}, EPRINTTYPE = {arXiv}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable<br>objects using sparse spacetime constraints is introduced. This report contains<br>the proofs of two theorems presented in the paper.<br>}, }
Endnote
%0 Report %A Schulz, Christian %A von Tycowicz, Christoph %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Proofs of two Theorems concerning Sparse Spacetime Constraints : %G eng %U http://hdl.handle.net/21.11116/0000-000F-7066-0 %U https://arxiv.org/abs/1405.1902v1 %R 10.48550/arXiv.1405.1902 %D 2014 %X In the SIGGRAPH 2014 paper [SvTSH14] an approach for animating deformable<br>objects using sparse spacetime constraints is introduced. This report contains<br>the proofs of two theorems presented in the paper.<br> %K Computer Science, Graphics, cs.GR,Mathematics, Numerical Analysis, math.NA
Saikia, H., Seidel, H.-P., and Weinkauf, T. 2014. Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data. Computer Graphics Forum (Proc. EuroVis 2014)33, 3.
Abstract
We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.
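For intuition about the structure being compared, the following Python sketch (a minimal stand-in, not the paper's algorithm) extracts the (birth, death) branches of the superlevel-set merge tree of a 1D scalar series using a union-find sweep and the elder rule; the paper's contribution is then to compare all subtrees of such trees efficiently with dynamic programming.

import numpy as np

def superlevel_branches(values):
    # Sweep samples from high to low value; each branch of the merge (join)
    # tree appears as a (birth, death) pair: a component is born at a local
    # maximum and dies where it merges into an older (higher-born) component.
    order = np.argsort(values)[::-1]
    parent, birth = {}, {}

    def find(i):                               # union-find with path halving
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    pairs = []
    for i in order:
        i = int(i)
        parent[i] = i
        birth[i] = float(values[i])
        for j in (i - 1, i + 1):               # merge with active neighbors
            if j in parent:
                ri, rj = find(i), find(j)
                if ri != rj:
                    older, younger = (ri, rj) if birth[ri] >= birth[rj] else (rj, ri)
                    pairs.append((birth[younger], float(values[i])))
                    parent[younger] = older
    root = find(int(order[0]))                 # the oldest branch never merges
    pairs.append((birth[root], float(np.min(values))))
    return [(b, d) for b, d in pairs if b > d] # drop trivial zero-length branches

# branches = superlevel_branches(np.array([0.0, 2.0, 1.0, 3.0, 0.5]))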
Export
BibTeX
@article{saikia14a, TITLE = {Extended Branch Decomposition Graphs: {Structural} Comparison of Scalar Data}, AUTHOR = {Saikia, Himangshu and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12360}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other -- in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise.}, JOURNAL = {Computer Graphics Forum (Proc. EuroVis)}, VOLUME = {33}, NUMBER = {3}, PAGES = {41--50}, BOOKTITLE = {Eurographics Conference on Visualization 2014 (EuroVis 2014)}, EDITOR = {Carr, Hamish and Rheingans, Penny and Schumann, Heidrun}, }
Endnote
%0 Journal Article %A Saikia, Himangshu %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Extended Branch Decomposition Graphs: Structural Comparison of Scalar Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4FFB-A %R 10.1111/cgf.12360 %7 2014 %D 2014 %X We present a method to find repeating topological structures in scalar data sets. More precisely, we compare all subtrees of two merge trees against each other - in an efficient manner exploiting redundancy. This provides pair-wise distances between the topological structures defined by sub/superlevel sets, which can be exploited in several applications such as finding similar structures in the same data set, assessing periodic behavior in time-dependent data, and comparing the topology of two different data sets. To do so, we introduce a novel data structure called the extended branch decomposition graph, which is composed of the branch decompositions of all subtrees of the merge tree. Based on dynamic programming, we provide two highly efficient algorithms for computing and comparing extended branch decomposition graphs. Several applications attest to the utility of our method and its robustness against noise. %J Computer Graphics Forum %V 33 %N 3 %& 41 %P 41 - 50 %I Wiley-Blackwell %C Oxford %B Eurographics Conference on Visualization 2014 %O EuroVis 2014 Swansea, Wales, UK, June 9 &#8211; 13, 2014
Rhodin, H., Tompkin, J., Kim, K.I., Varanasi, K., Seidel, H.-P., and Theobalt, C. 2014. Interactive Motion Mapping for Real-time Character Control. Computer Graphics Forum (Proc. EUROGRAPHICS 2014)33, 2.
Export
BibTeX
@article{RhodinCGF2014, TITLE = {Interactive Motion Mapping for Real-time Character Control}, AUTHOR = {Rhodin, Helge and Tompkin, James and Kim, Kwang In and Varanasi, Kiran and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12325}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014-05}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {273--282}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Rhodin, Helge %A Tompkin, James %A Kim, Kwang In %A Varanasi, Kiran %A Seidel, Hans-Peter %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Motion Mapping for Real-time Character Control : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-8096-6 %F ISI: 000337543000028 %R 10.1111/cgf.12325 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 2 %& 273 %P 273 - 282 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014a. MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014)20, 12.
Abstract
In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.
Export
BibTeX
@article{palmas14b, TITLE = {{MovExp}: A Versatile Visualization Tool for Human-Computer Interaction Studies with {3D} Performance and Biomechanical Data}, AUTHOR = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2014.2346311}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2014}, DATE = {2014-12}, ABSTRACT = {In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts.}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)}, VOLUME = {20}, NUMBER = {12}, PAGES = {2359--2368}, BOOKTITLE = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014}, DEBUG = {author: Ebert, David; author: Hauser, Helwig; author: Heer, Jeffrey; author: North, Chris; author: Tory, Melanie; author: Qu, Huamin; author: Shen, Han-Wei; author: Ynnerman, Anders}, EDITOR = {Chen, Min}, }
Endnote
%0 Journal Article %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D4C-4 %R 10.1109/TVCG.2014.2346311 %7 2014 %D 2014 %X In Human-Computer Interaction (HCI), experts seek to evaluate and compare the performance and ergonomics of user interfaces. Recently, a novel cost-efficient method for estimating physical ergonomics and performance has been introduced to HCI. It is based on optical motion capture and biomechanical simulation. It provides a rich source for analyzing human movements summarized in a multidimensional data set. Existing visualization tools do not sufficiently support the HCI experts in analyzing this data. We identified two shortcomings. First, appropriate visual encodings are missing particularly for the biomechanical aspects of the data. Second, the physical setup of the user interface cannot be incorporated explicitly into existing tools. We present MovExp, a versatile visualization tool that supports the evaluation of user interfaces. In particular, it can be easily adapted by the HCI experts to include the physical setup that is being evaluated, and visualize the data on top of it. Furthermore, it provides a variety of visual encodings to communicate muscular loads, movement directions, and other specifics of HCI studies that employ motion capture and biomechanical simulation. In this design study, we follow a problem-driven research approach. Based on a formalization of the visualization needs and the data structure, we formulate technical requirements for the visualization tool and present novel solutions to the analysis needs of the HCI experts. We show the utility of our tool with four case studies from the daily work of our HCI experts. %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2359 %P 2359 - 2368 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9&#8211;14 November 2014 IEEE VIS 2014
Palmas, G., Bachynskyi, M., Oulasvirta, A., Seidel, H.-P., and Weinkauf, T. 2014b. An Edge-bundling Layout for Interactive Parallel Coordinates. PacificVis 2014, IEEE Pacific Visualization Symposium, IEEE Computer Society.
Abstract
Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.
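The per-axis clustering step can be pictured in a few lines. The Python sketch below (illustrative; the paper's density estimator and parameters may differ) clusters one parallel-coordinates axis at the local maxima of a Gaussian kernel density estimate and labels every value by its nearest peak, through which its bundled polyline would then be routed; bandwidth and grid size are assumptions.

import numpy as np

def axis_clusters(values, bandwidth=0.05, grid=256):
    # Density-based clustering of one axis: estimate a Gaussian KDE on a
    # regular grid, take its local maxima as cluster centers, and label every
    # value by its nearest center. Lines sharing a label can be bundled
    # through that center's position on the axis.
    v = (values - values.min()) / (values.max() - values.min() + 1e-12)
    xs = np.linspace(0.0, 1.0, grid)
    dens = np.exp(-0.5 * ((xs[:, None] - v[None, :]) / bandwidth) ** 2).sum(axis=1)
    peaks = [i for i in range(1, grid - 1)
             if dens[i] >= dens[i - 1] and dens[i] > dens[i + 1]]
    centers = xs[peaks] if peaks else xs[[int(np.argmax(dens))]]
    labels = np.argmin(np.abs(v[:, None] - centers[None, :]), axis=1)
    return labels, centers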
Export
BibTeX
@inproceedings{palmas14a, TITLE = {An Edge-bundling Layout for Interactive Parallel Coordinates}, AUTHOR = {Palmas, Gregorio and Bachynskyi, Myroslav and Oulasvirta, Antti and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, DOI = {10.1109/PacificVis.2014.40}, PUBLISHER = {IEEE Computer Society}, YEAR = {2014}, DATE = {2014-03}, ABSTRACT = {Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing.}, BOOKTITLE = {PacificVis 2014, IEEE Pacific Visualization Symposium}, PAGES = {57--64}, ADDRESS = {Yokohama, Japan}, }
Endnote
%0 Conference Proceedings %A Palmas, Gregorio %A Bachynskyi, Myroslav %A Oulasvirta, Antti %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Edge-bundling Layout for Interactive Parallel Coordinates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D29-0 %R 10.1109/PacificVis.2014.40 %D 2014 %B IEEE Pacific Visualization Symposium %Z date of event: 2014-03-04 - 2014-03-07 %C Yokohama, Japan %X Parallel Coordinates is an often used visualization method for multidimensional data sets. Its main challenges for large data sets are visual clutter and overplotting which hamper the recognition of patterns in the data. We present an edge-bundling method using density-based clustering for each dimension. This reduces clutter and provides a faster overview of clusters and trends. Moreover, it allows rendering the clustered lines using polygons, decreasing rendering time remarkably. In addition, we design interactions to support multidimensional clustering with this method. A user study shows improvements over the classic parallel coordinates plot in two user tasks: correlation estimation and subset tracing. %B PacificVis 2014 %P 57 - 64 %I IEEE Computer Society
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014a. Deep Screen Space for Indirect Lighting of Volumes. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{DBLP:conf/vmv/NalbachRS14, TITLE = {Deep Screen Space for Indirect Lighting of Volumes}, AUTHOR = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905674-74-3}, DOI = {10.2312/vmv.20141287}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, EDITOR = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp}, PAGES = {143--150}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space for Indirect Lighting of Volumes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D6C-B %R 10.2312/vmv.20141287 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 143 - 150 %I Eurographics Association %@ 978-3-905674-74-3 %U http://dx.doi.org/10.2312/vmv.20141287
Nalbach, O., Ritschel, T., and Seidel, H.-P. 2014b. Deep Screen Space. Proceedings I3D 2014, ACM.
Export
BibTeX
@inproceedings{Nalbach:2014:DSS:2556700.2556708, TITLE = {Deep Screen Space}, AUTHOR = {Nalbach, Oliver and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-2717-6}, URL = {http://doi.acm.org/10.1145/2556700.2556708}, DOI = {10.1145/2556700.2556708}, PUBLISHER = {ACM}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {Proceedings I3D 2014}, EDITOR = {Keyser, John and Sander, Pedro}, PAGES = {79--86}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Nalbach, Oliver %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Deep Screen Space : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4D74-8 %R 10.1145/2556700.2556708 %U http://doi.acm.org/10.1145/2556700.2556708 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 79 - 86 %I ACM %@ 978-1-4503-2717-6
Lochmann, G., Reinert, B., Ritschel, T., Müller, S., and Seidel, H.-P. 2014. Real-time Reflective and Refractive Novel-view Synthesis. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{LochmannVMV2014, TITLE = {Real-time Reflective and Refractive Novel-view Synthesis}, AUTHOR = {Lochmann, Gerrit and Reinert, Bernhard and Ritschel, Tobias and M{\"u}ller, Stefan and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2312/vmv.20141270}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, EDITOR = {Bender, Jan and Kuijper, Arjan and von Landesberger, Tatiana and Theisel, Holger and Urban, Philipp}, PAGES = {9--16}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Lochmann, Gerrit %A Reinert, Bernhard %A Ritschel, Tobias %A Müller, Stefan %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Real-time Reflective and Refractive Novel-view Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-533E-6 %R 10.2312/vmv.20141270 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %E Bender, Jan; Kuijper, Arjan; von Landesberger, Tatiana; Theisel, Holger; Urban, Philipp %P 9 - 16 %I Eurographics Association %U http://dx.doi.org/10.2312/vmv.20141270
Kurz, C., Wu, X., Wand, M., Thormählen, T., Kohli, P., and Seidel, H.-P. 2014. Symmetry-aware Template Deformation and Fitting. Computer Graphics Forum 33, 6.
Export
BibTeX
@article{Kurz2014, TITLE = {Symmetry-aware Template Deformation and Fitting}, AUTHOR = {Kurz, Christian and Wu, Xiaokun and Wand, Michael and Thorm{\"a}hlen, Thorsten and Kohli, P. and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12344}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum}, VOLUME = {33}, NUMBER = {6}, PAGES = {205--219}, }
Endnote
%0 Journal Article %A Kurz, Christian %A Wu, Xiaokun %A Wand, Michael %A Thormählen, Thorsten %A Kohli, P. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Symmetry-aware Template Deformation and Fitting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D2B-D %R 10.1111/cgf.12344 %7 2014-03-20 %D 2014 %J Computer Graphics Forum %V 33 %N 6 %& 205 %P 205 - 219 %I Wiley-Blackwell %C Oxford
Kozlov, Y., Esturo, J.M., Seidel, H.-P., and Weinkauf, T. 2014. Regularized Harmonic Surface Deformation. http://arxiv.org/abs/1408.3326.
(arXiv: 1408.3326)
Abstract
Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.
Export
BibTeX
@online{kozlov14, TITLE = {Regularized Harmonic Surface Deformation}, AUTHOR = {Kozlov, Yeara and Esturo, Janick Martinez and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1408.3326}, EPRINT = {1408.3326}, EPRINTTYPE = {arXiv}, YEAR = {2014}, ABSTRACT = {Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts.}, }
Endnote
%0 Report %A Kozlov, Yeara %A Esturo, Janick Martinez %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Regularized Harmonic Surface Deformation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-49F5-A %U http://arxiv.org/abs/1408.3326 %D 2014 %X Harmonic surface deformation is a well-known geometric modeling method that creates plausible deformations in an interactive manner. However, this method is susceptible to artifacts, in particular close to the deformation handles. These artifacts often correlate with strong gradients of the deformation energy. In this work, we propose a novel formulation of harmonic surface deformation, which incorporates a regularization of the deformation energy. To do so, we build on and extend a recently introduced generic linear regularization approach. It can be expressed as a change of norm for the linear optimization problem, i.e., the regularization is baked into the optimization. This minimizes the implementation complexity and has only a small impact on runtime. Our results show that a moderate use of regularization suppresses many deformation artifacts common to the well-known harmonic surface deformation method, without introducing new artifacts. %K Computer Science, Graphics, cs.GR
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014a. Filter-based Real-time Single Scattering using Rectified Shadow Maps. Journal of Computer Graphics Techniques 3, 3.
Export
BibTeX
@article{fbss_jcgtKlehm2014, TITLE = {Filter-based Real-time Single Scattering using Rectified Shadow Maps}, AUTHOR = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISSN = {2331-7418}, URL = {http://jcgt.org/published/0003/03/02/}, PUBLISHER = {Williams College}, ADDRESS = {Williamstown, MA}, YEAR = {2014}, DATE = {2014-08}, JOURNAL = {Journal of Computer Graphics Techniques}, VOLUME = {3}, NUMBER = {3}, PAGES = {7--34}, }
Endnote
%0 Journal Article %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Filter-based Real-time Single Scattering using Rectified Shadow Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51B3-E %U http://jcgt.org/published/0003/03/02/ %7 2014 %D 2014 %J Journal of Computer Graphics Techniques %O JCGT %V 3 %N 3 %& 7 %P 7 - 34 %I Williams College %C Williamstown, MA %@ false %U http://jcgt.org/published/0003/03/02/
Klehm, O., Seidel, H.-P., and Eisemann, E. 2014b. Prefiltered Single Scattering. Proceedings I3D 2014, ACM.
Export
BibTeX
@inproceedings{Klehm:2014:PSS:2556700.2556704, TITLE = {Prefiltered Single Scattering}, AUTHOR = {Klehm, Oliver and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISBN = {978-1-4503-2717-6}, DOI = {10.1145/2556700.2556704}, PUBLISHER = {ACM}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {Proceedings I3D 2014}, EDITOR = {Keyser, John and Sander, Pedro}, PAGES = {71--78}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Prefiltered Single Scattering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51C5-6 %R 10.1145/2556700.2556704 %D 2014 %B 18th Meeting of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2014-03-14 - 2014-03-16 %C San Francisco, CA, USA %K participating media, scattering, shadow test %B Proceedings I3D 2014 %E Keyser, John; Sander, Pedro %P 71 - 78 %I ACM %@ 978-1-4503-2717-6
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2014c. Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor. IEEE Transactions on Visualization and Computer Graphics 20, 7.
Export
BibTeX
@article{PLM-tvcg_Klehm2014, TITLE = {Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor}, AUTHOR = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2014.13}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2014}, DATE = {2014-07}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {20}, NUMBER = {7}, PAGES = {983--995}, }
Endnote
%0 Journal Article %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Property and Lighting Manipulations for Static Volume Stylization Using a Painting Metaphor : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-51CA-B %R 10.1109/TVCG.2014.13 %7 2014 %D 2014 %K rendering (computer graphics);artistic control;environmental lighting;image component;lighting manipulations;noise function parameters;painting metaphor;property manipulations;realistic rendering;static volume stylization;static volumes;tomographic reconstruction;volume appearance;volume properties;volumetric rendering equation;Equations;Image reconstruction;Lighting;Mathematical model;Optimization;Rendering (computer graphics);Scattering;Artist control;optimization;participating media %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 7 %& 983 %P 983 - 995 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2014a. Improving Perception of Binocular Stereo Motion on 3D Display Devices. Stereoscopic Displays and Applications XXV, SPIE.
Abstract
This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing-based displays. Third, we conclude with a recommendation on how to improve the rendering of synthetic stereo animations.
Export
BibTeX
@inproceedings{Kellnhofer2014a, TITLE = {Improving Perception of Binocular Stereo Motion on {3D} Display Devices}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, ISBN = {9780819499288}, DOI = {10.1117/12.2032389}, PUBLISHER = {SPIE}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing-based displays. Third, we conclude with a recommendation on how to improve the rendering of synthetic stereo animations.}, BOOKTITLE = {Stereoscopic Displays and Applications XXV}, EDITOR = {Woods, Andrew J. and Holliman, Nicolas S. and Favalora, Gregg E.}, PAGES = {1--11}, EID = {901116}, SERIES = {Proceedings of SPIE-IS\&T Electronic Imaging}, VOLUME = {9011}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improving Perception of Binocular Stereo Motion on 3D Display Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-318D-7 %R 10.1117/12.2032389 %D 2014 %B Stereoscopic Displays and Applications XXV %Z date of event: 2014-02-03 - 2014-02-05 %C San Francisco, CA, USA %X This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing-based displays. Third, we conclude with a recommendation on how to improve the rendering of synthetic stereo animations. %B Stereoscopic Displays and Applications XXV %E Woods, Andrew J.; Holliman, Nicolas S.; Favalora, Gregg E. %P 1 - 11 %Z sequence number: 901116 %I SPIE %@ 9780819499288 %B Proceedings of SPIE-IS&T Electronic Imaging %N 9011 %@ false
Kellnhofer, P., Ritschel, T., Vangorp, P., Myszkowski, K., and Seidel, H.-P. 2014b. Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision. ACM Transactions on Applied Perception 11, 3.
Export
BibTeX
@article{kellnhofer:2014c:DarkStereo, TITLE = {Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Vangorp, Peter and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1544-3558}, DOI = {10.1145/2644813}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Applied Perception}, VOLUME = {11}, NUMBER = {3}, EID = {15}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Vangorp, Peter %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE0E-E %R 10.1145/2644813 %7 2014 %D 2014 %J ACM Transactions on Applied Perception %V 11 %N 3 %Z sequence number: 15 %I ACM %C New York, NY %@ false
Günther, D., Jacobson, A., Reininghaus, J., Seidel, H.-P., Sorkine-Hornung, O., and Weinkauf, T. 2014. Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS 2014) 20, 12.
Export
BibTeX
@article{guenther14c, TITLE = {Fast and Memory-efficient Topological Denoising of {2D} and {3D} Scalar Fields}, AUTHOR = {G{\"u}nther, David and Jacobson, Alec and Reininghaus, Jan and Seidel, Hans-Peter and Sorkine-Hornung, Olga and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2014.2346432}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2014}, DATE = {2014-12}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VIS)}, VOLUME = {20}, NUMBER = {12}, PAGES = {2585--2594}, BOOKTITLE = {IEEE Visual Analytics Science \& Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014}, DEBUG = {author: Ebert, David; author: Hauser, Helwig; author: Heer, Jeffrey; author: North, Chris; author: Tory, Melanie; author: Qu, Huamin; author: Shen, Han-Wei; author: Ynnerman, Anders}, EDITOR = {Chen, Min}, }
Endnote
%0 Journal Article %A Günther, David %A Jacobson, Alec %A Reininghaus, Jan %A Seidel, Hans-Peter %A Sorkine-Hornung, Olga %A Weinkauf, Tino %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast and Memory-efficient Topological Denoising of 2D and 3D Scalar Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5349-E %R 10.1109/TVCG.2014.2346432 %7 2014 %D 2014 %J IEEE Transactions on Visualization and Computer Graphics %V 20 %N 12 %& 2585 %P 2585 - 2594 %I IEEE Computer Society %C Los Alamitos, CA %@ false %B IEEE Visual Analytics Science & Technology Conference, IEEE Information Visualization Conference, and IEEE Scientific Visualization Conference Proceedings 2014 %O Proceedings 2014 ; Paris, France, 9–14 November 2014 IEEE VIS 2014
Guenther, D., Reininghaus, J., Seidel, H.-P., and Weinkauf, T. 2014. Notes on the Simplification of the Morse-Smale Complex. Topological Methods in Data Analysis and Visualization III (TopoInVis 2013), Springer.
Abstract
The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.
Export
BibTeX
@inproceedings{guenther13a, TITLE = {Notes on the Simplification of the {Morse}-{Smale} Complex}, AUTHOR = {Guenther, David and Reininghaus, Jan and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISBN = {978-3-319-04098-1}, DOI = {10.1007/978-3-319-04099-8_9}, PUBLISHER = {Springer}, YEAR = {2013}, DATE = {2014}, ABSTRACT = {The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations.}, BOOKTITLE = {Topological Methods in Data Analysis and Visualization III (TopoInVis 2013)}, EDITOR = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald}, PAGES = {135--150}, SERIES = {Mathematics and Visualization}, ADDRESS = {Davis, CA, USA}, }
Endnote
%0 Conference Proceedings %A Guenther, David %A Reininghaus, Jan %A Seidel, Hans-Peter %A Weinkauf, Tino %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Notes on the Simplification of the Morse-Smale Complex : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-52F3-3 %R 10.1007/978-3-319-04099-8_9 %D 2014 %B TopoInVis %Z date of event: 2013-03-04 - 2013-03-06 %C Davis, CA, USA %X The Morse-Smale complex can be either explicitly or implicitly represented. Depending on the type of representation, the simplification of the Morse-Smale complex works differently. In the explicit representation, the Morse-Smale complex is directly simplified by explicitly reconnecting the critical points during the simplification. In the implicit representation, on the other hand, the Morse-Smale complex is given by a combinatorial gradient field. In this setting, the simplification changes the combinatorial flow, which yields an indirect simplification of the Morse-Smale complex. The topological complexity of the Morse-Smale complex is reduced in both representations. However, the simplifications generally yield different results. In this paper, we emphasize the differences between these two representations, and provide a high-level discussion about their advantages and limitations. %B Topological Methods in Data Analysis and Visualization III %E Bremer, Peer-Timo; Hotz, Ingrid; Pascucci, Valerio; Peikert, Ronald %P 135 - 150 %I Springer %@ 978-3-319-04098-1 %B Mathematics and Visualization %U https://rdcu.be/dK3QD
Gryaditskaya, Y., Pouli, T., Reinhard, E., and Seidel, H.-P. 2014. Sky Based Light Metering for High Dynamic Range Images. Computer Graphics Forum (Proc. Pacific Graphics 2014) 33, 7.
Abstract
Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel—effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.
Export
BibTeX
@article{CGF:Gryad:14, TITLE = {Sky Based Light Metering for High Dynamic Range Images}, AUTHOR = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.12474}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel---effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design.}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {61--69}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Sky Based Light Metering for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-6C64-1 %R 10.1111/cgf.12474 %7 2014 %D 2014 %X Image calibration requires both linearization of pixel values and scaling so that values in the image correspond to real-world luminances. In this paper we focus on the latter and rather than rely on camera characterization, we calibrate images by analysing their content and metadata, obviating the need for expensive measuring devices or modeling of lens and camera combinations. Our analysis correlates sky pixel values to luminances that would be expected based on geographical metadata. Combined with high dynamic range (HDR) imaging, which gives us linear pixel data, our algorithm allows us to find absolute luminance values for each pixel—effectively turning digital cameras into absolute light meters. To validate our algorithm we have collected and annotated a calibrated set of HDR images and compared our estimation with several other approaches, showing that our approach is able to more accurately recover absolute luminance. We discuss various applications and demonstrate the utility of our method in the context of calibrated color appearance reproduction and lighting design. %J Computer Graphics Forum %V 33 %N 7 %& 61 %P 61 - 69 %I Wiley-Blackwell %C Oxford, UK %@ false %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014a. Interactive Light Scattering with Principal-ordinate Propagation. Graphics Interface 2014, Canadian Information Processing Society.
Export
BibTeX
@inproceedings{ElekGI2014, TITLE = {Interactive Light Scattering with Principal-ordinate Propagation}, AUTHOR = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4822-6003-8}, URL = {https://graphicsinterface.org/proceedings/gi2014/gi2014-11/}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {Graphics Interface 2014}, EDITOR = {Kry, Paul G. and Bunt, Andrea}, PAGES = {87--94}, ADDRESS = {Montreal, Canada}, }
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Light Scattering with Principal-ordinate Propagation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5181-D %U https://graphicsinterface.org/proceedings/gi2014/gi2014-11/ %D 2014 %B Graphics Interface %Z date of event: 2014-05-07 - 2014-05-09 %C Montreal, Canada %B Graphics Interface 2014 %E Kry, Paul G.; Bunt, Andrea %P 87 - 94 %I Canadian Information Processing Society %@ 978-1-4822-6003-8
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014b. Progressive Spectral Ray Differentials. VMV 2014 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{ElekVMV2014, TITLE = {Progressive Spectral Ray Differentials}, AUTHOR = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905674-74-3}, DOI = {10.2312/vmv.20141288}, PUBLISHER = {Eurographics Association}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {VMV 2014 Vision, Modeling and Visualization}, PAGES = {151--158}, ADDRESS = {Darmstadt, Germany}, }
Endnote
%0 Conference Proceedings %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Progressive Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5176-5 %R 10.2312/vmv.20141288 %D 2014 %B 19th International Workshop on Vision, Modeling and Visualization %Z date of event: 2014-10-08 - 2014-10-10 %C Darmstadt, Germany %B VMV 2014 Vision, Modeling and Visualization %P 151 - 158 %I Eurographics Association %@ 978-3-905674-74-3
Elek, O., Ritschel, T., Dachsbacher, C., and Seidel, H.-P. 2014c. Principal-ordinates Propagation for Real-time Rendering of Participating Media. Computers & Graphics 45.
Export
BibTeX
@article{ElekCAG2014, TITLE = {Principal-ordinates Propagation for Real-time Rendering of Participating Media}, AUTHOR = {Elek, Oskar and Ritschel, Tobias and Dachsbacher, Carsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2014.08.003}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computers \& Graphics}, VOLUME = {45}, PAGES = {28--39}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Ritschel, Tobias %A Dachsbacher, Carsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Principal-ordinates Propagation for Real-time Rendering of Participating Media : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-516D-C %R 10.1016/j.cag.2014.08.003 %7 2014-09-06 %D 2014 %J Computers & Graphics %V 45 %& 28 %P 28 - 39 %I Elsevier %C Amsterdam %@ false
Elek, O., Bauszat, P., Ritschel, T., Magnor, M., and Seidel, H.-P. 2014d. Spectral Ray Differentials. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2014) 33, 4.
Export
BibTeX
@article{Elek2014EGSR, TITLE = {Spectral Ray Differentials}, AUTHOR = {Elek, Oskar and Bauszat, Pablo and Ritschel, Tobias and Magnor, Marcus and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12418}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {33}, NUMBER = {4}, PAGES = {113--122}, BOOKTITLE = {Eurographics Symposium on Rendering 2014}, EDITOR = {Jarosz, Wojciech and Peers, Pieter}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Bauszat, Pablo %A Ritschel, Tobias %A Magnor, Marcus %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Spectral Ray Differentials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-4A77-B %R 10.1111/cgf.12418 %7 2014 %D 2014 %J Computer Graphics Forum %V 33 %N 4 %& 113 %P 113 - 122 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2014 %O Eurographics Symposium on Rendering 2014 EGSR 2014 Lyon, France, June 25th - 27th, 2014
Dabala, L., Kellnhofer, P., Ritschel, T., et al. 2014. Manipulating Refractive and Reflective Binocular Disparity. Computer Graphics Forum (Proc. EUROGRAPHICS 2014) 33, 2.
Abstract
Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e.g., for glass, which both reflects and refracts; this may confuse the observer and result in a poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes.
Export
BibTeX
@article{Kellnhofer2014b, TITLE = {Manipulating Refractive and Reflective Binocular Disparity}, AUTHOR = {Dabala, Lukasz and Kellnhofer, Petr and Ritschel, Tobias and Didyk, Piotr and Templin, Krzysztof and Rokita, Przemyslaw and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12290}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e.g., for glass, which both reflects and refracts; this may confuse the observer and result in a poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {53--62}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Dabala, Lukasz %A Kellnhofer, Petr %A Ritschel, Tobias %A Didyk, Piotr %A Templin, Krzysztof %A Rokita, Przemyslaw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Manipulating Refractive and Reflective Binocular Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-EEF9-6 %R 10.1111/cgf.12290 %7 2014-06-01 %D 2014 %X Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e.g., for glass, which both reflects and refracts; this may confuse the observer and result in a poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes. %J Computer Graphics Forum %V 33 %N 2 %& 53 %P 53 - 62 %I Wiley-Blackwell %C Oxford, UK %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th – 11th, 2014 EUROGRAPHICS 2014 EG 2014
Brunton, A., Wand, M., Wuhrer, S., Seidel, H.-P., and Weinkauf, T. 2014. A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation. Graphical Models 76, 2.
Abstract
Intrinsic shape matching has become the standard approach for pose-invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.
Export
BibTeX
@article{brunton13, TITLE = {A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation}, AUTHOR = {Brunton, Alan and Wand, Michael and Wuhrer, Stefanie and Seidel, Hans-Peter and Weinkauf, Tino}, LANGUAGE = {eng}, ISSN = {1524-0703}, DOI = {10.1016/j.gmod.2013.11.003}, PUBLISHER = {Academic Press}, ADDRESS = {San Diego, CA}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Intrinsic shape matching has become the standard approach for pose-invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms.}, JOURNAL = {Graphical Models}, VOLUME = {76}, NUMBER = {2}, PAGES = {70--85}, }
Endnote
%0 Journal Article %A Brunton, Alan %A Wand, Michael %A Wuhrer, Stefanie %A Seidel, Hans-Peter %A Weinkauf, Tino %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Low-dimensional Representation for Robust Partial Isometric Correspondences Computation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F6E9-5 %R 10.1016/j.gmod.2013.11.003 %7 2013-12-15 %D 2014 %X Intrinsic shape matching has become the standard approach for pose-invariant correspondence estimation among deformable shapes. Most existing approaches assume global consistency. While global isometric matching is well understood, only a few heuristic solutions are known for partial matching. Partial matching is particularly important for robustness to topological noise, which is a common problem in real-world scanner data. We introduce a new approach to partial isometric matching based on the observation that isometries are fully determined by local information: a map of a single point and its tangent space fixes an isometry. We develop a new representation for partial isometric maps based on equivalence classes of correspondences between pairs of points and their tangent-spaces. We apply our approach to register partial point clouds and compare it to state-of-the-art methods, where we obtain significant improvements over global methods for real-world data and stronger guarantees than previous partial matching algorithms. %J Graphical Models %V 76 %N 2 %& 70 %P 70 - 85 %I Academic Press %C San Diego, CA %@ false
2013
Wang, Z., Grochulla, M.P., Thormählen, T., and Seidel, H.-P. 2013. 3D Face Template Registration Using Normal Maps. 3DV 2013, International Conference on 3D Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Wang2013, TITLE = {{3D} Face Template Registration Using Normal Maps}, AUTHOR = {Wang, Zhongjie and Grochulla, Martin Peter and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-7695-5067-1}, DOI = {10.1109/3DV.2013.46}, LOCALID = {Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013}, PUBLISHER = {IEEE Computer Society}, YEAR = {2013}, DATE = {2013}, BOOKTITLE = {3DV 2013, International Conference on 3D Vision}, EDITOR = {Guerrero, Juan E.}, PAGES = {295--302}, ADDRESS = {Seattle, WA, USA}, }
Endnote
%0 Conference Proceedings %A Wang, Zhongjie %A Grochulla, Martin Peter %A Thormählen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Face Template Registration Using Normal Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CEC-B %R 10.1109/3DV.2013.46 %F OTHER: Local-ID: 220FFD3372EB9C04C1257C6000528BF3-Wang2013 %D 2013 %B International Conference on 3D Vision %Z date of event: 2013-06-29 - 2013-07-01 %C Seattle, WA, USA %B 3DV 2013 %E Guerrero, Juan E. %P 295 - 302 %I IEEE Computer Society %@ 978-0-7695-5067-1
Von Tycowicz, C., Schulz, C., Seidel, H.-P., and Hildebrandt, K. 2013. An Efficient Construction of Reduced Deformable Objects. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013) 32, 6.
Abstract
Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the approximation of reduced forces and for the construction of reduced shape spaces of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.
Export
BibTeX
@article{Hildebrandt2013, TITLE = {An Efficient Construction of Reduced Deformable Objects}, AUTHOR = {von Tycowicz, Christoph and Schulz, Christian and Seidel, Hans-Peter and Hildebrandt, Klaus}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2508363.2508392}, LOCALID = {Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the \emph{approximation of reduced forces} and for the \emph{construction of reduced shape spaces} of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {32}, NUMBER = {6}, PAGES = {1--10}, EID = {213}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2013}, }
Endnote
%0 Journal Article %A von Tycowicz, Christoph %A Schulz, Christian %A Seidel, Hans-Peter %A Hildebrandt, Klaus %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Construction of Reduced Deformable Objects : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3A34-A %R 10.1145/2508363.2508392 %F OTHER: Local-ID: CBFBAC90E4E008EDC1257C240031E997-Hildebrandt2013 %7 2013 %D 2013 %X Many efficient computational methods for physical simulation are based on model reduction. We propose new model reduction techniques for the approximation of reduced forces and for the construction of reduced shape spaces of deformable objects that accelerate the construction of a reduced dynamical system, increase the accuracy of the approximation, and simplify the implementation of model reduction. Based on the techniques, we introduce schemes for real-time simulation of deformable objects and interactive deformation-based editing of triangle or tet meshes. We demonstrate the effectiveness of the new techniques in different experiments with elastic solids and shells and compare them to alternative approaches. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 10 %Z sequence number: 213 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Sunkel, M., Jansen, S., Wand, M., and Seidel, H.-P. 2013. A Correlated Parts Model for Object Detection in Large 3D Scans. Computer Graphics Forum (Proc. EUROGRAPHICS 2013) 32, 2.
Abstract
This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.
Export
BibTeX
@article{Sunkel2013, TITLE = {A Correlated Parts Model for Object Detection in Large {3D} Scans}, AUTHOR = {Sunkel, Martin and Jansen, Silke and Wand, Michael and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12040}, LOCALID = {Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {32}, NUMBER = {2}, PAGES = {205--214}, BOOKTITLE = {EUROGRAPHICS 2013}, EDITOR = {Poulin, P. and Navazo, I.}, }
Endnote
%0 Journal Article %A Sunkel, Martin %A Jansen, Silke %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Correlated Parts Model for Object Detection in Large 3D Scans : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1CE6-8 %R 10.1111/cgf.12040 %F OTHER: Local-ID: 71E3D133D260E612C1257B0400475765-Sunkel2013 %7 2013-05-06 %D 2013 %X This paper addresses the problem of detecting objects in 3D scans according to object classes learned from sparse user annotation. We model objects belonging to a class by a set of fully correlated parts, encoding dependencies between local shapes of different parts as well as their relative spatial arrangement. For an efficient and comprehensive retrieval of instances belonging to a class of interest, we introduce a new approximate inference scheme and a corresponding planning procedure. We extend our technique to hierarchical composite structures, reducing training effort and modeling spatial relations between detected instances. We evaluate our method on a number of real-world 3D scans and demonstrate its benefits as well as the performance of the new inference algorithm. %J Computer Graphics Forum %V 32 %N 2 %& 205 %P 205 - 214 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EG 2013 EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th – 10th, 2013
Scherbaum, K., Petterson, J., Feris, R.S., Blanz, V., and Seidel, H.-P. 2013. Fast Face Detector Training Using Tailored Views. ICCV 2013, IEEE International Conference on Computer Vision, IEEE Computer Society.
Export
BibTeX
@inproceedings{Scherbaum2013, TITLE = {Fast Face Detector Training Using Tailored Views}, AUTHOR = {Scherbaum, Kristina and Petterson, James and Feris, Rogerio S. and Blanz, Volker and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1550-5499}, ISBN = {978-1-4799-2839-2}, DOI = {10.1109/ICCV.2013.354}, LOCALID = {Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013}, PUBLISHER = {IEEE Computer Society}, YEAR = {2013}, DATE = {2013}, BOOKTITLE = {ICCV 2013, IEEE International Conference on Computer Vision}, PAGES = {2848--2855}, ADDRESS = {Sydney, Australia}, }
Endnote
%0 Conference Proceedings %A Scherbaum, Kristina %A Petterson, James %A Feris, Rogerio S. %A Blanz, Volker %A Seidel, Hans-Peter %+ Cluster of Excellence Multimodal Computing and Interaction External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Fast Face Detector Training Using Tailored Views : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-7AC0-9 %R 10.1109/ICCV.2013.354 %F OTHER: Local-ID: BBE1AD1B44792B41C1257C600050C266-Scherbaum2013 %D 2013 %B IEEE International Conference on Computer Vision %Z date of event: 2013-12-01 - 2013-12-08 %C Sydney, Australia %B ICCV 2013 %P 2848 - 2855 %I IEEE Computer Society %@ false
Reshetouski, I., Manakov, A., Bhandari, A., Raskar, R., Seidel, H.-P., and Ihrke, I. 2013. Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point. 2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2013), IEEE Computer Society.
Export
BibTeX
@inproceedings{DBLP:conf/cvpr/ReshetouskiMBRSI13, TITLE = {Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point}, AUTHOR = {Reshetouski, Ilya and Manakov, Alkhazur and Bhandari, Ayush and Raskar, Ramesh and Seidel, Hans-Peter and Ihrke, Ivo}, LANGUAGE = {eng}, ISBN = {978-1-5386-5672-3}, DOI = {10.1109/CVPR.2013.19}, PUBLISHER = {IEEE Computer Society}, YEAR = {2013}, DATE = {2013}, BOOKTITLE = {2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2013)}, PAGES = {89--96}, ADDRESS = {Portland, OR, USA}, }
Endnote
%0 Conference Proceedings %A Reshetouski, Ilya %A Manakov, Alkhazur %A Bhandari, Ayush %A Raskar, Ramesh %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Discovering the Structure of a Planar Mirror System from Multiple Observations of a Single Point : %G eng %U http://hdl.handle.net/21.11116/0000-000F-6BFF-B %R 10.1109/CVPR.2013.19 %D 2013 %B 2013 IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2013-06-23 - 2013-06-28 %C Portland, OR, USA %B 2013 IEEE Conference on Computer Vision and Pattern Recognition %P 89 - 96 %I IEEE Computer Society %@ 978-1-5386-5672-3
Reinhard, E., Efros, A., Kautz, J., and Seidel, H.-P. 2013. On Visual Realism of Synthesized Imagery. Proceedings of the IEEE 101, 9.
Export
BibTeX
@article{Reinhard2013a, TITLE = {On Visual Realism of Synthesized Imagery}, AUTHOR = {Reinhard, Erik and Efros, Alexei and Kautz, Jan and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0018-9219}, DOI = {10.1109/JPROC.2013.2260711}, LOCALID = {Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a}, PUBLISHER = {IEEE}, ADDRESS = {Piscataway, NJ}, YEAR = {2013}, DATE = {2013}, JOURNAL = {Proceedings of the IEEE}, VOLUME = {101}, NUMBER = {9}, PAGES = {1998--2007}, }
Endnote
%0 Journal Article %A Reinhard, Erik %A Efros, Alexei %A Kautz, Jan %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T On Visual Realism of Synthesized Imagery : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3E31-1 %R 10.1109/JPROC.2013.2260711 %F OTHER: Local-ID: 87D8785C8741C366C1257B820045FF96-Reinhard2013a %7 2013-07-25 %D 2013 %J Proceedings of the IEEE %O Proc. IEEE %V 101 %N 9 %& 1998 %P 1998 - 2007 %I IEEE %C Piscataway, NJ %@ false
Reinert, B., Ritschel, T., and Seidel, H.-P. 2013. Interactive By-example Design of Artistic Packing Layouts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2013)32, 6.
Abstract
We propose an approach to "pack" a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from the interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extent, we propose a novel generalization of Centroidal Voronoi Tessellation which equalizes the distances between the boundaries of nearby primitives. Compared to previous primitive-distribution approaches, our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability.
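For intuition, the Centroidal Voronoi Tessellation that the paper generalizes can be computed with plain Lloyd relaxation. The Python sketch below is an illustrative reconstruction under that assumption, not the authors' GPU implementation: it relaxes point sites on a pixel grid, whereas the paper equalizes distances between the boundaries of extended primitives.

    import numpy as np

    def lloyd_cvt(sites, iters=20, res=128):
        # Classic CVT: alternate between assigning grid samples to their
        # nearest site and moving each site to its cell's centroid.
        ys, xs = np.mgrid[0:res, 0:res]
        pixels = np.stack([xs, ys], axis=-1).reshape(-1, 2) / (res - 1.0)
        for _ in range(iters):
            d = np.linalg.norm(pixels[:, None, :] - sites[None, :, :], axis=-1)
            labels = d.argmin(axis=1)          # discrete Voronoi assignment
            for i in range(len(sites)):
                cell = pixels[labels == i]
                if len(cell):
                    sites[i] = cell.mean(axis=0)
        return sites

    sites = lloyd_cvt(np.random.rand(32, 2))   # 32 sites in the unit square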
Export
BibTeX
@article{Reinert2013, TITLE = {Interactive By-example Design of Artistic Packing Layouts}, AUTHOR = {Reinert, Bernhard and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2508363.2508409}, LOCALID = {Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {We propose an approach to ``pack'' a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from the interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extent, we propose a novel generalization of Centroidal Voronoi Tessellation which equalizes the distances between the boundaries of nearby primitives. Compared to previous primitive-distribution approaches, our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {32}, NUMBER = {6}, PAGES = {1--7}, EID = {218}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2013}, }
Endnote
%0 Journal Article %A Reinert, Bernhard %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive By-example Design of Artistic Packing Layouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-18D8-6 %R 10.1145/2508363.2508409 %F OTHER: Local-ID: 7A381077C9181F50C1257C6F004CC475-Reinert2013 %D 2013 %X We propose an approach to "pack" a set of two-dimensional graphical primitives into a spatial layout that follows artistic goals. We formalize this process as projecting from a high-dimensional feature space into a 2D layout. Our system does not expose the control of this projection to the user in form of sliders or similar interfaces. Instead, we infer the desired layout of all primitives from the interactive placement of a small subset of example primitives. To produce a pleasant distribution of primitives with spatial extent, we propose a novel generalization of Centroidal Voronoi Tessellation which equalizes the distances between the boundaries of nearby primitives. Compared to previous primitive-distribution approaches, our GPU implementation achieves both better fidelity and asymptotically higher speed. A user study evaluates the system's usability. %J ACM Transactions on Graphics %V 32 %N 6 %& 1 %P 1 - 7 %Z sequence number: 218 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2013 %O ACM SIGGRAPH Asia 2013 Hong Kong, 19 - 22 November 2013
Pouli, T., Artusi, A., Banterle, F., Akyüz, A.O., Seidel, H.-P., and Reinhard, E. 2013. Color Correction for Tone Reproduction. 21st Color and Imaging Conference Final Program and Proceedings (CIC 2013), IS&T.
Export
BibTeX
@inproceedings{PouliCIC21, TITLE = {Color Correction for Tone Reproduction}, AUTHOR = {Pouli, Tania and Artusi, Alessandro and Banterle, Francesco and Aky{\"u}z, Ahmet O. and Seidel, Hans-Peter and Reinhard, Erik}, LANGUAGE = {eng}, DOI = {10.2352/CIC.2013.21.1.art00039}, PUBLISHER = {IS\&T}, YEAR = {2013}, DATE = {2013}, BOOKTITLE = {21st Color and Imaging Conference Final Program and Proceedings (CIC 2013)}, PAGES = {215--220}, ADDRESS = {Albuquerque, NM, USA}, }
Endnote
%0 Conference Proceedings %A Pouli, Tania %A Artusi, Alessandro %A Banterle, Francesco %A Akyüz, Ahmet O. %A Seidel, Hans-Peter %A Reinhard, Erik %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Color Correction for Tone Reproduction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-A2C6-E %R 10.2352/CIC.2013.21.1.art00039 %D 2013 %B 21st Color and Imaging Conference %Z date of event: 2013-11-04 - 2013-11-08 %C Albuquerque, NM, USA %B 21st Color and Imaging Conference Final Program and Proceedings %P 215 - 220 %I IS&T
Nguyen, C., Scherzer, D., Ritschel, T., and Seidel, H.-P. 2013. Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Abstract
This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that "explains" the surface light field manipulation. Instead of the classic $\mathcal{L}_2$ fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse $\mathcal{L}_0$ change of shading parameters. Consequently, it does not require "diffuse" or "glossiness" brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made with a single simple color brush alone: drawing a highlight will increase the Phong specular component; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation, in combination with screen-space edit upsampling, allows editing to be performed interactively on a GPU.
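In symbols (notation ours, not taken from the paper): writing $\mathcal{R}(\rho;\,L_{\mathrm{in}})$ for the surface light field produced by reflectance parameters $\rho$ under incoming illumination $L_{\mathrm{in}}$, and $L_{\mathrm{edit}}$ for the user-manipulated surface light field, the sparse formulation replaces a plain least-squares fit with an $\mathcal{L}_0$-penalized one:

    \min_{\Delta\rho}\ \bigl\|\mathcal{R}(\rho+\Delta\rho;\,L_{\mathrm{in}})-L_{\mathrm{edit}}\bigr\|_2^2+\lambda\,\|\Delta\rho\|_0
    \qquad\text{instead of}\qquad
    \min_{\rho}\ \bigl\|\mathcal{R}(\rho;\,L_{\mathrm{in}})-L_{\mathrm{edit}}\bigr\|_2^2 .

The $\mathcal{L}_0$ term is what concentrates the explanation of a scribble in a few shading parameters (e.g., only the specular component), so no parameter-specific brushes are needed.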
Export
BibTeX
@article{Nguyen2013, TITLE = {Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization}, AUTHOR = {Nguyen, Chuong and Scherzer, Daniel and Ritschel, Tobias and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12038}, LOCALID = {Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic \mathcal L_2 fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse \mathcal L_0 change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {32}, NUMBER = {2}, PAGES = {185--194}, BOOKTITLE = {EUROGRAPHICS 2013}, EDITOR = {Poulin, P. and Navazo, I.}, }
Endnote
%0 Journal Article %A Nguyen, Chuong %A Scherzer, Daniel %A Ritschel, Tobias %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Material Editing in Complex Scenes by Surface Light Field Manipulation and Reflectance Optimization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3810-8 %R 10.1111/cgf.12038 %F OTHER: Local-ID: 4CD3871C310E2855C1257B010065285A-Nguyen2013 %7 2013-05-06 %D 2013 %X This work addresses the challenge of intuitive appearance editing in scenes with complex geometric layout and complex, spatially-varying indirect lighting. In contrast to previous work, that aimed to edit surface reflectance, our system allows a user to freely manipulate the surface light field. It then finds the best surface reflectance that ``explains'' the surface light field manipulation. Instead of classic \mathcal L_2 fitting of reflectance to a combination of incoming and exitant illumination, our system infers a sparse \mathcal L_0 change of shading parameters instead. Consequently, our system does not require ``diffuse'' or ``glossiness'' brushes or any such understanding of the underlying reflectance parametrization. Instead, it infers reflectance changes from scribbles made by a single simple color brush tool alone: Drawing a highlight will increase Phong specular; blurring a mirror reflection will decrease glossiness; etc. A sparse-solver framework operating on a novel point-based, pre-convolved lighting representation in combination with screen-space edit upsampling allows to perform editing interactively on a GPU. %J Computer Graphics Forum %V 32 %N 2 %& 185 %P 185 - 194 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 EG 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013
Milliez, A., Wand, M., Cani, M.-P., and Seidel, H.-P. 2013. Mutable Elastic Models for Sculpting Structured Shapes. Computer Graphics Forum (Proc. EUROGRAPHICS 2013)32, 2.
Export
BibTeX
@article{Milliez2013, TITLE = {Mutable Elastic Models for Sculpting Structured Shapes}, AUTHOR = {Milliez, Antoine and Wand, Michael and Cani, Marie-Paule and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12022}, LOCALID = {Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {32}, NUMBER = {2}, PAGES = {21--30}, BOOKTITLE = {EUROGRAPHICS 2013}, EDITOR = {Poulin, Pierre and Navazo, Isabel}, }
Endnote
%0 Journal Article %A Milliez, Antoine %A Wand, Michael %A Cani, Marie-Paule %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Mutable Elastic Models for Sculpting Structured Shapes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CCE-2 %R 10.1111/cgf.12022 %F OTHER: Local-ID: 54D78E6C8E10AB4CC1257C130048CEEA-Milliez2013 %7 2013-05-06 %D 2013 %J Computer Graphics Forum %V 32 %N 2 %& 21 %P 21 - 30 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2013 %O EUROGRAPHICS 2013 The European Association for Computer Graphics 34th Annual Conference ; Girona, Spain, May 6th - 10th, 2013 EG 2013
Manakov, A., Restrepo, J.F., Klehm, O., et al. 2013. A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2013)32, 4.
Abstract
We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.
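As a rough illustration of how the identical copies could be combined in the high-dynamic-range configuration, assume each copy is observed through a neutral-density filter of known transmittance (the optical copying mechanism, calibration, and registration are the paper's actual contribution and are omitted here):

    import numpy as np

    def merge_filtered_copies(copies, transmittances, sat=0.98):
        # Average the unsaturated, exposure-normalized copies per pixel.
        est = np.zeros_like(copies[0], dtype=np.float64)
        weight = np.zeros_like(est)
        for img, t in zip(copies, transmittances):
            valid = img < sat                      # skip saturated pixels
            est += np.where(valid, img / t, 0.0)
            weight += valid
        return est / np.maximum(weight, 1.0)       # HDR radiance estimate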
Export
BibTeX
@article{Manakov2013, TITLE = {A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging}, AUTHOR = {Manakov, Alkhazur and Restrepo, John F. and Klehm, Oliver and Heged{\"u}s, Ramon and Eisemann, Elmar and Seidel, Hans-Peter and Ihrke, Ivo}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2461912.2461937}, LOCALID = {Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {32}, NUMBER = {4}, PAGES = {1--14}, EID = {47}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2013}, }
Endnote
%0 Journal Article %A Manakov, Alkhazur %A Restrepo, John F. %A Klehm, Oliver %A Hegedüs, Ramon %A Eisemann, Elmar %A Seidel, Hans-Peter %A Ihrke, Ivo %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Reconfigurable Camera Add-on for High Dynamic Range, Multi-spectral, Polarization, and Light-field Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3CDD-F %R 10.1145/2461912.2461937 %F OTHER: Local-ID: 2AF094BD6240B2D2C1257C13003B6CBD-Manakov2013 %7 2013 %D 2013 %X We propose a non-permanent add-on that enables plenoptic imaging with standard cameras. Our design is based on a physical copying mechanism that multiplies a sensor image into a number of identical copies that still carry the plenoptic information of interest. Via different optical filters, we can then recover the desired information. A minor modification of the design also allows for aperture sub-sampling and, hence, light-field imaging. As the filters in our design are exchangeable, a reconfiguration for different imaging purposes is possible. We show in a prototype setup that high dynamic range, multispectral, polarization, and light-field imaging can be achieved with our design. %J ACM Transactions on Graphics %V 32 %N 4 %& 1 %P 1 - 14 %Z sequence number: 47 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2013 %O ACM SIGGRAPH 2013 Anaheim, California, 21 - 25 July 2013
Liu, Y., Gall, J., Stoll, C., Dai, Q., Seidel, H.-P., and Theobalt, C. 2013. Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence35, 11.
Export
BibTeX
@article{LiuPami2013, TITLE = {Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation}, AUTHOR = {Liu, Yebin and Gall, J{\"u}rgen and Stoll, Carsten and Dai, Qionghai and Seidel, Hans-Peter and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0162-8828}, DOI = {10.1109/TPAMI.2013.47}, LOCALID = {Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2013}, DATE = {2013}, JOURNAL = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, VOLUME = {35}, NUMBER = {11}, PAGES = {2720--2735}, }
Endnote
%0 Journal Article %A Liu, Yebin %A Gall, Jürgen %A Stoll, Carsten %A Dai, Qionghai %A Seidel, Hans-Peter %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Markerless Motion Capture of Multiple Characters Using Multiview Image Segmentation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3937-8 %R 10.1109/TPAMI.2013.47 %F OTHER: Local-ID: 3A056CE707FBCCD9C1257C6000533A6F-LiuPami2013 %7 2013-02-21 %D 2013 %J IEEE Transactions on Pattern Analysis and Machine Intelligence %O IEEE Trans. Pattern Anal. Mach. Intell. %V 35 %N 11 %& 2720 %P 2720 - 2735 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Lee, S., Sips, M., and Seidel, H.-P. 2013. Perceptually Driven Visibility Optimization for Categorical Data Visualization. IEEE Transactions on Visualization and Computer Graphics19, 10.
Export
BibTeX
@article{Seidel2013, TITLE = {Perceptually Driven Visibility Optimization for Categorical Data Visualization}, AUTHOR = {Lee, Sungkil and Sips, Mike and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2012.315}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2013}, DATE = {2013}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {19}, NUMBER = {10}, PAGES = {1746--1757}, }
Endnote
%0 Journal Article %A Lee, Sungkil %A Sips, Mike %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Driven Visibility Optimization for Categorical Data Visualization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0018-A9FB-0 %R 10.1109/TVCG.2012.315 %7 2012-11-30 %D 2013 %J IEEE Transactions on Visualization and Computer Graphics %V 19 %N 10 %& 1746 %P 1746 - 1757 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Kurz, C., Ritschel, T., Eisemann, E., Thormählen, T., and Seidel, H.-P. 2013. Generating Realistic Camera Shake for Virtual Scenes. Journal of Virtual Reality and Broadcasting10, 7.
Abstract
When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth, user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation, and acceleration, our solution learns camera-motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package, the camera-motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path with a mixed-reality camera in a motion-capture studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.
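A minimal sketch of the spectral idea, assuming a single scalar motion channel (e.g., x-position per frame) and one recorded handheld reference track; the paper learns spectral statistics from a labeled database, whereas this toy version merely splices high-frequency content:

    import numpy as np

    def add_shake(base, reference, cutoff=0.2):
        # Keep the low frequencies of the smooth virtual path and borrow
        # everything above `cutoff` (fraction of Nyquist) from the recording.
        n = len(base)
        freqs = np.fft.rfftfreq(n)                 # 0 .. 0.5 cycles/frame
        B = np.fft.rfft(base)
        R = np.fft.rfft(reference[:n])             # reference must be >= n frames
        high = freqs > cutoff * 0.5
        B[high] = R[high]
        return np.fft.irfft(B, n)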
Export
BibTeX
@article{Kurz2013, TITLE = {Generating Realistic Camera Shake for Virtual Scenes}, AUTHOR = {Kurz, Christian and Ritschel, Tobias and Eisemann, Elmar and Thorm{\"a}hlen, Thorsten and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1860-2037}, URL = {urn:nbn:de:0009-6-38335}, LOCALID = {Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013}, PUBLISHER = {Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, K{\"o}ln (HBZ)}, ADDRESS = {K{\"o}ln}, YEAR = {2013}, ABSTRACT = {When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible.}, JOURNAL = {Journal of Virtual Reality and Broadcasting}, VOLUME = {10}, NUMBER = {7}, PAGES = {1--13}, }
Endnote
%0 Journal Article %A Kurz, Christian %A Ritschel, Tobias %A Eisemann, Elmar %A Thormählen, Thorsten %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Generating Realistic Camera Shake for Virtual Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-196D-1 %F OTHER: Local-ID: 43DB142DAE2CF97AC1257C69005B2D67-Kurz2013 %U urn:nbn:de:0009-6-38335 %7 2013 %D 2013 %X When depicting both virtual and physical worlds, the viewer's impression of presence in these worlds is strongly linked to camera motion. Plausible and artist-controlled camera movement can substantially increase scene immersion. While physical camera motion exhibits subtle details of position, rotation, and acceleration, these details are often missing for virtual camera motion. In this work, we analyze camera movement using signal theory. Our system allows us to stylize a smooth user-defined virtual base camera motion by enriching it with plausible details. A key component of our system is a database of videos filmed by physical cameras. These videos are analyzed with a camera-motion estimation algorithm (structure-from-motion) and labeled manually with a specific style. By considering spectral properties of location, orientation and acceleration, our solution learns camera motion details. Consequently, an arbitrary virtual base motion, defined in any conventional animation package, can be automatically modified according to a user-selected style. In an animation package the camera motion base path is typically defined by the user via function curves. Another possibility is to obtain the camera path by using a mixed reality camera in motion capturing studio. As shown in our experiments, the resulting shots are still fully artist-controlled, but appear richer and more physically plausible. %J Journal of Virtual Reality and Broadcasting %V 10 %N 7 %& 1 %P 1 - 13 %I Hochschulbibliothekszentrum des Landes Nordrhein-Westfalen, Köln (HBZ) %C Köln %@ false %U http://www.jvrb.org/past-issues/10.2013/3833/1020137.pdf
Klehm, O., Ihrke, I., Seidel, H.-P., and Eisemann, E. 2013. Volume Stylizer: Tomography-based Volume Painting. Proceedings I3D 2013, ACM.
Abstract
Volumetric phenomena are an integral part of standard rendering, yet no suitable tools to edit their characteristic properties have been available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions that influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering, and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from these perspectives. Via an analysis of the volumetric rendering equation, we show how to link this problem to tomographic reconstruction.
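The link to tomography can be made concrete with the standard emission-absorption model of single scattering (notation ours): along a view ray of length $s$,

    L(\mathbf{x},\boldsymbol{\omega}) = \int_0^{s} T(0,t)\,\sigma(t)\,c(t)\,\mathrm{d}t,
    \qquad T(0,t) = \exp\!\Bigl(-\int_0^{t}\sigma(u)\,\mathrm{d}u\Bigr),

where $\sigma$ is the extinction and $c$ the stylized in-scattered or emitted color. For fixed $\sigma$, the rendered pixel values are linear in $c$, so stacking the pixels of all painted target views yields a linear system $A\,c = b$ of exactly the kind solved in tomographic reconstruction.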
Export
BibTeX
@inproceedings{i3dKlehm2013, TITLE = {Volume Stylizer: {Tomography-based} Volume Painting}, AUTHOR = {Klehm, Oliver and Ihrke, Ivo and Seidel, Hans-Peter and Eisemann, Elmar}, LANGUAGE = {eng}, ISBN = {978-1-4503-1956-0}, DOI = {10.1145/2448196.2448222}, LOCALID = {Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013}, PUBLISHER = {ACM}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {Volumetric phenomena are an integral part of standard rendering, yet no suitable tools to edit their characteristic properties have been available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions that influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering, and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from these perspectives. Via an analysis of the volumetric rendering equation, we show how to link this problem to tomographic reconstruction.}, BOOKTITLE = {Proceedings I3D 2013}, EDITOR = {Olano, Marc and Otaduy, Miguel A. and Meenakshisundaram, Gopi and Yoon, Sung-Eui and Spencer, Stephen N.}, PAGES = {161--168}, ADDRESS = {Orlando, FL, USA}, }
Endnote
%0 Conference Proceedings %A Klehm, Oliver %A Ihrke, Ivo %A Seidel, Hans-Peter %A Eisemann, Elmar %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Volume Stylizer: Tomography-based Volume Painting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3817-9 %R 10.1145/2448196.2448222 %F OTHER: Local-ID: A0B42A95204F2B1EC1257B03005B313A-i3dKlehm2013 %D 2013 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2013-03-21 - 2013-03-23 %C Orlando, FL, USA %X Volumetric phenomena are an integral part of standard rendering, yet no suitable tools to edit their characteristic properties have been available so far. Either simulation results are used directly, or modifications are high-level, e.g., noise functions that influence appearance. Intuitive artistic control is not possible. We propose a solution to stylize single-scattering volumetric effects. Emission, scattering, and extinction become amenable to artistic control while preserving a smooth and coherent appearance when changing the viewpoint. Our approach lets the user define a number of target views to be matched when observing the volume from these perspectives. Via an analysis of the volumetric rendering equation, we show how to link this problem to tomographic reconstruction. %B Proceedings I3D 2013 %E Olano, Marc; Otaduy, Miguel A.; Meenakshisundaram, Gopi; Yoon, Sung-Eui; Spencer, Stephen N. %P 161 - 168 %I ACM %@ 978-1-4503-1956-0
Kerber, J., Bokeloh, M., Wand, M., and Seidel, H.-P. 2013. Scalable Symmetry Detection for Urban Scenes. Computer Graphics Forum32, 1.
Export
BibTeX
@article{Kerber2013_1, TITLE = {Scalable Symmetry Detection for Urban Scenes}, AUTHOR = {Kerber, Jens and Bokeloh, Martin and Wand, Michael and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/j.1467-8659.2012.03226.x}, LOCALID = {Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, JOURNAL = {Computer Graphics Forum}, VOLUME = {32}, NUMBER = {1}, PAGES = {3--15}, }
Endnote
%0 Journal Article %A Kerber, Jens %A Bokeloh, Martin %A Wand, Michael %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Symmetry Detection for Urban Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-17F3-0 %R 10.1111/j.1467-8659.2012.03226.x %F OTHER: Local-ID: FC00BBDD131C5BC2C1257AED003BCDC9-Kerber2013_1 %7 2012-10-09 %D 2013 %J Computer Graphics Forum %V 32 %N 1 %& 3 %P 3 - 15 %I Wiley-Blackwell %C Oxford, UK
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2013. Optimizing Disparity for Motion in Depth. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2013)32, 4.
Abstract
Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content, however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted by manipulations that only change disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve the stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion-reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.
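In equation form (our notation, not the paper's): if $d(t)$ is the original disparity of a scene point over time and $m(\cdot)$ an arbitrary disparity manipulation, the optimization can be thought of as finding a new disparity $d'(t)$ that stays close to the manipulated values while preserving the temporal disparity changes that convey motion in depth:

    \min_{d'}\ \sum_{t}\Bigl[\bigl(d'(t)-m(d(t))\bigr)^2+\lambda\,\bigl(\partial_t d'(t)-\partial_t d(t)\bigr)^2\Bigr].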
Export
BibTeX
@article{Kellnhofer2013, TITLE = {Optimizing Disparity for Motion in Depth}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12160}, LOCALID = {Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {32}, NUMBER = {4}, PAGES = {143--152}, BOOKTITLE = {Eurographics Symposium on Rendering 2013}, EDITOR = {Holzschuch, N. and Rusinkiewicz, S.}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimizing Disparity for Motion in Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-