Last Year

Article
Ansari, N., Alizadeh-Mousavi, O., Seidel, H.-P., and Babaei, V. 2020. Mixed Integer Ink Selection for Spectral Reproduction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Ansari_ToG2020, TITLE = {Mixed Integer Ink Selection for Spectral Reproduction}, AUTHOR = {Ansari, Navid and Alizadeh-Mousavi, Omid and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417761}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {255}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Ansari, Navid %A Alizadeh-Mousavi, Omid %A Seidel, Hans-Peter %A Babaei, Vahid %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Ink Selection for Spectral Reproduction : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B23-3 %R 10.1145/3414685.3417761 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 255 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020a. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Bemana2020, TITLE = {X-{F}ields: {I}mplicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417827}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {257}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBF0-0 %R 10.1145/3414685.3417827 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 257 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Çoğalan, U. and Akyüz, A.O. 2020. Deep Joint Deinterlacing and Denoising for Single Shot Dual-ISO HDR Reconstruction. IEEE Transactions on Image Processing 29.
BibTeX
@article{Cogalan2020, TITLE = {Deep Joint Deinterlacing and Denoising for Single Shot Dual-{ISO HDR} Reconstruction}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Aky{\"u}z, Ahmet O{\u g}uz}, LANGUAGE = {eng}, ISSN = {1057-7149}, DOI = {10.1109/TIP.2020.3004014}, PUBLISHER = {IEEE}, ADDRESS = {Piscataway, NJ}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {IEEE Transactions on Image Processing}, VOLUME = {29}, PAGES = {7511--7524}, }
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Akyüz, Ahmet Oğuz %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Joint Deinterlacing and Denoising for Single Shot Dual-ISO HDR Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DCA7-6 %R 10.1109/TIP.2020.3004014 %7 2020 %D 2020 %J IEEE Transactions on Image Processing %V 29 %& 7511 %P 7511 - 7524 %I IEEE %C Piscataway, NJ %@ false
Cucerca, S., Didyk, P., Seidel, H.-P., and Babaei, V. 2020. Computational Image Marking on Metals via Laser Induced Heating. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
BibTeX
@article{Cucerca_SIGGRAPH2020, TITLE = {Computational Image Marking on Metals via Laser Induced Heating}, AUTHOR = {Cucerca, Sebastian and Didyk, Piotr and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392423}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {70}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Cucerca, Sebastian %A Didyk, Piotr %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computational Image Marking on Metals via Laser Induced Heating : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9664-F %R 10.1145/3386569.3392423 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 70 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Egger, B., Smith, W.A.P., Tewari, A., et al. 2020. 3D Morphable Face Models - Past, Present and Future. ACM Transactions on Graphics 39, 5.
BibTeX
@article{Egger_TOG2020, TITLE = {{3D} Morphable Face Models -- Past, Present and Future}, AUTHOR = {Egger, Bernhard and Smith, William A. P. and Tewari, Ayush and Wuhrer, Stefanie and Zollh{\"o}fer, Michael and Beeler, Thabo and Bernard, Florian and Bolkart, Timo and Kortylewski, Adam and Romdhani, Sami and Theobalt, Christian and Blanz, Volker and Vetter, Thomas}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3395208}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {39}, NUMBER = {5}, EID = {157}, }
Endnote
%0 Journal Article %A Egger, Bernhard %A Smith, William A. P. %A Tewari, Ayush %A Wuhrer, Stefanie %A Zollhöfer, Michael %A Beeler, Thabo %A Bernard, Florian %A Bolkart, Timo %A Kortylewski, Adam %A Romdhani, Sami %A Theobalt, Christian %A Blanz, Volker %A Vetter, Thomas %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T 3D Morphable Face Models - Past, Present and Future : %G eng %U http://hdl.handle.net/21.11116/0000-0007-1CF5-6 %R 10.1145/3395208 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 5 %Z sequence number: 157 %I ACM %C New York, NY %@ false
Elgharib, M., Mendiratta, M., Thies, J., et al. 2020. Egocentric Videoconferencing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Elgharib_ToG2020, TITLE = {Egocentric Videoconferencing}, AUTHOR = {Elgharib, Mohamed and Mendiratta, Mohit and Thies, Justus and Nie{\ss}ner, Matthias and Seidel, Hans-Peter and Tewari, Ayush and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417808}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Elgharib, Mohamed %A Mendiratta, Mohit %A Thies, Justus %A Nießner, Matthias %A Seidel, Hans-Peter %A Tewari, Ayush %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Egocentric Videoconferencing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B36-E %R 10.1145/3414685.3417808 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Günther, F., Jiang, C., and Pottmann, H. 2020. Smooth Polyhedral Surfaces. Advances in Mathematics 363.
(arXiv: 1703.05318)
Abstract
Polyhedral surfaces are fundamental objects in architectural geometry and industrial design. Whereas closeness of a given mesh to a smooth reference surface and its suitability for numerical simulations were already studied extensively, the aim of our work is to find and to discuss suitable assessments of smoothness of polyhedral surfaces that only take the geometry of the polyhedral surface itself into account. Motivated by analogies to classical differential geometry, we propose a theory of smoothness of polyhedral surfaces including suitable notions of normal vectors, tangent planes, asymptotic directions, and parabolic curves that are invariant under projective transformations. It is remarkable that seemingly mild conditions significantly limit the shapes of faces of a smooth polyhedral surface. Besides being of theoretical interest, we believe that smoothness of polyhedral surfaces is of interest in the architectural context, where vertices and edges of polyhedral surfaces are highly visible.
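To see what is at stake, a quick contrast (our illustration, not a definition from the paper): the classical discrete vertex normal below depends on face areas and is therefore not invariant under projective transformations, which is exactly the kind of dependence the paper's notions avoid.

% Illustrative standard notion only; the paper develops projectively
% invariant replacements for it.
\[
\mathbf{n}(v) \;=\; \frac{\sum_{f \ni v} A_f \, \mathbf{n}_f}{\bigl\lVert \sum_{f \ni v} A_f \, \mathbf{n}_f \bigr\rVert},
\]
where $A_f$ and $\mathbf{n}_f$ denote the area and unit normal of each face $f$ incident to the vertex $v$.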
BibTeX
@article{Guenther2020, TITLE = {Smooth Polyhedral Surfaces}, AUTHOR = {G{\"u}nther, Felix and Jiang, Caigui and Pottmann, Helmut}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1703.05318}, DOI = {10.1016/j.aim.2020.107004}, EPRINT = {1703.05318}, EPRINTTYPE = {arXiv}, PUBLISHER = {Elsevier}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, ABSTRACT = {Polyhedral surfaces are fundamental objects in architectural geometry and industrial design. Whereas closeness of a given mesh to a smooth reference surface and its suitability for numerical simulations were already studied extensively, the aim of our work is to find and to discuss suitable assessments of smoothness of polyhedral surfaces that only take the geometry of the polyhedral surface itself into account. Motivated by analogies to classical differential geometry, we propose a theory of smoothness of polyhedral surfaces including suitable notions of normal vectors, tangent planes, asymptotic directions, and parabolic curves that are invariant under projective transformations. It is remarkable that seemingly mild conditions significantly limit the shapes of faces of a smooth polyhedral surface. Besides being of theoretical interest, we believe that smoothness of polyhedral surfaces is of interest in the architectural context, where vertices and edges of polyhedral surfaces are highly visible.}, JOURNAL = {Advances in Mathematics}, VOLUME = {363}, EID = {107004}, }
Endnote
%0 Journal Article %A Günther, Felix %A Jiang, Caigui %A Pottmann, Helmut %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Smooth Polyhedral Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0006-9760-3 %U http://arxiv.org/abs/1703.05318 %R 10.1016/j.aim.2020.107004 %D 2020 %* Review method: peer-reviewed %X Polyhedral surfaces are fundamental objects in architectural geometry and industrial design. Whereas closeness of a given mesh to a smooth reference surface and its suitability for numerical simulations were already studied extensively, the aim of our work is to find and to discuss suitable assessments of smoothness of polyhedral surfaces that only take the geometry of the polyhedral surface itself into account. Motivated by analogies to classical differential geometry, we propose a theory of smoothness of polyhedral surfaces including suitable notions of normal vectors, tangent planes, asymptotic directions, and parabolic curves that are invariant under projective transformations. It is remarkable that seemingly mild conditions significantly limit the shapes of faces of a smooth polyhedral surface. Besides being of theoretical interest, we believe that smoothness of polyhedral surfaces is of interest in the architectural context, where vertices and edges of polyhedral surfaces are highly visible. %K Mathematics, Metric Geometry, Mathematics, Differential Geometry %J Advances in Mathematics %O Adv. Math. %V 363 %Z sequence number: 107004 %I Elsevier
Mehta, D., Sotnychenko, O., Mueller, F., et al. 2020. XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
BibTeX
@article{Mehta_TOG2020, TITLE = {{XNect}: {R}eal-time Multi-person {3D} Human Pose Estimation with a Single {RGB} Camera}, AUTHOR = {Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392410}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {82}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Mehta, Dushyant %A Sotnychenko, Oleksandr %A Mueller, Franziska %A Xu, Weipeng %A Elgharib, Mohamed %A Fua, Pascal %A Seidel, Hans-Peter %A Rhodin, Helge %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera : %G eng %U http://hdl.handle.net/21.11116/0000-0007-832D-3 %R 10.1145/3386569.3392410 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 82 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Meka, A., Pandey, R., Häne, C., et al. 2020. Deep Relightable Textures: Volumetric Performance Capture with Neural Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Meka_ToG2020, TITLE = {Deep Relightable Textures: {V}olumetric Performance Capture with Neural Rendering}, AUTHOR = {Meka, Abhimitra and Pandey, Rohit and H{\"a}ne, Christian and Orts-Escolano, Sergio and Barnum, Peter and Davidson, Philip and Erickson, Daniel and Zhang, Yinda and Taylor, Jonathan and Bouaziz, Sofien and Legendre, Chloe and Ma, Wan-Chun and Overbeck, Ryan and Beeler, Thabo and Debevec, Paul and Izadi, Shahram and Theobalt, Christian and Rhemann, Christoph and Fanello, Sean}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417814}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {259}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Meka, Abhimitra %A Pandey, Rohit %A Häne, Christian %A Orts-Escolano, Sergio %A Barnum, Peter %A Davidson, Philip %A Erickson, Daniel %A Zhang, Yinda %A Taylor, Jonathan %A Bouaziz, Sofien %A Legendre, Chloe %A Ma, Wan-Chun %A Overbeck, Ryan %A Beeler, Thabo %A Debevec, Paul %A Izadi, Shahram %A Theobalt, Christian %A Rhemann, Christoph %A Fanello, Sean %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Deep Relightable Textures: Volumetric Performance Capture with Neural Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A6FA-4 %R 10.1145/3414685.3417814 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 259 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY %U https://dl.acm.org/doi/pdf/10.1145/3414685.3417814
Mlakar, D., Winter, M., Stadlbauer, P., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU. Computer Graphics Forum (Proc. EUROGRAPHICS 2020) 39, 2.
BibTeX
@article{Mlakar_EG2020, TITLE = {Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the {GPU}}, AUTHOR = {Mlakar, Daniel and Winter, M. and Stadlbauer, Pascal and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13934}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {39}, NUMBER = {2}, PAGES = {335--349}, BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)}, EDITOR = {Panozzo, Daniele and Assarsson, Ulf}, }
Endnote
%0 Journal Article %A Mlakar, Daniel %A Winter, M. %A Stadlbauer, Pascal %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Subdivision-Specialized Linear Algebra Kernels for Static and Dynamic Mesh Connectivity on the GPU : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB80-2 %R 10.1111/cgf.13934 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 335 %P 335 - 349 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrköping, Sweden, May 25 – 29, 2020
Piovarči, M., Foshey, M., Babaei, V., Rusinkiewicz, S., Matusik, W., and Didyk, P. 2020. Towards Spatially Varying Gloss Reproduction for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Piovarci_ToG2020, TITLE = {Towards Spatially Varying Gloss Reproduction for {3D} Printing}, AUTHOR = {Piovar{\v c}i, Michal and Foshey, Michael and Babaei, Vahid and Rusinkiewicz, Szymon and Matusik, Wojciech and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417850}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {206}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Piovarči, Michal %A Foshey, Michael %A Babaei, Vahid %A Rusinkiewicz, Szymon %A Matusik, Wojciech %A Didyk, Piotr %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Towards Spatially Varying Gloss Reproduction for 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A6FE-0 %R 10.1145/3414685.3417850 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 206 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Saberpour, A., Hersch, R.D., Fang, J., Zayer, R., Seidel, H.-P., and Babaei, V. 2020. Fabrication of Moiré on Curved Surfaces. Optics Express 28, 13.
BibTeX
@article{Saberpour2020, TITLE = {Fabrication of Moir{\'e} on Curved Surfaces}, AUTHOR = {Saberpour, Artin and Hersch, Roger D. and Fang, Jiajing and Zayer, Rhaleb and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {1094-4087}, DOI = {10.1364/OE.393843}, PUBLISHER = {Optical Society of America}, ADDRESS = {Washington, DC}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {Optics Express}, VOLUME = {28}, NUMBER = {13}, PAGES = {19413--19427}, }
Endnote
%0 Journal Article %A Saberpour, Artin %A Hersch, Roger D. %A Fang, Jiajing %A Zayer, Rhaleb %A Seidel, Hans-Peter %A Babaei, Vahid %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Fabrication of Moiré on Curved Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-0006-D39D-B %R 10.1364/OE.393843 %7 2020 %D 2020 %J Optics Express %O Opt. Express %V 28 %N 13 %& 19413 %P 19413 - 19427 %I Optical Society of America %C Washington, DC %@ false
Serrano, A., Martin, D., Gutierrez, D., Myszkowski, K., and Masia, B. 2020. Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Serrano2020, TITLE = {Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications}, AUTHOR = {Serrano, Ana and Martin, Daniel and Gutierrez, Diego and Myszkowski, Karol and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417773}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Martin, Daniel %A Gutierrez, Diego %A Myszkowski, Karol %A Masia, Belen %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBE8-A %R 10.1145/3414685.3417773 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Shimada, S., Golyanik, V., Xu, W., and Theobalt, C. 2020. PhysCap: Physically Plausible Monocular 3D Motion Capture in Real Time. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Shimada_ToG2020, TITLE = {{PhysCap}: {P}hysically Plausible Monocular {3D} Motion Capture in Real Time}, AUTHOR = {Shimada, Soshi and Golyanik, Vladislav and Xu, Weipeng and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417877}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {235}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Shimada, Soshi %A Golyanik, Vladislav %A Xu, Weipeng %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PhysCap: Physically Plausible Monocular 3D Motion Capture in Real Time : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A709-3 %R 10.1145/3414685.3417877 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 235 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Singh, G., Subr, K., Coeurjolly, D., Ostromoukhov, V., and Jarosz, W. 2020. Fourier Analysis of Correlated Monte Carlo Importance Sampling. Computer Graphics Forum 39, 1.
BibTeX
@article{SinghCGF2020, TITLE = {Fourier Analysis of Correlated {Monte Carlo} Importance Sampling}, AUTHOR = {Singh, Gurprit and Subr, Kartic and Coeurjolly, David and Ostromoukhov, Victor and Jarosz, Wojciech}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13613}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {Computer Graphics Forum}, VOLUME = {39}, NUMBER = {1}, PAGES = {7--19}, }
Endnote
%0 Journal Article %A Singh, Gurprit %A Subr, Kartic %A Coeurjolly, David %A Ostromoukhov, Victor %A Jarosz, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T Fourier Analysis of Correlated Monte Carlo Importance Sampling : %G eng %U http://hdl.handle.net/21.11116/0000-0006-978D-1 %R 10.1111/cgf.13613 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 1 %& 7 %P 7 - 19 %I Blackwell-Wiley %C Oxford %@ false
Stadlbauer, P., Mlakar, D., Seidel, H.-P., Steinberger, M., and Zayer, R. 2020. Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing. Computer Graphics Forum (Proc. EUROGRAPHICS 2020) 39, 2.
BibTeX
@article{Stadlbauer_EG2020, TITLE = {Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing}, AUTHOR = {Stadlbauer, Pascal and Mlakar, Daniel and Seidel, Hans-Peter and Steinberger, Markus and Zayer, Rhaleb}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13929}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {39}, NUMBER = {2}, PAGES = {277--289}, BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)}, EDITOR = {Panozzo, Daniele and Assarsson, Ulf}, }
Endnote
%0 Journal Article %A Stadlbauer, Pascal %A Mlakar, Daniel %A Seidel, Hans-Peter %A Steinberger, Markus %A Zayer, Rhaleb %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Modeling of Cellular Structures on Surfaces with Application to Additive Manufacturing : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB8A-8 %R 10.1111/cgf.13929 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 277 %P 277 - 289 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrköping, Sweden, May 25 – 29, 2020
Sultan, A.S., Elgharib, M., Tavares, T., Jessri, M., and Basile, J.R. 2020. The Use of Artificial Intelligence, Machine Learning and Deep Learning in Oncologic Histopathology. Journal of Oral Pathology & Medicine.
BibTeX
@article{Sultan2020, TITLE = {The Use of Artificial Intelligence, Machine Learning and Deep Learning in Oncologic Histopathology}, AUTHOR = {Sultan, Ahmed S. and Elgharib, Mohamed and Tavares, Tiffany and Jessri, Maryam and Basile, John R.}, LANGUAGE = {eng}, ISSN = {0904-2512}, DOI = {10.1111/jop.13042}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {Journal of Oral Pathology \& Medicine}, }
Endnote
%0 Journal Article %A Sultan, Ahmed S. %A Elgharib, Mohamed %A Tavares, Tiffany %A Jessri, Maryam %A Basile, John R. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T The Use of Artificial Intelligence, Machine Learning and Deep Learning in Oncologic Histopathology : %G eng %U http://hdl.handle.net/21.11116/0000-0006-A2C9-0 %R 10.1111/jop.13042 %7 2020 %D 2020 %J Journal of Oral Pathology & Medicine %I Wiley-Blackwell %C Oxford %@ false
Tewari, A., Fried, O., Thies, J., et al. 2020a. State of the Art on Neural Rendering. Computer Graphics Forum (Proc. EUROGRAPHICS 2020) 39, 2.
BibTeX
@article{Tewari_EG2020, TITLE = {State of the Art on Neural Rendering}, AUTHOR = {Tewari, Ayush and Fried, O. and Thies, J. and Sitzmann, V. and Lombardi, S. and Sunkavalli, K. and Martin-Brualla, R. and Simon, T. and Saragih, J. and Nie{\ss}ner, M. and Pandey, R. and Fanello, S. and Wetzstein, G. and Zhu, J.-Y. and Theobalt, Christian and Agrawala, M. and Shechtman, E. and Goldman, D. B and Zollh{\"o}fer, Michael}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.14022}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {39}, NUMBER = {2}, PAGES = {701--727}, BOOKTITLE = {The European Association for Computer Graphics 41st Annual Conference (EUROGRAPHICS 2020)}, EDITOR = {Panozzo, Daniele and Assarsson, Ulf}, }
Endnote
%0 Journal Article %A Tewari, Ayush %A Fried, O. %A Thies, J. %A Sitzmann, V. %A Lombardi, S. %A Sunkavalli, K. %A Martin‐Brualla, R. %A Simon, T. %A Saragih, J. %A Nießner, M. %A Pandey, R. %A Fanello, S. %A Wetzstein, G. %A Zhu, J.‐Y. %A Theobalt, Christian %A Agrawala, M. %A Shechtman, E. %A Goldman, D. B %A Zollhöfer, Michael %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T State of the Art on Neural Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0006-DB93-D %R 10.1111/cgf.14022 %7 2020 %D 2020 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 39 %N 2 %& 701 %P 701 - 727 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 41st Annual Conference %O EUROGRAPHICS 2020 EG 2020 The European Association for Computer Graphics 41st Annual Conference ; Norrköping, Sweden, May 25 – 29, 2020
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020b. PIE: Portrait Image Embedding for Semantic Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Tewari_ToG2020, TITLE = {{PIE}: {P}ortrait Image Embedding for Semantic Control}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R, and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417803}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {223}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B0C-E %R 10.1145/3414685.3417803 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 223 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %I ACM %C New York, NY
Wang, P., Liu, L., Chen, N., Chu, H.-K., Theobalt, C., and Wang, W. 2020. Vid2Curve: Simultaneous Camera Motion Estimation and Thin Structure Reconstruction from an RGB Video. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2020) 39, 4.
BibTeX
@article{Wang_SIGGRAPH2020, TITLE = {{Vid2Curve}: {S}imultaneous Camera Motion Estimation and Thin Structure Reconstruction from an {RGB} Video}, AUTHOR = {Wang, Peng and Liu, Lingjie and Chen, Nenglun and Chu, Hung-Kuo and Theobalt, Christian and Wang, Wenping}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3386569.3392476}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {39}, NUMBER = {4}, EID = {132}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2020}, }
Endnote
%0 Journal Article %A Wang, Peng %A Liu, Lingjie %A Chen, Nenglun %A Chu, Hung-Kuo %A Theobalt, Christian %A Wang, Wenping %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Vid2Curve: Simultaneous Camera Motion Estimation and Thin Structure Reconstruction from an RGB Video : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9A74-9 %R 10.1145/3386569.3392476 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 4 %Z sequence number: 132 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2020 %O ACM SIGGRAPH 2020 Virtual Conference ; 2020, 17-28 August
Zheng, Q., Babaei, V., Wetzstein, G., Seidel, H.-P., Zwicker, M., and Singh, G. 2020. Neural Light Field 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Zheng_TOG2020, TITLE = {Neural Light Field {3D} Printing}, AUTHOR = {Zheng, Quan and Babaei, Vahid and Wetzstein, Gordon and Seidel, Hans-Peter and Zwicker, Matthias and Singh, Gurprit}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {207}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Zheng, Quan %A Babaei, Vahid %A Wetzstein, Gordon %A Seidel, Hans-Peter %A Zwicker, Matthias %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Light Field 3D Printing : %U http://hdl.handle.net/21.11116/0000-0007-9AA8-E %R 10.1145/3414685.3417879 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 207 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Conference Paper
Bhatnagar, B.L., Sminchisescu, C., Theobalt, C., and Pons-Moll, G. 2020a. Combining Implicit Function Learning and Parametric Models for 3D Human Reconstruction. Computer Vision -- ECCV 2020, Springer.
BibTeX
@inproceedings{bhatnagar2020ipnet, TITLE = {Combining Implicit Function Learning and Parametric Models for {3D} Human Reconstruction}, AUTHOR = {Bhatnagar, Bharat Lal and Sminchisescu, Cristian and Theobalt, Christian and Pons-Moll, Gerard}, LANGUAGE = {eng}, ISBN = {978-3-030-58535-8}, DOI = {10.1007/978-3-030-58536-5_19}, PUBLISHER = {Springer}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, BOOKTITLE = {Computer Vision -- ECCV 2020}, EDITOR = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael}, PAGES = {311--329}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {12347}, ADDRESS = {Glasgow, UK}, }
Endnote
%0 Conference Proceedings %A Bhatnagar, Bharat Lal %A Sminchisescu, Cristian %A Theobalt, Christian %A Pons-Moll, Gerard %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Combining Implicit Function Learning and Parametric Models for 3D Human Reconstruction : %G eng %U http://hdl.handle.net/21.11116/0000-0006-E89E-3 %R 10.1007/978-3-030-58536-5_19 %D 2020 %B 16th European Conference on Computer Vision %Z date of event: 2020-08-23 - 2020-08-28 %C Glasgow, UK %B Computer Vision -- ECCV 2020 %E Vedaldi, Andrea; Bischof, Horst; Brox, Thomas; Frahm, Jan-Michael %P 311 - 329 %I Springer %@ 978-3-030-58535-8 %B Lecture Notes in Computer Science %N 12347
Bhatnagar, B.L., Sminchisescu, C., Theobalt, C., and Pons-Moll, G. 2020b. LoopReg: Self-supervised Learning of Implicit Surface Correspondences, Pose and Shape for 3D Human Mesh Registration. Advances in Neural Information Processing Systems 33 (NIPS 2020), Curran Associates, Inc.
BibTeX
@inproceedings{bhatnagar2020loopreg, TITLE = {{LoopReg}: Self-supervised Learning of Implicit Surface Correspondences, Pose and Shape for {3D} Human Mesh Registration}, AUTHOR = {Bhatnagar, Bharat Lal and Sminchisescu, Cristian and Theobalt, Christian and Pons-Moll, Gerard}, LANGUAGE = {eng}, PUBLISHER = {Curran Associates, Inc.}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Advances in Neural Information Processing Systems 33 (NIPS 2020)}, EDITOR = {Larochelle, H. and Ranzato, M. and Hadsell, R. and Balcan, M. F. and Lin, H.}, ADDRESS = {Virtual Event}, }
Endnote
%0 Conference Proceedings %A Bhatnagar, Bharat Lal %A Sminchisescu, Cristian %A Theobalt, Christian %A Pons-Moll, Gerard %+ Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T LoopReg: Self-supervised Learning of Implicit Surface Correspondences, Pose and Shape for 3D Human Mesh Registration : %G eng %U http://hdl.handle.net/21.11116/0000-0007-6FD1-1 %D 2020 %B 34th Conference on Neural Information Processing Systems %Z date of event: 2020-12-06 - 2020-12-12 %C Virtual Event %B Advances in Neural Information Processing Systems 33 %E Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M. F.; Lin, H. %I Curran Associates, Inc. %U https://papers.nips.cc/paper/2020/file/970af30e481057c48f87e101b61e6994-Paper.pdf
Dunn, D., Tursun, O., Yu, H., Didyk, P., Myszkowski, K., and Fuchs, H. Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays. IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020), IEEE.
(Accepted/in press)
BibTeX
@inproceedings{Dunn2020, TITLE = {Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays}, AUTHOR = {Dunn, David and Tursun, Okan and Yu, Hyeonseung and Didyk, Piotr and Myszkowski, Karol and Fuchs, Henry}, LANGUAGE = {eng}, PUBLISHER = {IEEE}, YEAR = {2020}, PUBLREMARK = {Accepted}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020)}, ADDRESS = {Porto de Galinhas, Brazil}, }
Endnote
%0 Conference Proceedings %A Dunn, David %A Tursun, Okan %A Yu, Hyeonseung %A Didyk, Piotr %A Myszkowski, Karol %A Fuchs, Henry %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBDF-5 %D 2020 %B International Symposium on Mixed and Augmented Reality %Z date of event: 2020-11-09 - 2020-11-13 %C Porto de Galinhas, Brazil %B IEEE International Symposium on Mixed and Augmented Reality %I IEEE
Habermann, M., Xu, W., Zollhöfer, M., Pons-Moll, G., and Theobalt, C. 2020. DeepCap: Monocular Human Performance Capture Using Weak Supervision. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
BibTeX
@inproceedings{deepcap2020, TITLE = {{DeepCap}: {M}onocular Human Performance Capture Using Weak Supervision}, AUTHOR = {Habermann, Marc and Xu, Weipeng and Zollh{\"o}fer, Michael and Pons-Moll, Gerard and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-7281-7168-5}, DOI = {10.1109/CVPR42600.2020.00510}, PUBLISHER = {IEEE}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)}, PAGES = {5051--5062}, ADDRESS = {Seattle, WA, USA (Virtual)}, }
Endnote
%0 Conference Proceedings %A Habermann, Marc %A Xu, Weipeng %A Zollhöfer, Michael %A Pons-Moll, Gerard %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T DeepCap: Monocular Human Performance Capture Using Weak Supervision : %G eng %U http://hdl.handle.net/21.11116/0000-0006-A895-4 %R 10.1109/CVPR42600.2020.00510 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 5051 - 5062 %I IEEE %@ 978-1-7281-7168-5
Huang, L., Gao, C., Zhou, Y., et al. 2020. Universal Physical Camouflage Attacks on Object Detectors. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
BibTeX
@inproceedings{Huang_CVPR2020, TITLE = {Universal Physical Camouflage Attacks on Object Detectors}, AUTHOR = {Huang, Lifeng and Gao, Chengying and Zhou, Yuyin and Xie, Cihang and Yuille, Alan and Zou, Changqing and Liu, Ning}, LANGUAGE = {eng}, ISBN = {978-1-7281-7168-5}, DOI = {10.1109/CVPR42600.2020.00080}, PUBLISHER = {IEEE}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)}, PAGES = {717--726}, ADDRESS = {Seattle, WA, USA (Virtual)}, }
Endnote
%0 Conference Proceedings %A Huang, Lifeng %A Gao, Chengying %A Zhou, Yuyin %A Xie, Cihang %A Yuille, Alan %A Zou, Changqing %A Liu, Ning %+ External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Universal Physical Camouflage Attacks on Object Detectors : %G eng %U http://hdl.handle.net/21.11116/0000-0006-09F0-1 %R 10.1109/CVPR42600.2020.00080 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 717 - 726 %I IEEE %@ 978-1-7281-7168-5
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020c. StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
BibTeX
@inproceedings{Tewari_CVPR2020, TITLE = {{StyleRig}: {R}igging {StyleGAN} for {3D} Control Over Portrait Images}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISBN = {978-1-7281-7168-5}, DOI = {10.1109/CVPR42600.2020.00618}, PUBLISHER = {IEEE}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)}, PAGES = {6141--6150}, ADDRESS = {Seattle, WA, USA (Virtual)}, }
Endnote
%0 Conference Proceedings %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control Over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0E7-D %R 10.1109/CVPR42600.2020.00618 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 6141 - 6150 %I IEEE %@ 978-1-7281-7168-5
Yu, T., Zheng, Z., Zhong, Y., et al. 2020a. SimulCap: Single-View Human Performance Capture with Cloth Simulation. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019), IEEE.
BibTeX
@inproceedings{SimulCap19, TITLE = {{SimulCap}: {S}ingle-View Human Performance Capture with Cloth Simulation}, AUTHOR = {Yu, Tao and Zheng, Zerong and Zhong, Yuan and Zhao, Jianhui and Dai, Qionghai and Pons-Moll, Gerard and Liu, Yebin}, ISBN = {978-1-7281-3293-8}, DOI = {10.1109/CVPR.2019.00565}, PUBLISHER = {IEEE}, YEAR = {2019}, MARGINALMARK = {$\bullet$}, DATE = {2020}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2019)}, PAGES = {5499--5509}, ADDRESS = {Long Beach, CA, USA}, }
Endnote
%0 Conference Proceedings %A Yu, Tao %A Zheng, Zerong %A Zhong, Yuan %A Zhao, Jianhui %A Dai, Qionghai %A Pons-Moll, Gerard %A Liu, Yebin %+ External Organizations External Organizations External Organizations External Organizations External Organizations Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T SimulCap: Single-View Human Performance Capture with Cloth Simulation : %U http://hdl.handle.net/21.11116/0000-0003-651E-B %R 10.1109/CVPR.2019.00565 %D 2020 %B 32nd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2019-06-16 - 2019-06-20 %C Long Beach, CA, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 5499 - 5509 %I IEEE %@ 978-1-7281-3293-8
Yu, Y., Meka, A., Elgharib, M., Seidel, H.-P., Theobalt, C., and Smith, W.A.P. 2020b. Self-supervised Outdoor Scene Relighting. Computer Vision -- ECCV 2020, Springer.
BibTeX
@inproceedings{yu_ECCV20, TITLE = {Self-supervised Outdoor Scene Relighting}, AUTHOR = {Yu, Ye and Meka, Abhimitra and Elgharib, Mohamed and Seidel, Hans-Peter and Theobalt, Christian and Smith, William A. P.}, LANGUAGE = {eng}, ISBN = {978-3-030-58541-9}, DOI = {10.1007/978-3-030-58542-6_6}, PUBLISHER = {Springer}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, BOOKTITLE = {Computer Vision -- ECCV 2020}, EDITOR = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael}, PAGES = {84--101}, SERIES = {Lecture Notes in Computer Science}, VOLUME = {12367}, ADDRESS = {Glasgow, UK}, }
Endnote
%0 Conference Proceedings %A Yu, Ye %A Meka, Abhimitra %A Elgharib, Mohamed %A Seidel, Hans-Peter %A Theobalt, Christian %A Smith, William A. P. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Self-supervised Outdoor Scene Relighting : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0F6-C %R 10.1007/978-3-030-58542-6_6 %D 2020 %B 16th European Conference on Computer Vision %Z date of event: 2020-08-23 - 2020-08-28 %C Glasgow, UK %B Computer Vision -- ECCV 2020 %E Vedaldi, Andrea; Bischof, Horst; Brox, Thomas; Frahm, Jan-Michael %P 84 - 101 %I Springer %@ 978-3-030-58541-9 %B Lecture Notes in Computer Science %N 12367
Zhou, Y., Habermann, M., Xu, W., Habibie, I., Theobalt, C., and Xu, F. 2020. Monocular Real-time Hand Shape and Motion Capture using Multi-modal Data. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020), IEEE.
BibTeX
@inproceedings{zhou2019monocular, TITLE = {Monocular Real-time Hand Shape and Motion Capture using Multi-modal Data}, AUTHOR = {Zhou, Yuxiao and Habermann, Marc and Xu, Weipeng and Habibie, Ikhsanul and Theobalt, Christian and Xu, Feng}, LANGUAGE = {eng}, ISBN = {978-1-7281-7168-5}, DOI = {10.1109/CVPR42600.2020.00539}, PUBLISHER = {IEEE}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2020)}, PAGES = {5345--5354}, ADDRESS = {Seattle, WA, USA (Virtual)}, }
Endnote
%0 Conference Proceedings %A Zhou, Yuxiao %A Habermann, Marc %A Xu, Weipeng %A Habibie, Ikhsanul %A Theobalt, Christian %A Xu, Feng %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Monocular Real-time Hand Shape and Motion Capture using Multi-modal Data : %G eng %U http://hdl.handle.net/21.11116/0000-0006-A89E-B %R 10.1109/CVPR42600.2020.00539 %D 2020 %B 33rd IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2020-06-14 - 2020-06-19 %C Seattle, WA, USA (Virtual) %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %P 5345 - 5354 %I IEEE %@ 978-1-7281-7168-5
Paper
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020b. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. https://arxiv.org/abs/2010.00450.
(arXiv: 2010.00450)
Abstract
We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map that, for any view, time, or light coordinate and for any pixel, can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.
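To make the mechanism concrete, the following minimal PyTorch sketch reduces the idea to a single time coordinate: a small network maps the coordinate to a per-pixel flow field (the implicit map quantifying pixel motion), and two observed images are warped along it and blended. The architecture, resolution, and names such as FlowNet are illustrative assumptions, not the authors' released code.

# Minimal sketch of the X-Fields idea (our illustration, not the paper's code):
# map an interpolation coordinate to per-pixel flow, then warp and blend.
import torch
import torch.nn as nn
import torch.nn.functional as F

class FlowNet(nn.Module):
    def __init__(self, h=64, w=64):
        super().__init__()
        self.h, self.w = h, w
        self.mlp = nn.Sequential(
            nn.Linear(1, 128), nn.ReLU(),
            nn.Linear(128, 2 * h * w))  # one 2D flow vector per pixel

    def forward(self, t):  # t: (N, 1) interpolation coordinate in [0, 1]
        return self.mlp(t).view(-1, self.h, self.w, 2)

def warp(img, flow):
    # img: (N, C, H, W); flow: offsets in normalized [-1, 1] grid coordinates.
    n, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, h),
                            torch.linspace(-1, 1, w), indexing="ij")
    base = torch.stack((xs, ys), dim=-1).unsqueeze(0).expand(n, -1, -1, -1)
    return F.grid_sample(img, base + flow, align_corners=True)

net = FlowNet()
img0 = torch.rand(1, 3, 64, 64)  # observation at t = 0
img1 = torch.rand(1, 3, 64, 64)  # observation at t = 1
flow = net(torch.tensor([[0.5]]))
mid = 0.5 * warp(img0, flow) + 0.5 * warp(img1, -flow)  # naive blend at t = 0.5
# Training would minimize reconstruction error against the captured images at
# their observed coordinates; new coordinates then yield the interpolation.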
BibTeX
@online{Bemana_arXiv2010.00450, TITLE = {X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.00450}, EPRINT = {2010.00450}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We suggest to represent an X-Field -a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof-by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map, that for any view, time, or light coordinate and for any pixel can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.}, }
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6EC-2 %U https://arxiv.org/abs/2010.00450 %D 2020 %X We suggest to represent an X-Field -a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof-by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map, that for any view, time, or light coordinate and for any pixel can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020. HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models. https://arxiv.org/abs/2012.12009.
(arXiv: 2012.12009)
Abstract
We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN), supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; moreover, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, row and column noise, and motion blur from a small number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as relighting with low noise or blur.
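As a hedged illustration of the CLEAN->DISTORTED direction, the NumPy sketch below simulates a dual-exposure readout from a clean frame using a column-interleaved gain pattern, clipping, a simple horizontal blur standing in for motion blur, and pixel/row/column noise. The paper learns its distortion model from data; this hand-coded simulator only shows the shape of the problem.

import numpy as np

def distort(clean, short_gain=1.0, long_gain=4.0, noise=0.02, blur=5, rng=None):
    """Simulate a dual-exposure sensor reading from a CLEAN frame.

    Odd columns: low exposure, sharp, noisy. Even columns: high exposure,
    clipped and blurred (a horizontal box blur stands in for motion blur).
    A simplified stand-in, not the paper's learned noise model.
    """
    rng = rng or np.random.default_rng(0)
    h, w = clean.shape
    kernel = np.ones(blur) / blur
    blurred = np.apply_along_axis(lambda r: np.convolve(r, kernel, "same"), 1, clean)
    out = np.empty_like(clean)
    out[:, 1::2] = short_gain * clean[:, 1::2]                   # odd: short exposure
    out[:, 0::2] = np.clip(long_gain * blurred[:, 0::2], 0, 1)   # even: long, clipped
    out += rng.normal(0, noise, (h, w))   # per-pixel noise
    out += rng.normal(0, noise, (h, 1))   # row noise
    out += rng.normal(0, noise, (1, w))   # column noise
    return np.clip(out, 0, 1)

frame = np.random.default_rng(1).random((64, 64))
print(distort(frame).shape)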
Export
BibTeX
@online{Cogalan_arXiv2012.12009, TITLE = {{HDR} Denoising and Deblurring by Learning Spatio-temporal Distortion Models}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.12009}, EPRINT = {2012.12009}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as relighting with low noise or blur.}, }
Endnote
%0 Report %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B721-5 %U https://arxiv.org/abs/2012.12009 %D 2020 %X We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video in-stead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as re-lighting with low noise or blur. %K eess.IV,Computer Science, Computer Vision and Pattern Recognition, cs.CV
Fox, G., Liu, W., Kim, H., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020. VideoForensicsHQ: Detecting High-quality Manipulated Face Videos. https://arxiv.org/abs/2005.10360.
(arXiv: 2005.10360)
Abstract
New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, and face video analysis. However, there are concerns that they may be used in a malicious way, e.g., to manipulate videos of public figures, politicians, or reporters to spread false information. The research community has therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulations by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore, we propose a new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well suited for detecting fakes of a quality as high as those presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches in terms of both detection accuracy and generalization to unseen fake generation methods and unseen identities.
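For illustration only, a toy spatio-temporal detector: a tiny 3D-CNN whose kernels span both space and time, so temporal inconsistencies can contribute to the real/fake decision. This is not the detector family proposed in the paper; every shape below is a placeholder.

import torch

# Illustrative spatio-temporal fake detector: 3D convolutions see spatial
# texture and temporal structure jointly. A toy, not the paper's model.
detector = torch.nn.Sequential(
    torch.nn.Conv3d(3, 16, kernel_size=3, padding=1),  # input (C, T, H, W)
    torch.nn.ReLU(),
    torch.nn.AdaptiveAvgPool3d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(16, 2),  # logits: real vs. fake
)

clip = torch.randn(1, 3, 8, 64, 64)  # one 8-frame RGB clip
print(detector(clip).shape)          # torch.Size([1, 2])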
Export
BibTeX
@online{Fox_2005.10360, TITLE = {{VideoForensicsHQ}: {D}etecting High-quality Manipulated Face Videos}, AUTHOR = {Fox, Gereon and Liu, Wentao and Kim, Hyeongwoo and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2005.10360}, EPRINT = {2005.10360}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulatons by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore we propose new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities.}, }
Endnote
%0 Report %A Fox, Gereon %A Liu, Wentao %A Kim, Hyeongwoo %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T VideoForensicsHQ: Detecting High-quality Manipulated Face Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B109-7 %U https://arxiv.org/abs/2005.10360 %D 2020 %X New approaches to synthesize and manipulate face videos at very high quality have paved the way for new applications in computer animation, virtual and augmented reality, or face video analysis. However, there are concerns that they may be used in a malicious way, e.g. to manipulate videos of public figures, politicians or reporters, to spread false information. The research community therefore developed techniques for automated detection of modified imagery, and assembled benchmark datasets showing manipulatons by state-of-the-art techniques. In this paper, we contribute to this initiative in two ways: First, we present a new audio-visual benchmark dataset. It shows some of the highest quality visual manipulations available today. Human observers find them significantly harder to identify as forged than videos from other benchmarks. Furthermore we propose new family of deep-learning-based fake detectors, demonstrating that existing detectors are not well-suited for detecting fakes of a quality as high as presented in our dataset. Our detectors examine spatial and temporal features. This allows them to outperform existing approaches both in terms of high detection accuracy and generalization to unseen fake generation methods and unseen identities. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Kappel, M., Golyanik, V., Elgharib, M., et al. 2020. High-Fidelity Neural Human Motion Transfer from Monocular Video. https://arxiv.org/abs/2012.10974.
(arXiv: 2012.10974)
Abstract
Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework that performs high-fidelity and temporally consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of the results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state of the art in terms of video realism. Our code and data will be made publicly available.
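A schematic of the three-stage split, with toy convolutional stages standing in for the recurrent networks of the paper; module names and tensor shapes are invented for illustration.

import torch

# Toy three-stage pipeline: each stage refines the previous representation,
# conditioned on rasterized 2D poses and their temporal derivatives.
class Stage(torch.nn.Module):
    def __init__(self, c_in, c_out):
        super().__init__()
        self.conv = torch.nn.Conv2d(c_in, c_out, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))

shape_net = Stage(2, 8)       # pose channels -> body shape map
structure_net = Stage(8, 8)   # shape -> garment structure
appearance_net = Stage(8, 3)  # structure -> RGB frame

pose = torch.randn(1, 2, 128, 128)  # pose map + temporal derivative
frame = appearance_net(structure_net(shape_net(pose)))
print(frame.shape)  # torch.Size([1, 3, 128, 128])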
Export
BibTeX
@online{Kappel_arXiv2012.10974, TITLE = {High-Fidelity Neural Human Motion Transfer from Monocular Video}, AUTHOR = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.10974}, EPRINT = {2012.10974}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available.}, }
Endnote
%0 Report %A Kappel, Moritz %A Golyanik, Vladislav %A Elgharib, Mohamed %A Henningson, Jann-Ole %A Seidel, Hans-Peter %A Castillo, Susana %A Theobalt, Christian %A Magnor, Marcus A. %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T High-Fidelity Neural Human Motion Transfer from Monocular Video : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B715-3 %U https://arxiv.org/abs/2012.10974 %D 2020 %X Video-based human motion transfer creates video animations of humans following a source motion. Current methods show remarkable results for tightly-clad subjects. However, the lack of temporally consistent handling of plausible clothing dynamics, including fine and high-frequency details, significantly limits the attainable visual quality. We address these limitations for the first time in the literature and present a new framework which performs high-fidelity and temporally-consistent human motion transfer with natural pose-dependent non-rigid deformations, for several types of loose garments. In contrast to the previous techniques, we perform image generation in three subsequent stages, synthesizing human shape, structure, and appearance. Given a monocular RGB video of an actor, we train a stack of recurrent deep neural networks that generate these intermediate representations from 2D poses and their temporal derivatives. Splitting the difficult motion transfer problem into subtasks that are aware of the temporal motion context helps us to synthesize results with plausible dynamics and pose-dependent detail. It also allows artistic control of results by manipulation of individual framework stages. In the experimental results, we significantly outperform the state-of-the-art in terms of video realism. Our code and data will be made publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2020a. Monocular Reconstruction of Neural Face Reflectance Fields. https://arxiv.org/abs/2008.10247.
(arXiv: 2008.10247)
Abstract
The reflectance field of a face describes the reflectance properties responsible for complex lighting effects, including diffuse, specular, inter-reflection, and self-shadowing effects. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse, with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance, as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance with which we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint, and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can then be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods in terms of photorealism, due to better capturing of physical primitives such as sub-surface scattering, specularities, self-shadows, and other higher-order effects.
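As a minimal, hypothetical sketch of querying such a neural reflectance representation: an MLP maps a surface position together with view and light directions to RGB. The actual model conditions on face geometry in a deformation-invariant space and is trained on light-stage data; all sizes here are arbitrary.

import torch

# Illustrative neural reflectance field: RGB as a function of surface
# position, viewing direction, and point-light direction.
mlp = torch.nn.Sequential(
    torch.nn.Linear(9, 128), torch.nn.ReLU(),
    torch.nn.Linear(128, 128), torch.nn.ReLU(),
    torch.nn.Linear(128, 3), torch.nn.Sigmoid(),
)

pos = torch.randn(1024, 3)  # surface points
view = torch.nn.functional.normalize(torch.randn(1024, 3), dim=-1)
light = torch.nn.functional.normalize(torch.randn(1024, 3), dim=-1)
rgb = mlp(torch.cat([pos, view, light], dim=-1))  # relit colors
print(rgb.shape)  # torch.Size([1024, 3])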
Export
BibTeX
@online{Mallikarjun_2008.10247, TITLE = {Monocular Reconstruction of Neural Face Reflectance Fields}, AUTHOR = {Mallikarjun B R, and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2008.10247}, EPRINT = {2008.10247}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical premitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects.}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Oh, Tae-Hyun %A Weyrich, Tim %A Bickel, Bernd %A Seidel, Hans-Peter %A Pfister, Hanspeter %A Matusik, Wojciech %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Monocular Reconstruction of Neural Face Reflectance Fields : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B110-E %U https://arxiv.org/abs/2008.10247 %D 2020 %X The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection and self shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints. We show that our method outperforms existing monocular reflectance reconstruction methods, in terms of photorealism due to better capturing of physical premitives, such as sub-surface scattering, specularities, self-shadows and other higher-order effects. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2020b. Learning Complete 3D Morphable Face Models from Images and Videos. https://arxiv.org/abs/2010.01679.
(arXiv: 2010.01679)
Abstract
Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions, and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo, and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach, allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models generalize better and lead to higher-quality image-based reconstructions than existing approaches.
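To illustrate the disentangled parameterization in the simplest possible form, a toy linear decoder where geometry is a mean shape plus identity and expression deformations, and albedo depends only on the identity code. The dimensions and the linear structure are assumptions made for this sketch, not the paper's networks.

import torch

N = 5023  # vertex count (illustrative)

# Toy disentangled decoder: geometry = mean + identity + expression
# deformations; albedo depends only on identity.
mean_shape = torch.zeros(N, 3)
id_geo = torch.nn.Linear(64, N * 3)   # identity geometry basis
exp_geo = torch.nn.Linear(32, N * 3)  # expression basis
id_alb = torch.nn.Linear(64, N * 3)   # per-vertex albedo

def decode(alpha_id, alpha_exp):
    geo = mean_shape + (id_geo(alpha_id) + exp_geo(alpha_exp)).view(N, 3)
    albedo = torch.sigmoid(id_alb(alpha_id)).view(N, 3)
    return geo, albedo

geo, albedo = decode(torch.randn(64), torch.randn(32))
print(geo.shape, albedo.shape)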
Export
BibTeX
@online{Mallikarjun_arXiv2010.01679, TITLE = {Learning Complete {3D} Morphable Face Models from Images and Videos}, AUTHOR = {Mallikarjun B R, and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.01679}, EPRINT = {2010.01679}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches.}, }
Endnote
%0 Report %A Mallikarjun B R, %A Tewari, Ayush %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning Complete 3D Morphable Face Models from Images and Videos : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6FB-1 %U https://arxiv.org/abs/2010.01679 %D 2020 %X Most 3D face reconstruction methods rely on 3D morphable models, which disentangle the space of facial deformations into identity geometry, expressions and skin reflectance. These models are typically learned from a limited number of 3D scans and thus do not generalize well across different identities and expressions. We present the first approach to learn complete 3D models of face identity geometry, albedo and expression just from images and videos. The virtually endless collection of such data, in combination with our self-supervised learning-based approach allows for learning face models that generalize beyond the span of existing approaches. Our network design and loss functions ensure a disentangled parameterization of not only identity and albedo, but also, for the first time, an expression basis. Our method also allows for in-the-wild monocular reconstruction at test time. We show that our learned models better generalize and lead to higher quality image-based reconstructions than existing approaches. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Artificial Intelligence, cs.AI,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG,Computer Science, Multimedia, cs.MM
Rao, S., Stutz, D., and Schiele, B. 2020. Adversarial Training against Location-Optimized Adversarial Patches. https://arxiv.org/abs/2005.02313.
(arXiv: 2005.02313)
Abstract
Deep neural networks have been shown to be susceptible to adversarial examples: small, imperceptible changes constructed to cause misclassification in otherwise highly accurate image classifiers. As a practical alternative, recent work proposed so-called adversarial patches: clearly visible, but adversarially crafted, rectangular patches in images. These patches can easily be printed and applied in the physical world. While defenses against imperceptible adversarial examples have been studied extensively, robustness against adversarial patches is poorly understood. In this work, we first devise a practical approach to obtain adversarial patches while actively optimizing their location within the image. Then, we apply adversarial training on these location-optimized adversarial patches and demonstrate significantly improved robustness on CIFAR10 and GTSRB. Additionally, in contrast to adversarial training on imperceptible adversarial examples, our adversarial patch training does not reduce accuracy.
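A condensed sketch of the training loop under strong simplifications: the patch content is optimized by signed gradient ascent at a handful of candidate locations, the worst-case placement is kept, and the classifier is then trained on the patched batch. The paper's location optimization is richer than this fixed candidate search; the model, step counts, and patch size below are toys.

import torch

# Toy classifier and batch (CIFAR10-sized); a stand-in for the real setup.
model = torch.nn.Sequential(torch.nn.Flatten(),
                            torch.nn.Linear(3 * 32 * 32, 10))
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.CrossEntropyLoss()
x, y = torch.rand(8, 3, 32, 32), torch.randint(0, 10, (8,))
patch = torch.rand(3, 8, 8)

# At each candidate location, optimize the patch content with signed
# gradients; keep the placement that hurts the classifier most.
best_loss, best_x = -1.0, None
for (i, j) in [(0, 0), (0, 24), (24, 0), (24, 24)]:
    p = patch.clone().requires_grad_(True)
    for _ in range(5):
        xp = x.clone()
        xp[:, :, i:i + 8, j:j + 8] = p      # paste patch into the batch
        loss = loss_fn(model(xp), y)
        g, = torch.autograd.grad(loss, p)
        p = (p + 0.05 * g.sign()).clamp(0, 1).detach().requires_grad_(True)
    xp = x.clone()
    xp[:, :, i:i + 8, j:j + 8] = p.detach()  # final patched batch
    loss = loss_fn(model(xp), y)
    if loss.item() > best_loss:
        best_loss, best_x = loss.item(), xp

# Adversarial training step: fit the model on the worst-case patched batch.
opt.zero_grad()
loss_fn(model(best_x), y).backward()
opt.step()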
Export
BibTeX
@online{Rao_arXiv2005.02313, TITLE = {Adversarial Training against Location-Optimized Adversarial Patches}, AUTHOR = {Rao, Sukrut and Stutz, David and Schiele, Bernt}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2005.02313}, EPRINT = {2005.02313}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Deep neural networks have been shown to be susceptible to adversarial examples -- small, imperceptible changes constructed to cause mis-classification in otherwise highly accurate image classifiers. As a practical alternative, recent work proposed so-called adversarial patches: clearly visible, but adversarially crafted rectangular patches in images. These patches can easily be printed and applied in the physical world. While defenses against imperceptible adversarial examples have been studied extensively, robustness against adversarial patches is poorly understood. In this work, we first devise a practical approach to obtain adversarial patches while actively optimizing their location within the image. Then, we apply adversarial training on these location-optimized adversarial patches and demonstrate significantly improved robustness on CIFAR10 and GTSRB. Additionally, in contrast to adversarial training on imperceptible adversarial examples, our adversarial patch training does not reduce accuracy.}, }
Endnote
%0 Report %A Rao, Sukrut %A Stutz, David %A Schiele, Bernt %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Adversarial Training against Location-Optimized Adversarial Patches : %G eng %U http://hdl.handle.net/21.11116/0000-0007-80D0-C %U https://arxiv.org/abs/2005.02313 %D 2020 %X Deep neural networks have been shown to be susceptible to adversarial examples -- small, imperceptible changes constructed to cause mis-classification in otherwise highly accurate image classifiers. As a practical alternative, recent work proposed so-called adversarial patches: clearly visible, but adversarially crafted rectangular patches in images. These patches can easily be printed and applied in the physical world. While defenses against imperceptible adversarial examples have been studied extensively, robustness against adversarial patches is poorly understood. In this work, we first devise a practical approach to obtain adversarial patches while actively optimizing their location within the image. Then, we apply adversarial training on these location-optimized adversarial patches and demonstrate significantly improved robustness on CIFAR10 and GTSRB. Additionally, in contrast to adversarial training on imperceptible adversarial examples, our adversarial patch training does not reduce accuracy. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Cryptography and Security, cs.CR,Computer Science, Learning, cs.LG,Statistics, Machine Learning, stat.ML
Rudnev, V., Golyanik, V., Wang, J., et al. 2020. EventHands: Real-Time Neural 3D Hand Reconstruction from an Event Stream. https://arxiv.org/abs/2012.06475.
(arXiv: 2012.06475)
Abstract
3D hand pose estimation from monocular videos is a long-standing and challenging problem, which is now seeing a strong upturn. In this work, we address it for the first time using a single event camera, i.e., an asynchronous vision sensor reacting to brightness changes. Our EventHands approach has characteristics previously not demonstrated with a single RGB or depth camera, such as high temporal resolution at low data throughput and real-time performance at 1000 Hz. Due to the different data modality of event cameras compared to classical cameras, existing methods cannot be directly applied to, or retrained for, event streams. We thus design a new neural approach which accepts a new event stream representation suitable for learning, which is trained on newly generated synthetic event streams and can generalise to real data. Experiments show that EventHands outperforms recent monocular methods using a colour (or depth) camera in terms of accuracy and its ability to capture hand motions of unprecedented speed. Our method, the event stream simulator, and the dataset will be made publicly available.
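As an illustration of one plausible event-stream representation (not necessarily the one used in the paper): events in a time window are scattered into a two-channel image, one channel per polarity, storing the normalized timestamp of the most recent event at each pixel.

import numpy as np

def events_to_frame(events, h=240, w=180, t0=0.0, t1=1.0):
    """Accumulate events (t, x, y, polarity) into a 2-channel image.

    Each channel keeps the normalized timestamp of the latest event per
    pixel and polarity: a simplified window representation for learning.
    """
    frame = np.zeros((2, h, w), dtype=np.float32)
    for t, x, y, pol in events:
        if t0 <= t < t1:
            frame[int(pol), int(y), int(x)] = (t - t0) / (t1 - t0)
    return frame

# Toy stream: (timestamp, x, y, polarity in {0, 1})
rng = np.random.default_rng(0)
events = zip(np.sort(rng.uniform(0, 1, 1000)),
             rng.integers(0, 180, 1000), rng.integers(0, 240, 1000),
             rng.integers(0, 2, 1000))
print(events_to_frame(list(events)).shape)  # (2, 240, 180)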
Export
BibTeX
@online{Rudnev_arXiv2012.06475, TITLE = {{EventHands}: {R}eal-Time Neural {3D} Hand Reconstruction from an Event Stream}, AUTHOR = {Rudnev, Viktor and Golyanik, Vladislav and Wang, Jiayi and Seidel, Hans-Peter and Mueller, Franziska and Elgharib, Mohamed and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.06475}, EPRINT = {2012.06475}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {3D hand pose estimation from monocular videos is a long-standing and challenging problem, which is now seeing a strong upturn. In this work, we address it for the first time using a single event camera, i.e., an asynchronous vision sensor reacting on brightness changes. Our EventHands approach has characteristics previously not demonstrated with a single RGB or depth camera such as high temporal resolution at low data throughputs and real-time performance at 1000 Hz. Due to the different data modality of event cameras compared to classical cameras, existing methods cannot be directly applied to and re-trained for event streams. We thus design a new neural approach which accepts a new event stream representation suitable for learning, which is trained on newly-generated synthetic event streams and can generalise to real data. Experiments show that EventHands outperforms recent monocular methods using a colour (or depth) camera in terms of accuracy and its ability to capture hand motions of unprecedented speed. Our method, the event stream simulator and the dataset will be made publicly available.}, }
Endnote
%0 Report %A Rudnev, Viktor %A Golyanik, Vladislav %A Wang, Jiayi %A Seidel, Hans-Peter %A Mueller, Franziska %A Elgharib, Mohamed %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T EventHands: Real-Time Neural 3D Hand Reconstruction from an Event Stream : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B709-1 %U https://arxiv.org/abs/2012.06475 %D 2020 %X 3D hand pose estimation from monocular videos is a long-standing and challenging problem, which is now seeing a strong upturn. In this work, we address it for the first time using a single event camera, i.e., an asynchronous vision sensor reacting on brightness changes. Our EventHands approach has characteristics previously not demonstrated with a single RGB or depth camera such as high temporal resolution at low data throughputs and real-time performance at 1000 Hz. Due to the different data modality of event cameras compared to classical cameras, existing methods cannot be directly applied to and re-trained for event streams. We thus design a new neural approach which accepts a new event stream representation suitable for learning, which is trained on newly-generated synthetic event streams and can generalise to real data. Experiments show that EventHands outperforms recent monocular methods using a colour (or depth) camera in terms of accuracy and its ability to capture hand motions of unprecedented speed. Our method, the event stream simulator and the dataset will be made publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Tewari, A., Elgharib, M., Bharaj, G., et al. 2020d. StyleRig: Rigging StyleGAN for 3D Control over Portrait Images. https://arxiv.org/abs/2004.00121.
(arXiv: 2004.00121)
Abstract
StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair, and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs), on the other hand, offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet, is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.
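A hedged sketch of the interface such a rigging network provides: a small network consumes a latent code plus target 3DMM parameters and emits a residual edit of the latent. The 512-dimensional latent follows StyleGAN's convention; everything else below is a placeholder, not the paper's RigNet.

import torch

latent_dim, param_dim = 512, 16

# Toy RigNet-style mapper: edit a (fixed) generator's latent according to
# 3DMM parameters (pose/expression/illumination). Sizes illustrative.
rig = torch.nn.Sequential(
    torch.nn.Linear(latent_dim + param_dim, 512), torch.nn.ReLU(),
    torch.nn.Linear(512, latent_dim),
)

w = torch.randn(1, latent_dim)      # latent of some portrait
params = torch.randn(1, param_dim)  # target 3DMM parameters
w_edit = w + rig(torch.cat([w, params], dim=-1))  # residual edit
print(w_edit.shape)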
Export
BibTeX
@online{Tewari_2004.00121, TITLE = {{StyleRig}: Rigging {StyleGAN} for {3D} Control over Portrait Images}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Bharaj, Gaurav and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2004.00121}, EPRINT = {2004.00121}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face.}, }
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Bharaj, Gaurav %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T StyleRig: Rigging StyleGAN for 3D Control over Portrait Images : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B0FC-6 %U https://arxiv.org/abs/2004.00121 %D 2020 %X StyleGAN generates photorealistic portrait images of faces with eyes, teeth, hair and context (neck, shoulders, background), but lacks a rig-like control over semantic face parameters that are interpretable in 3D, such as face pose, expressions, and scene illumination. Three-dimensional morphable face models (3DMMs) on the other hand offer control over the semantic parameters, but lack photorealism when rendered and only model the face interior, not other parts of a portrait image (hair, mouth interior, background). We present the first method to provide a face rig-like control over a pretrained and fixed StyleGAN via a 3DMM. A new rigging network, RigNet is trained between the 3DMM's semantic parameters and StyleGAN's input. The network is trained in a self-supervised manner, without the need for manual annotations. At test time, our method generates portrait images with the photorealism of StyleGAN and provides explicit control over the 3D semantic parameters of the face. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020e. PIE: Portrait Image Embedding for Semantic Control. https://arxiv.org/abs/2009.09485.
(arXiv: 2009.09485)
Abstract
Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, albeit only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.
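A minimal sketch of embedding-by-optimization, with a frozen toy generator standing in for StyleGAN: a latent code is optimized to reproduce the target image, and a quadratic penalty acts as a crude stand-in for the identity preservation energy. The paper's hierarchical non-linear optimization is considerably richer.

import torch

# Frozen toy generator in place of StyleGAN; only the latent is optimized.
G = torch.nn.Sequential(torch.nn.Linear(64, 3 * 32 * 32), torch.nn.Tanh())
for p in G.parameters():
    p.requires_grad_(False)

target = torch.rand(1, 3 * 32 * 32) * 2 - 1
w = torch.zeros(1, 64, requires_grad=True)
w0 = w.detach().clone()
opt = torch.optim.Adam([w], lr=0.05)

for step in range(200):
    opt.zero_grad()
    recon = ((G(w) - target) ** 2).mean()  # photometric term
    identity = ((w - w0) ** 2).mean()      # stay near initialization
    (recon + 0.1 * identity).backward()
    opt.step()
print(float(recon))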
Export
BibTeX
@online{Tewari_2009.09485, TITLE = {{PIE}: {P}ortrait Image Embedding for Semantic Control}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R, and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2009.09485}, EPRINT = {2009.09485}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study.}, }
Endnote
%0 Report %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B117-7 %U https://arxiv.org/abs/2009.09485 %D 2020 %X Editing of portrait images is a very popular and important research topic with a large variety of applications. For ease of use, control should be provided via a semantically meaningful parameterization that is akin to computer animation controls. The vast majority of existing techniques do not provide such intuitive and fine-grained control, or only enable coarse editing of a single isolated control parameter. Very recently, high-quality semantically controlled editing has been demonstrated, however only on synthetically created StyleGAN images. We present the first approach for embedding real portrait images in the latent space of StyleGAN, which allows for intuitive editing of the head pose, facial expression, and scene illumination in the image. Semantic editing in parameter space is achieved based on StyleRig, a pretrained neural network that maps the control space of a 3D morphable face model to the latent space of the GAN. We design a novel hierarchical non-linear optimization problem to obtain the embedding. An identity preservation energy term allows spatially coherent edits while maintaining facial integrity. Our approach runs at interactive frame rates and thus allows the user to explore the space of possible edits. We evaluate our approach on a wide set of portrait photos, compare it to the current state of the art, and validate the effectiveness of its components in an ablation study. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
Yenamandra, T., Tewari, A., Bernard, F., et al. 2020. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. https://arxiv.org/abs/2011.14143.
(arXiv: 2011.14143)
Abstract
We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models, it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full-head morphable model that includes hair. (ii) In contrast to mesh-based models, it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled into identity, expression, and hairstyle components, while color is disentangled into identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.
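To make property (iii) concrete, a toy version of the decoupling: one network deforms a query point into a shared reference space, conditioned on a per-head latent code, and a second network answers SDF queries in that reference space (color would be read there as well). All sizes are illustrative, not the paper's architecture.

import torch

# Toy i3DMM-style decoupling: points are deformed into a shared reference
# space, where a single SDF network is queried.
deform = torch.nn.Sequential(torch.nn.Linear(3 + 32, 128), torch.nn.ReLU(),
                             torch.nn.Linear(128, 3))
ref_sdf = torch.nn.Sequential(torch.nn.Linear(3, 128), torch.nn.ReLU(),
                              torch.nn.Linear(128, 1))

z = torch.randn(1, 32)      # per-head latent code
pts = torch.randn(4096, 3)  # query points
offsets = deform(torch.cat([pts, z.expand(len(pts), -1)], dim=-1))
sdf = ref_sdf(pts + offsets)  # SDF evaluated in reference space
print(sdf.shape)  # torch.Size([4096, 1])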
Export
BibTeX
@online{Yenamandra_arXiv2011.14143, TITLE = {i{3D}MM: Deep Implicit {3D} Morphable Model of Human Heads}, AUTHOR = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2011.14143}, EPRINT = {2011.14143}, EPRINTTYPE = {arXiv}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available.}, }
Endnote
%0 Report %A Yenamandra, Tarun %A Tewari, Ayush %A Bernard, Florian %A Seidel, Hans-Peter %A Elgharib, Mohamed %A Cremers, Daniel %A Theobalt, Christian %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T i3DMM: Deep Implicit 3D Morphable Model of Human Heads : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B702-8 %U https://arxiv.org/abs/2011.14143 %D 2020 %X We present the first deep implicit 3D morphable model (i3DMM) of full heads. Unlike earlier morphable face models it not only captures identity-specific geometry, texture, and expressions of the frontal face, but also models the entire head, including hair. We collect a new dataset consisting of 64 people with different expressions and hairstyles to train i3DMM. Our approach has the following favorable properties: (i) It is the first full head morphable model that includes hair. (ii) In contrast to mesh-based models it can be trained on merely rigidly aligned scans, without requiring difficult non-rigid registration. (iii) We design a novel architecture to decouple the shape model into an implicit reference shape and a deformation of this reference shape. With that, dense correspondences between shapes can be learned implicitly. (iv) This architecture allows us to semantically disentangle the geometry and color components, as color is learned in the reference space. Geometry is further disentangled as identity, expressions, and hairstyle, while color is disentangled as identity and hairstyle components. We show the merits of i3DMM using ablation studies, comparisons to state-of-the-art models, and applications such as semantic head editing and texture transfer. We will make our model publicly available. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Report
Qian, N., Wang, J., Mueller, F., Bernard, F., Golyanik, V., and Theobalt, C. 2020. Parametric Hand Texture Model for 3D Hand Reconstruction and Personalization. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
3D hand reconstruction from image data is a widely-studied problem in computer vision and graphics, and has a particularly high relevance for virtual and augmented reality. Although several 3D hand reconstruction approaches leverage hand models as a strong prior to resolve ambiguities and achieve a more robust reconstruction, most existing models account only for the hand shape and poses and do not model the texture. To fill this gap, in this work we present the first parametric texture model of human hands. Our model spans several dimensions of hand appearance variability (e.g., related to gender, ethnicity, or age) and only requires a commodity camera for data acquisition. Experimentally, we demonstrate that our appearance model can be used to tackle a range of challenging problems such as 3D hand reconstruction from a single monocular image. Furthermore, our appearance model can be used to define a neural rendering layer that enables training with a self-supervised photometric loss. We make our model publicly available.
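As a sketch of what a linear parametric texture model looks like (an assumption about the model class, made only for illustration): PCA over per-vertex colors, so that a texture is the mean plus a weighted combination of principal components. The data below is synthetic; the 778-vertex count echoes the MANO hand mesh but is just a placeholder here.

import numpy as np

# Linear texture model in the spirit of a parametric appearance basis:
# texture(w) = mean + sum_k w_k * basis_k. Synthetic data throughout.
rng = np.random.default_rng(0)
textures = rng.random((50, 778 * 3))  # 50 scans, 778 vertices (toy)

mean = textures.mean(axis=0)
U, S, Vt = np.linalg.svd(textures - mean, full_matrices=False)
basis = Vt[:10]                       # top-10 principal components

# Sample a plausible texture by drawing component weights at the
# empirical standard deviation of each component.
w = rng.normal(0, 1, 10) * (S[:10] / np.sqrt(len(textures)))
new_texture = mean + w @ basis
print(new_texture.shape)              # (2334,)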
Export
BibTeX
@techreport{Qian_report2020, TITLE = {Parametric Hand Texture Model for {3D} Hand Reconstruction and Personalization}, AUTHOR = {Qian, Neng and Wang, Jiayi and Mueller, Franziska and Bernard, Florian and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0946-011X}, NUMBER = {MPI-I-2020-4-001}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, ABSTRACT = {3D hand reconstruction from image data is a widely-studied problem in com- puter vision and graphics, and has a particularly high relevance for virtual and augmented reality. Although several 3D hand reconstruction approaches leverage hand models as a strong prior to resolve ambiguities and achieve a more robust reconstruction, most existing models account only for the hand shape and poses and do not model the texture. To {fi}ll this gap, in this work we present the {fi}rst parametric texture model of human hands. Our model spans several dimensions of hand appearance variability (e.g., related to gen- der, ethnicity, or age) and only requires a commodity camera for data acqui- sition. Experimentally, we demonstrate that our appearance model can be used to tackle a range of challenging problems such as 3D hand reconstruc- tion from a single monocular image. Furthermore, our appearance model can be used to de{fi}ne a neural rendering layer that enables training with a self-supervised photometric loss. We make our model publicly available.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Qian, Neng %A Wang, Jiayi %A Mueller, Franziska %A Bernard, Florian %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Parametric Hand Texture Model for 3D Hand Reconstruction and Personalization : %G eng %U http://hdl.handle.net/21.11116/0000-0006-9128-9 %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2020 %P 37 p. %X 3D hand reconstruction from image data is a widely-studied problem in com- puter vision and graphics, and has a particularly high relevance for virtual and augmented reality. Although several 3D hand reconstruction approaches leverage hand models as a strong prior to resolve ambiguities and achieve a more robust reconstruction, most existing models account only for the hand shape and poses and do not model the texture. To fill this gap, in this work we present the first parametric texture model of human hands. Our model spans several dimensions of hand appearance variability (e.g., related to gen- der, ethnicity, or age) and only requires a commodity camera for data acqui- sition. Experimentally, we demonstrate that our appearance model can be used to tackle a range of challenging problems such as 3D hand reconstruc- tion from a single monocular image. Furthermore, our appearance model can be used to define a neural rendering layer that enables training with a self-supervised photometric loss. We make our model publicly available. %K hand texture model, appearance modeling, hand tracking, 3D hand recon- struction %B Research Report %@ false
Thesis
Meka, A. 2020. Live inverse rendering. PhD thesis, Universität des Saarlandes, Saarbrücken.
Export
BibTeX
@phdthesis{Meka_2019, TITLE = {Live inverse rendering}, AUTHOR = {Meka, Abhimitra}, LANGUAGE = {eng}, DOI = {10.22028/D291-30206}, SCHOOL = {Universit{\"a}t des Saarlandes}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2020}, MARGINALMARK = {$\bullet$}, DATE = {2020}, }
Endnote
%0 Thesis %A Meka, Abhimitra %Y Theobalt, Christian %A referee: Drettakis, George %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Live inverse rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0007-715A-5 %R 10.22028/D291-30206 %I Universität des Saarlandes %C Saarbrücken %D 2020 %P 189 p. %V phd %9 phd %U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/28721