The Year Before Last
Article
Chen, B., Wang, C., Piovarči, M., et al. 2021. The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories. The Visual Computer 37.
Export
BibTeX
@article{Chen2021b,
TITLE = {The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories},
AUTHOR = {Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana},
LANGUAGE = {eng},
ISSN = {0178-2789},
DOI = {10.1007/s00371-021-02227-x},
PUBLISHER = {Springer},
ADDRESS = {Berlin},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {The Visual Computer},
VOLUME = {37},
PAGES = {2975--2987},
}
Endnote
%0 Journal Article
%A Chen, Bin
%A Wang, Chao
%A Piovarči, Michal
%A Seidel, Hans-Peter
%A Didyk, Piotr
%A Myszkowski, Karol
%A Serrano, Ana
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-F05C-2
%R 10.1007/s00371-021-02227-x
%7 2021
%D 2021
%J The Visual Computer
%V 37
%& 2975
%P 2975 - 2987
%I Springer
%C Berlin
%@ false
Chu, M., Thuerey, N., Seidel, H.-P., Theobalt, C., and Zayer, R. 2021. Learning Meaningful Controls for Fluids. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{Chu2021,
TITLE = {Learning Meaningful Controls for Fluids},
AUTHOR = {Chu, Mengyu and Thuerey, Nils and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3450626.3459845},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {40},
NUMBER = {4},
PAGES = {1--13},
EID = {100},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article
%A Chu, Mengyu
%A Thuerey, Nils
%A Seidel, Hans-Peter
%A Theobalt, Christian
%A Zayer, Rhaleb
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Learning Meaningful Controls for Fluids :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-4B91-F
%R 10.1145/3450626.3459845
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 4
%& 1
%P 1 - 13
%Z sequence number: 100
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2021
%O ACM SIGGRAPH 2021
Delanoy, J., Serrano, A., Masia, B., and Gutierrez, D. 2021. Perception of Material Appearance: A Comparison between Painted and Rendered Images. Journal of Vision 21, 5.
Export
BibTeX
@article{Delanoy2021,
TITLE = {Perception of Material Appearance: {A} Comparison between Painted and Rendered Images},
AUTHOR = {Delanoy, Johanna and Serrano, Ana and Masia, Belen and Gutierrez, Diego},
LANGUAGE = {eng},
ISSN = {1534-7362},
DOI = {10.1167/jov.21.5.16},
PUBLISHER = {Scholar One, Inc.},
ADDRESS = {Charlottesville, VA},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {Journal of Vision},
VOLUME = {21},
NUMBER = {5},
EID = {16},
}
Endnote
%0 Journal Article
%A Delanoy, Johanna
%A Serrano, Ana
%A Masia, Belen
%A Gutierrez, Diego
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Perception of Material Appearance: A Comparison between Painted and Rendered Images :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-A6CC-7
%R 10.1167/jov.21.5.16
%2 PMC8131993
%7 2021
%D 2021
%J Journal of Vision
%V 21
%N 5
%Z sequence number: 16
%I Scholar One, Inc.
%C Charlottesville, VA
%@ false
Elek, O., Zhang, R., Sumin, D., et al. 2021. Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing. Optics Express 29, 5.
Export
BibTeX
@article{Elek2021,
TITLE = {Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for {3D} Printing},
AUTHOR = {Elek, Oskar and Zhang, Ran and Sumin, Denis and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Krivanek, Jaroslav and Weyrich, Tim},
LANGUAGE = {eng},
ISSN = {1094-4087},
DOI = {10.1364/OE.406095},
PUBLISHER = {Optical Society of America},
ADDRESS = {Washington, DC},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {Optics Express},
VOLUME = {29},
NUMBER = {5},
PAGES = {7568--7588},
}
Endnote
%0 Journal Article
%A Elek, Oskar
%A Zhang, Ran
%A Sumin, Denis
%A Myszkowski, Karol
%A Bickel, Bernd
%A Wilkie, Alexander
%A Krivanek, Jaroslav
%A Weyrich, Tim
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-E013-6
%R 10.1364/OE.406095
%7 2021
%D 2021
%J Optics Express
%O Opt. Express
%V 29
%N 5
%& 7568
%P 7568 - 7588
%I Optical Society of America
%C Washington, DC
%@ false
Hladký, J., Seidel, H.-P., and Steinberger, M. 2021. SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
Export
BibTeX
@article{10.1111:cgf.142648,
TITLE = {{SnakeBinning}: {E}fficient Temporally Coherent Triangle Packing for Shading Streaming},
AUTHOR = {Hladk{\'y}, Jozef and Seidel, Hans-Peter and Steinberger, Markus},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.142648},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
VOLUME = {40},
NUMBER = {2},
PAGES = {475--488},
BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)},
EDITOR = {Mitra, Niloy and Viola, Ivan},
}
Endnote
%0 Journal Article
%A Hladký, Jozef
%A Seidel, Hans-Peter
%A Steinberger, Markus
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T SnakeBinning: Efficient Temporally Coherent Triangle Packing for Shading Streaming :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-7AFD-3
%R 10.1111/cgf.142648
%7 2021
%D 2021
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 40
%N 2
%& 475
%P 475 - 488
%I Blackwell-Wiley
%C Oxford
%@ false
%B 42nd Annual Conference of the European Association for Computer Graphics
%O EUROGRAPHICS 2021 EG 2021
Jiang, C., Tang, C., Seidel, H.-P., Chen, R., and Wonka, P. 2021. Computational Design of Lightweight Trusses. Computer-Aided Design 141.
Export
BibTeX
@article{Jiang2021,
TITLE = {Computational Design of Lightweight Trusses},
AUTHOR = {Jiang, Caigui and Tang, Chengcheng and Seidel, Hans-Peter and Chen, Renjie and Wonka, Peter},
ISSN = {0010-4485},
DOI = {10.1016/j.cad.2021.103076},
PUBLISHER = {Elsevier},
ADDRESS = {Amsterdam},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computer-Aided Design},
VOLUME = {141},
EID = {103076},
}
Endnote
%0 Journal Article
%A Jiang, Caigui
%A Tang, Chengcheng
%A Seidel, Hans-Peter
%A Chen, Renjie
%A Wonka, Peter
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Computational Design of Lightweight Trusses :
%U http://hdl.handle.net/21.11116/0000-0009-70C2-D
%R 10.1016/j.cad.2021.103076
%7 2021
%D 2021
%J Computer-Aided Design
%V 141
%Z sequence number: 103076
%I Elsevier
%C Amsterdam
%@ false
Jindal, A., Wolski, K., Mantiuk, R.K., and Myszkowski, K. 2021. Perceptual Model for Adaptive Local Shading and Refresh Rate. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2021) 40, 6.
Export
BibTeX
@article{JindalSIGGRAPHAsia21,
TITLE = {Perceptual Model for Adaptive Local Shading and Refresh Rate},
AUTHOR = {Jindal, Akshay and Wolski, Krzysztof and Mantiuk, Rafa{\l} K. and Myszkowski, Karol},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3478513.3480514},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {40},
NUMBER = {6},
PAGES = {1--18},
EID = {281},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2021},
}
Endnote
%0 Journal Article
%A Jindal, Akshay
%A Wolski, Krzysztof
%A Mantiuk, Rafał K.
%A Myszkowski, Karol
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Perceptual Model for Adaptive Local Shading and Refresh Rate :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-9B45-B
%R 10.1145/3478513.3480514
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 6
%& 1
%P 1 - 18
%Z sequence number: 281
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2021
%O ACM SIGGRAPH Asia 2021 SA '21 SA 2021
Lagunas, M., Serrano, A., Gutierrez, D., and Masia, B. 2021. The Joint Role of Geometry and Illumination on Material Recognition. Journal of Vision 21, 2.
Export
BibTeX
@article{Lagunas2021_MatRecog,
TITLE = {The Joint Role of Geometry and Illumination on Material Recognition},
AUTHOR = {Lagunas, Manuel and Serrano, Ana and Gutierrez, Diego and Masia, Belen},
LANGUAGE = {eng},
ISSN = {1534-7362},
DOI = {10.1167/jov.21.2.2},
PUBLISHER = {Scholar One, Inc.},
ADDRESS = {Charlottesville, VA},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {Journal of Vision},
VOLUME = {21},
NUMBER = {2},
PAGES = {1--18},
}
Endnote
%0 Journal Article
%A Lagunas, Manuel
%A Serrano, Ana
%A Gutierrez, Diego
%A Masia, Belen
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T The Joint Role of Geometry and Illumination on Material Recognition :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-EAF9-9
%R 10.1167/jov.21.2.2
%7 2021
%D 2021
%J Journal of Vision
%V 21
%N 2
%& 1
%P 1 - 18
%I Scholar One, Inc.
%C Charlottesville, VA
%@ false
Leimkühler, T. and Drettakis, G. 2021. FreeStyleGAN: Free-view Editable Portrait Rendering with the Camera Manifold. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2021) 40, 6.
Export
BibTeX
@article{Leimkuehler_SIGGRAPHAsia21,
TITLE = {{FreeStyleGAN}: {F}ree-view Editable Portrait Rendering with the Camera Manifold},
AUTHOR = {Leimk{\"u}hler, Thomas and Drettakis, George},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3478513.3480538},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {40},
NUMBER = {6},
PAGES = {1--15},
EID = {224},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2021},
}
Endnote
%0 Journal Article
%A Leimkühler, Thomas
%A Drettakis, George
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T FreeStyleGAN: Free-view Editable Portrait Rendering with the Camera Manifold :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-AFF0-0
%R 10.1145/3478513.3480538
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 6
%& 1
%P 1 - 15
%Z sequence number: 224
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2021
%O ACM SIGGRAPH Asia 2021 SA '21 SA 2021
Liu, L., Xu, W., Habermann, M., et al. 2021. Neural Human Video Rendering by Learning Dynamic Textures and Rendering-to-Video Translation. IEEE Transactions on Visualization and Computer Graphics 27, 10.
Export
BibTeX
@article{liu2020NeuralHumanRendering,
TITLE = {Neural Human Video Rendering by Learning Dynamic Textures and Rendering-to-Video Translation},
AUTHOR = {Liu, Lingjie and Xu, Weipeng and Habermann, Marc and Zollh{\"o}fer, Michael and Bernard, Florian and Kim, Hyeongwoo and Wang, Wenping and Theobalt, Christian},
LANGUAGE = {eng},
ISSN = {1077-2626},
DOI = {10.1109/TVCG.2020.2996594},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {IEEE Transactions on Visualization and Computer Graphics},
VOLUME = {27},
NUMBER = {10},
PAGES = {4009--4022},
}
Endnote
%0 Journal Article
%A Liu, Lingjie
%A Xu, Weipeng
%A Habermann, Marc
%A Zollhöfer, Michael
%A Bernard, Florian
%A Kim, Hyeongwoo
%A Wang, Wenping
%A Theobalt, Christian
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Neural Human Video Rendering by Learning Dynamic Textures and Rendering-to-Video Translation :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-0369-F
%R 10.1109/TVCG.2020.2996594
%7 2020
%D 2021
%J IEEE Transactions on Visualization and Computer Graphics
%V 27
%N 10
%& 4009
%P 4009 - 4022
%I IEEE
%C Piscataway, NJ
%@ false
Mallikarjun B R, Tewari, A., Dib, A., et al. 2021a. PhotoApp: Photorealistic Appearance Editing of Head Portraits. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{MallikarjunBR2021,
TITLE = {{PhotoApp}: {P}hotorealistic Appearance Editing of Head Portraits},
AUTHOR = {Mallikarjun B R and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3450626.3459765},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {40},
NUMBER = {4},
PAGES = {1--16},
EID = {44},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article
%A Mallikarjun B R,
%A Tewari, Ayush
%A Dib, Abdallah
%A Weyrich, Tim
%A Bickel, Bernd
%A Seidel, Hans-Peter
%A Pfister, Hanspeter
%A Matusik, Wojciech
%A Chevallier, Louis
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T PhotoApp: Photorealistic Appearance Editing of Head Portraits :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-2A9B-A
%R 10.1145/3450626.3459765
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 4
%& 1
%P 1 - 16
%Z sequence number: 44
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2021
%O ACM SIGGRAPH 2021
Masia, B., Camon, J., Gutierrez, D., and Serrano, A. 2021. Influence of Directional Sound Cues on Users’ Exploration Across 360° Movie Cuts. IEEE Computer Graphics and Applications 41, 4.
Export
BibTeX
@article{Masia2021,
TITLE = {Influence of Directional Sound Cues on Users' Exploration Across 360{\textdegree} Movie Cuts},
AUTHOR = {Masia, Belen and Camon, Javier and Gutierrez, Diego and Serrano, Ana},
LANGUAGE = {eng},
ISSN = {0272-1716},
DOI = {10.1109/MCG.2021.3064688},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {IEEE Computer Graphics and Applications},
VOLUME = {41},
NUMBER = {4},
PAGES = {64--75},
}
Endnote
%0 Journal Article
%A Masia, Belen
%A Camon, Javier
%A Gutierrez, Diego
%A Serrano, Ana
%+ External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Influence of Directional Sound Cues on Users' Exploration Across 360° Movie Cuts :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-F077-3
%R 10.1109/MCG.2021.3064688
%7 2021
%D 2021
%J IEEE Computer Graphics and Applications
%V 41
%N 4
%& 64
%P 64 - 75
%I IEEE
%C Piscataway, NJ
%@ false
Meka, A., Shafiei, M., Zollhöfer, M., Richardt, C., and Theobalt, C. 2021. Real-time Global Illumination Decomposition of Videos. ACM Transactions on Graphics 40, 3.
Export
BibTeX
@article{Meka:2021,
TITLE = {Real-time Global Illumination Decomposition of Videos},
AUTHOR = {Meka, Abhimitra and Shafiei, Mohammad and Zollh{\"o}fer, Michael and Richardt, Christian and Theobalt, Christian},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3374753},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics},
VOLUME = {40},
NUMBER = {3},
PAGES = {1--16},
EID = {22},
}
Endnote
%0 Journal Article
%A Meka, Abhimitra
%A Shafiei, Mohammad
%A Zollhöfer, Michael
%A Richardt, Christian
%A Theobalt, Christian
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Real-time Global Illumination Decomposition of Videos :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-EE07-6
%R 10.1145/3374753
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 3
%& 1
%P 1 - 16
%Z sequence number: 22
%I ACM
%C New York, NY
%@ false
%U http://gvv.mpi-inf.mpg.de/projects/LiveIlluminationDecomposition/
Rittig, T., Sumin, D., Babaei, V., et al. 2021. Neural Acceleration of Scattering-Aware Color 3D Printing. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
Export
BibTeX
@article{rittig2021neural,
TITLE = {Neural Acceleration of Scattering-Aware Color {3D} Printing},
AUTHOR = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexei and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Krivanek, Jaroslav},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.142626},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
EDITOR = {Mitra, Niloy and Viola, Ivan},
VOLUME = {40},
NUMBER = {2},
PAGES = {205--219},
BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)},
}
Endnote
%0 Journal Article
%A Rittig, Tobias
%A Sumin, Denis
%A Babaei, Vahid
%A Didyk, Piotr
%A Voloboy, Alexei
%A Wilkie, Alexander
%A Bickel, Bernd
%A Myszkowski, Karol
%A Weyrich, Tim
%A Krivanek, Jaroslav
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Neural Acceleration of Scattering-Aware Color 3D Printing :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-F073-8
%R 10.1111/cgf.142626
%7 2021
%D 2021
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 40
%N 2
%& 205
%P 205 - 219
%I Blackwell-Wiley
%C Oxford
%@ false
%B 42nd Annual Conference of the European Association for Computer Graphics
%O EUROGRAPHICS 2021 EG 2021
Ruan, L., Chen, B., Li, J., and Lam, M.-L. 2021. AIFNet: All-in-Focus Image Restoration Network Using a Light Field-Based Dataset. IEEE Transactions on Computational Imaging 7.
Export
BibTeX
@article{Ruan2021,
TITLE = {{AIFNet}: {A}ll-in-Focus Image Restoration Network Using a Light Field-Based Dataset},
AUTHOR = {Ruan, Lingyan and Chen, Bin and Li, Jizhou and Lam, Miu-Ling},
LANGUAGE = {eng},
ISSN = {2573-0436},
DOI = {10.1109/TCI.2021.3092891},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {IEEE Transactions on Computational Imaging},
VOLUME = {7},
PAGES = {675--688},
}
Endnote
%0 Journal Article
%A Ruan, Lingyan
%A Chen, Bin
%A Li, Jizhou
%A Lam, Miu-Ling
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T AIFNet: All-in-Focus Image Restoration Network Using a Light Field-Based Dataset :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-4795-F
%R 10.1109/TCI.2021.3092891
%7 2021
%D 2021
%J IEEE Transactions on Computational Imaging
%V 7
%& 675
%P 675 - 688
%I IEEE
%C Piscataway, NJ
%@ false
Serrano, A., Chen, B., Wang, C., et al. 2021. The Effect of Shape and Illumination on Material Perception. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
Export
BibTeX
@article{SIGG2021_Materials,
TITLE = {The Effect of Shape and Illumination on Material Perception},
AUTHOR = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3450626.3459813},
PUBLISHER = {Association for Computing Machinery},
ADDRESS = {New York, NY},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {40},
NUMBER = {4},
PAGES = {1--16},
EID = {125},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021},
}
Endnote
%0 Journal Article
%A Serrano, Ana
%A Chen, Bin
%A Wang, Chao
%A Piovarči, Michal
%A Seidel, Hans-Peter
%A Didyk, Piotr
%A Myszkowski, Karol
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T The Effect of Shape and Illumination on Material Perception : Model and Applications
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-0565-0
%R 10.1145/3450626.3459813
%7 2021
%D 2021
%J ACM Transactions on Graphics
%V 40
%N 4
%& 1
%P 1 - 16
%Z sequence number: 125
%I Association for Computing Machinery
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2021
%O ACM SIGGRAPH 2021
Van Onzenoodt, C., Singh, G., Ropinski, T., and Ritschel, T. 2021a. Blue Noise Plots. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
Export
BibTeX
@article{onzenoodt2021blue,
TITLE = {Blue Noise Plots},
AUTHOR = {van Onzenoodt, Christian and Singh, Gurprit and Ropinski, Timo and Ritschel, Tobias},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.142644},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
VOLUME = {40},
NUMBER = {2},
PAGES = {425--433},
BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)},
EDITOR = {Mitra, Niloy and Viola, Ivan},
}
Endnote
%0 Journal Article
%A van Onzenoodt, Christian
%A Singh, Gurprit
%A Ropinski, Timo
%A Ritschel, Tobias
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Blue Noise Plots :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-010F-7
%R 10.1111/cgf.142644
%7 2021
%D 2021
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 40
%N 2
%& 425
%P 425 - 433
%I Blackwell-Wiley
%C Oxford
%@ false
%B 42nd Annual Conference of the European Association for Computer Graphics
%O EUROGRAPHICS 2021 EG 2021
Conference Paper
Ali, J., Lahoti, P., and Gummadi, K.P. 2021. Accounting for Model Uncertainty in Algorithmic Discrimination. AIES ’21, Fourth AAAI/ACM Conference on Artificial Intelligence, Ethics and Society, ACM.
Export
BibTeX
@inproceedings{Ali_AIES2021,
TITLE = {Accounting for Model Uncertainty in Algorithmic Discrimination},
AUTHOR = {Ali, Junaid and Lahoti, Preethi and Gummadi, Krishna P.},
LANGUAGE = {eng},
ISBN = {978-1-4503-8473-5},
DOI = {10.1145/3461702.3462630},
PUBLISHER = {ACM},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {AIES '21, Fourth AAAI/ACM Conference on Artificial Intelligence, Ethics and Society},
EDITOR = {Fourcade, Marion and Kuipers, Benjamin and Lazar, Seth and Mulligan, Deirdre},
PAGES = {336--345},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Ali, Junaid
%A Lahoti, Preethi
%A Gummadi, Krishna P.
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Databases and Information Systems, MPI for Informatics, Max Planck Society
External Organizations
%T Accounting for Model Uncertainty in Algorithmic Discrimination :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-72E3-7
%R 10.1145/3461702.3462630
%D 2021
%B Fourth AAAI/ACM Conference on Artificial Intelligence, Ethics and Society
%Z date of event: 2021-05-19 - 2021-05-21
%C Virtual Conference
%B AIES '21
%E Fourcade, Marion; Kuipers, Benjamin; Lazar, Seth; Mulligan, Deirdre
%P 336 - 345
%I ACM
%@ 978-1-4503-8473-5
Doosti, N., Panetta, J., and Babaei, V. 2021. Topology Optimization via Frequency Tuning of Neural Design Representations. Proceedings SCF 2021, ACM.
Export
BibTeX
@inproceedings{Doosti21,
TITLE = {Topology Optimization via Frequency Tuning of Neural Design Representations},
AUTHOR = {Doosti, Nikan and Panetta, Julian and Babaei, Vahid},
LANGUAGE = {eng},
ISBN = {978-1-4503-9090-3},
DOI = {10.1145/3485114.3485124},
PUBLISHER = {ACM},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings SCF 2021},
EDITOR = {Whiting, Emily and Hart, John and Sung, Cynthia and McCann, James and Peek, Nadya},
PAGES = {1--9},
EID = {1},
ADDRESS = {Virtual Event},
}
Endnote
%0 Conference Proceedings
%A Doosti, Nikan
%A Panetta, Julian
%A Babaei, Vahid
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Topology Optimization via Frequency Tuning of Neural Design Representations :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-C159-6
%R 10.1145/3485114.3485124
%D 2021
%B ACM Symposium on Computational Fabrication
%Z date of event: 2021-10-28 - 2021-10-29
%C Virtual Event
%B Proceedings SCF 2021
%E Whiting, Emily; Hart, John; Sung, Cynthia; McCann, James; Peek, Nadya
%P 1 - 9
%Z sequence number: 1
%I ACM
%@ 978-1-4503-9090-3
Fox, G., Liu, W., Kim, H., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021. VideoForensicsHQ: Detecting High-quality Manipulated Face Videos. IEEE International Conference on Multimedia and Expo (ICME 2021), IEEE.
Export
BibTeX
@inproceedings{Fox_ICME2021,
TITLE = {{Video\-Foren\-sics\-HQ}: {D}etecting High-quality Manipulated Face Videos},
AUTHOR = {Fox, Gereon and Liu, Wentao and Kim, Hyeongwoo and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-3864-3},
DOI = {10.1109/ICME51207.2021.9428101},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE International Conference on Multimedia and Expo (ICME 2021)},
PAGES = {1--6},
ADDRESS = {Shenzhen, China (Virtual)},
}
Endnote
%0 Conference Proceedings
%A Fox, Gereon
%A Liu, Wentao
%A Kim, Hyeongwoo
%A Seidel, Hans-Peter
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T VideoForensicsHQ: Detecting High-quality Manipulated Face Videos :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-88DF-4
%R 10.1109/ICME51207.2021.9428101
%D 2021
%B 22nd IEEE International Conference on Multimedia and Expo
%Z date of event: 2021-07-05 - 2021-07-07
%C Shenzhen, China (Virtual)
%B IEEE International Conference on Multimedia and Expo
%P 1 - 6
%I IEEE
%@ 978-1-6654-3864-3
%U http://gvv.mpi-inf.mpg.de/projects/VForensicsHQ/
Habibie, I., Xu, W., Mehta, D., et al. 2021a. Learning Speech-driven 3D Conversational Gestures from Video. Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021), ACM.
Export
BibTeX
@inproceedings{Habibie_IVA2021,
TITLE = {Learning Speech-driven {3D} Conversational Gestures from Video},
AUTHOR = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {9781450386197},
DOI = {10.1145/3472306.3478335},
PUBLISHER = {ACM},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents (IVA 2021)},
PAGES = {101--108},
ADDRESS = {Virtual Event, Japan},
}
Endnote
%0 Conference Proceedings
%A Habibie, Ikhsanul
%A Xu, Weipeng
%A Mehta, Dushyant
%A Liu, Lingjie
%A Seidel, Hans-Peter
%A Pons-Moll, Gerard
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Learning Speech-driven 3D Conversational Gestures from Video :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-4D19-6
%R 10.1145/3472306.3478335
%D 2021
%B 21st ACM International Conference on Intelligent Virtual Agents
%Z date of event: 2021-09-14 - 2021-09-17
%C Virtual Event, Japan
%B Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents
%P 101 - 108
%I ACM
%@ 9781450386197
Kappel, M., Golyanik, V., Elgharib, M., et al. 2021. High-Fidelity Neural Human Motion Transfer from Monocular Video. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Kappel_CVPR2021,
TITLE = {High-Fidelity Neural Human Motion Transfer from Monocular Video},
AUTHOR = {Kappel, Moritz and Golyanik, Vladislav and Elgharib, Mohamed and Henningson, Jann-Ole and Seidel, Hans-Peter and Castillo, Susana and Theobalt, Christian and Magnor, Marcus A.},
LANGUAGE = {eng},
ISBN = {978-1-6654-4509-2},
DOI = {10.1109/CVPR46437.2021.00159},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
PAGES = {1541--1550},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Kappel, Moritz
%A Golyanik, Vladislav
%A Elgharib, Mohamed
%A Henningson, Jann-Ole
%A Seidel, Hans-Peter
%A Castillo, Susana
%A Theobalt, Christian
%A Magnor, Marcus A.
%+ External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
%T High-Fidelity Neural Human Motion Transfer from Monocular Video :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-8947-E
%R 10.1109/CVPR46437.2021.00159
%D 2021
%B 34th IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2021-06-19 - 2021-06-25
%C Virtual Conference
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 1541 - 1550
%I IEEE
%@ 978-1-6654-4509-2
%U https://gvv.mpi-inf.mpg.de/projects/NHMT/
Knibbe, J., Freire, R., Koelle, M., and Strohmeier, P. 2021. Skill-Sleeves: Designing Electrode Garments for Wearability. TEI ’21, Fifteenth International Conference on Tangible, Embedded, and Embodied Interaction, ACM.
Export
BibTeX
@inproceedings{Knibbe_TEI21,
TITLE = {Skill-Sleeves: {D}esigning Electrode Garments for Wearability},
AUTHOR = {Knibbe, Jarrod and Freire, Rachel and Koelle, Marion and Strohmeier, Paul},
LANGUAGE = {eng},
ISBN = {978-1-4503-8213-7},
DOI = {10.1145/3430524.3440652},
PUBLISHER = {ACM},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {TEI '21, Fifteenth International Conference on Tangible, Embedded, and Embodied Interaction},
PAGES = {1--16},
EID = {33},
ADDRESS = {Salzburg, Austria},
}
Endnote
%0 Conference Proceedings
%A Knibbe, Jarrod
%A Freire, Rachel
%A Koelle, Marion
%A Strohmeier, Paul
%+ External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Skill-Sleeves: Designing Electrode Garments for Wearability :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-BDF0-0
%R 10.1145/3430524.3440652
%D 2021
%B Fifteenth International Conference on Tangible, Embedded, and Embodied Interaction
%Z date of event: 2021-02-14 - 2021-02-17
%C Salzburg, Austria
%B TEI '21
%P 1 - 16
%Z sequence number: 33
%I ACM
%@ 978-1-4503-8213-7
Li, Y., Habermann, M., Thomaszewski, B., Coros, S., Beeler, T., and Theobalt, C. 2021. Deep Physics-aware Inference of Cloth Deformation for Monocular Human Performance Capture. 2021 International Conference on 3D Vision, IEEE.
Export
BibTeX
@inproceedings{Li_3DV21,
TITLE = {Deep Physics-aware Inference of Cloth Deformation for Monocular Human Performance Capture},
AUTHOR = {Li, Yue and Habermann, Marc and Thomaszewski, Bernhard and Coros, Stelian and Beeler, Thabo and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-2688-6},
DOI = {10.1109/3DV53792.2021.00047},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
BOOKTITLE = {2021 International Conference on 3D Vision},
PAGES = {373--384},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Li, Yue
%A Habermann, Marc
%A Thomaszewski, Bernhard
%A Coros, Stelian
%A Beeler, Thabo
%A Theobalt, Christian
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Deep Physics-aware Inference of Cloth Deformation for Monocular Human Performance Capture :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-E9D0-7
%R 10.1109/3DV53792.2021.00047
%D 2021
%B International Conference on 3D Vision
%Z date of event: 2021-12-01 - 2021-12-03
%C Virtual Conference
%B 2021 International Conference on 3D Vision
%P 373 - 384
%I IEEE
%@ 978-1-6654-2688-6
Mallikarjun B R, Tewari, A., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021b. Learning Complete 3D Morphable Face Models from Images and Videos. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021b,
TITLE = {Learning Complete {3D} Morphable Face Models from Images and Videos},
AUTHOR = {Mallikarjun B R and Tewari, Ayush and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-4509-2},
DOI = {10.1109/CVPR46437.2021.00337},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
PAGES = {3360--3370},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Mallikarjun B R,
%A Tewari, Ayush
%A Seidel, Hans-Peter
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Learning Complete 3D Morphable Face Models from Images and Videos :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-8926-3
%R 10.1109/CVPR46437.2021.00337
%D 2021
%B 34th IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2021-06-19 - 2021-06-25
%C Virtual Conference
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 3360 - 3370
%I IEEE
%@ 978-1-6654-4509-2
%U https://gvv.mpi-inf.mpg.de/projects/LeMoMo/
Mallikarjun B R, Tewari, A., Oh, T.-H., et al. 2021c. Monocular Reconstruction of Neural Face Reflectance Fields. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Mallikarjun_CVPR2021,
TITLE = {Monocular Reconstruction of Neural Face Reflectance Fields},
AUTHOR = {Mallikarjun B R and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-4509-2},
DOI = {10.1109/CVPR46437.2021.00476},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
PAGES = {4789--4798},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Mallikarjun B R,
%A Tewari, Ayush
%A Oh, Tae-Hyun
%A Weyrich, Tim
%A Bickel, Bernd
%A Seidel, Hans-Peter
%A Pfister, Hanspeter
%A Matusik, Wojciech
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Monocular Reconstruction of Neural Face Reflectance Fields :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-88FB-4
%R 10.1109/CVPR46437.2021.00476
%D 2021
%B 34th IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2021-06-19 - 2021-06-25
%C Virtual Conference
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 4789 - 4798
%I IEEE
%@ 978-1-6654-4509-2
%U https://gvv.mpi-inf.mpg.de/projects/FaceReflectanceFields/
Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.-P., Elgharib, M., and Theobalt, C. 2021. Differentiable Event Stream Simulator for Non-Rigid 3D Tracking. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Nehvi_CVPR2021Workshop,
TITLE = {Differentiable Event Stream Simulator for Non-Rigid {3D} Tracking},
AUTHOR = {Nehvi, Jalees and Golyanik, Vladislav and Mueller, Franziska and Seidel, Hans-Peter and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-4899-4},
DOI = {10.1109/CVPRW53098.2021.00143},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR 2021)},
PAGES = {1302--1311},
ADDRESS = {Virtual Workshop},
}
Endnote
%0 Conference Proceedings
%A Nehvi, Jalees
%A Golyanik, Vladislav
%A Mueller, Franziska
%A Seidel, Hans-Peter
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Differentiable Event Stream Simulator for Non-Rigid 3D Tracking :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-8957-C
%R 10.1109/CVPRW53098.2021.00143
%D 2021
%B Third International Workshop on Event-Based Vision
%Z date of event: 2021-06-19 - 2021-06-19
%C Virtual Workshop
%B Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops
%P 1302 - 1311
%I IEEE
%@ 978-1-6654-4899-4
%U https://gvv.mpi-inf.mpg.de/projects/Event-based_Non-rigid_3D_Tracking/
Rao, S., Stutz, D., and Schiele, B. 2021. Adversarial Training Against Location-Optimized Adversarial Patches. Computer Vision – ECCV Workshops 2020, Springer.
Export
BibTeX
@inproceedings{DBLP:conf/eccv/RaoSS20,
TITLE = {Adversarial Training Against Location-Optimized Adversarial Patches},
AUTHOR = {Rao, Sukrut and Stutz, David and Schiele, Bernt},
LANGUAGE = {eng},
ISBN = {978-3-030-68237-8},
DOI = {10.1007/978-3-030-68238-5_32},
PUBLISHER = {Springer},
YEAR = {2020},
MARGINALMARK = {$\bullet$},
DATE = {2021},
BOOKTITLE = {Computer Vision -- ECCV Workshops 2020},
EDITOR = {Bartoli, Adrian and Fusiello, Andrea},
PAGES = {429--448},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {12539},
ADDRESS = {Glasgow, UK},
}
Endnote
%0 Conference Proceedings
%A Rao, Sukrut
%A Stutz, David
%A Schiele, Bernt
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
%T Adversarial Training Against Location-Optimized Adversarial Patches :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-1662-1
%R 10.1007/978-3-030-68238-5_32
%D 2021
%B 16th European Conference on Computer Vision
%Z date of event: 2020-08-23 - 2020-08-28
%C Glasgow, UK
%B Computer Vision -- ECCV Workshops 2020
%E Bartoli, Adrian; Fusiello, Andrea
%P 429 - 448
%I Springer
%@ 978-3-030-68237-8
%B Lecture Notes in Computer Science
%N 12539
Rudnev, V., Golyanik, V., Wang, J., et al. 2021. EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream. ICCV 2021, IEEE.
Export
BibTeX
@inproceedings{Rudnev_2021_ICCV,
TITLE = {{EventHands}: {R}eal-Time Neural {3D} Hand Pose Estimation from an Event Stream},
AUTHOR = {Rudnev, Viktor and Golyanik, Vladislav and Wang, Jiayi and Seidel, Hans-Peter and Mueller, Franziska and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-2812-5},
DOI = {10.1109/ICCV48922.2021.01216},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {ICCV 2021},
PAGES = {12365--12375},
ADDRESS = {Virtual Event},
}
Endnote
%0 Conference Proceedings
%A Rudnev, Viktor
%A Golyanik, Vladislav
%A Wang, Jiayi
%A Seidel, Hans-Peter
%A Mueller, Franziska
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T EventHands: Real-Time Neural 3D Hand Pose Estimation from an Event Stream :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-B709-1
%R 10.1109/ICCV48922.2021.01216
%D 2021
%B IEEE/CVF International Conference on Computer Vision
%Z date of event: 2021-10-10 - 2021-10-17
%C Virtual Event
%B ICCV 2021
%P 12365 - 12375
%I IEEE
%@ 978-1-6654-2812-5
Yenamandra, T., Tewari, A., Bernard, F., et al. 2021. i3DMM: Deep Implicit 3D Morphable Model of Human Heads. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021), IEEE.
Export
BibTeX
@inproceedings{Yenamandra_CVPR2021,
TITLE = {{i3DMM}: {D}eep Implicit {3D} Morphable Model of Human Heads},
AUTHOR = {Yenamandra, Tarun and Tewari, Ayush and Bernard, Florian and Seidel, Hans-Peter and Elgharib, Mohamed and Cremers, Daniel and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-1-6654-4509-2},
DOI = {10.1109/CVPR46437.2021.01261},
PUBLISHER = {IEEE},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2021)},
PAGES = {12798--12808},
ADDRESS = {Virtual Conference},
}
Endnote
%0 Conference Proceedings
%A Yenamandra, Tarun
%A Tewari, Ayush
%A Bernard, Florian
%A Seidel, Hans-Peter
%A Elgharib, Mohamed
%A Cremers, Daniel
%A Theobalt, Christian
%+ External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T i3DMM: Deep Implicit 3D Morphable Model of Human Heads :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-8966-B
%R 10.1109/CVPR46437.2021.01261
%D 2021
%B 34th IEEE Conference on Computer Vision and Pattern Recognition
%Z date of event: 2021-06-19 - 2021-06-25
%C Virtual Conference
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 12798 - 12808
%I IEEE
%@ 978-1-6654-4509-2
%U https://gvv.mpi-inf.mpg.de/projects/i3DMM/
Zheng, Q., Singh, G., and Seidel, H.-P. 2021. Neural Relightable Participating Media Rendering. Advances in Neural Information Processing Systems 34 (NeurIPS 2021), Curran Associates, Inc.
Export
BibTeX
@inproceedings{Zheng_Neurips2021,
TITLE = {Neural Relightable Participating Media Rendering},
AUTHOR = {Zheng, Quan and Singh, Gurprit and Seidel, Hans-Peter},
LANGUAGE = {eng},
ISBN = {9781713845393},
PUBLISHER = {Curran Associates, Inc.},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Advances in Neural Information Processing Systems 34 (NeurIPS 2021)},
EDITOR = {Ranzato, M. and Beygelzimer, A. and Liang, P. S. and Vaughan, J. W. and Dauphin, Y.},
PAGES = {15203--15215},
ADDRESS = {Virtual},
}
Endnote
%0 Conference Proceedings
%A Zheng, Quan
%A Singh, Gurprit
%A Seidel, Hans-Peter
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Neural Relightable Participating Media Rendering :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-7117-E
%D 2021
%B 35th Conference on Neural Information Processing Systems
%Z date of event: 2021-12-06 - 2021-12-14
%C Virtual
%B Advances in Neural Information Processing Systems 34
%E Ranzato, M.; Beygelzimer, A.; Liang, P. S.; Vaughan, J. W.; Dauphin, Y.
%P 15203 - 15215
%I Curran Associates, Inc.
%@ 9781713845393
Paper
Ansari, N., Seidel, H.-P., and Babaei, V. 2021. Mixed Integer Neural Inverse Design. https://arxiv.org/abs/2109.12888.
(arXiv: 2109.12888) Abstract
In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.
Export
BibTeX
@online{Ansari_2109.12888,
TITLE = {Mixed Integer Neural Inverse Design},
AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2109.12888},
EPRINT = {2109.12888},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.},
}
Endnote
%0 Report
%A Ansari, Navid
%A Seidel, Hans-Peter
%A Babaei, Vahid
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Mixed Integer Neural Inverse Design :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-7104-3
%U https://arxiv.org/abs/2109.12888
%D 2021
%X In computational design and fabrication, neural networks are becoming important surrogates for bulky forward simulations. A long-standing, intertwined question is that of inverse design: how to compute a design that satisfies a desired target performance? Here, we show that the piecewise linear property, very common in everyday neural networks, allows for an inverse design formulation based on mixed-integer linear programming. Our mixed-integer inverse design uncovers globally optimal or near optimal solutions in a principled manner. Furthermore, our method significantly facilitates emerging, but challenging, combinatorial inverse design tasks, such as material selection. For problems where finding the optimal solution is not desirable or tractable, we develop an efficient yet near-optimal hybrid optimization. Eventually, our method is able to find solutions provably robust to possible fabrication perturbations among multiple designs with similar performances.
%K Computer Science, Graphics, cs.GR,Computer Science, Learning, cs.LG
Habibie, I., Xu, W., Mehta, D., et al. 2021b. Learning Speech-driven 3D Conversational Gestures from Video. https://arxiv.org/abs/2102.06837.
(arXiv: 2102.06837) Abstract
We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.
Export
BibTeX
@online{Habibie_2102.06837,
TITLE = {Learning Speech-driven {3D} Conversational Gestures from Video},
AUTHOR = {Habibie, Ikhsanul and Xu, Weipeng and Mehta, Dushyant and Liu, Lingjie and Seidel, Hans-Peter and Pons-Moll, Gerard and Elgharib, Mohamed and Theobalt, Christian},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2102.06837},
EPRINT = {2102.06837},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.},
}
Endnote
%0 Report
%A Habibie, Ikhsanul
%A Xu, Weipeng
%A Mehta, Dushyant
%A Liu, Lingjie
%A Seidel, Hans-Peter
%A Pons-Moll, Gerard
%A Elgharib, Mohamed
%A Theobalt, Christian
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Learning Speech-driven 3D Conversational Gestures from Video :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-70C7-8
%U https://arxiv.org/abs/2102.06837
%D 2021
%X We propose the first approach to automatically and jointly synthesize both the synchronous 3D conversational body and hand gestures, as well as 3D face and head animations, of a virtual character from speech input. Our algorithm uses a CNN architecture that leverages the inherent correlation between facial expression and hand gestures. Synthesis of conversational body gestures is a multi-modal problem since many similar gestures can plausibly accompany the same input speech. To synthesize plausible body gestures in this setting, we train a Generative Adversarial Network (GAN) based model that measures the plausibility of the generated sequences of 3D body motion when paired with the input audio features. We also contribute a new way to create a large corpus of more than 33 hours of annotated body, hand, and face data from in-the-wild videos of talking people. To this end, we apply state-of-the-art monocular approaches for 3D body and hand pose estimation as well as dense 3D face performance capture to the video corpus. In this way, we can train on orders of magnitude more data than previous algorithms that resort to complex in-studio motion capture solutions, and thereby train more expressive synthesis algorithms. Our experiments and user study show the state-of-the-art quality of our speech-synthesized full 3D character animations.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
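As an editorial illustration of the adversarial component described in the abstract above, the following sketch shows a conditional discriminator that scores paired audio features and 3D pose sequences. It is a minimal stand-in, not the authors' published architecture; the class name, layer sizes, and feature dimensions are assumptions.

```python
# Minimal sketch (not the authors' architecture): a conditional discriminator that
# scores how plausible a 3D motion sequence is when paired with audio features,
# as in GAN-based co-speech gesture synthesis. Dimensions below are assumptions.
import torch
import torch.nn as nn

class GesturePlausibilityDiscriminator(nn.Module):
    def __init__(self, audio_dim=64, pose_dim=57, hidden=128):
        super().__init__()
        # Temporal convolutions over the concatenated (audio, pose) streams.
        self.net = nn.Sequential(
            nn.Conv1d(audio_dim + pose_dim, hidden, kernel_size=5, padding=2),
            nn.LeakyReLU(0.2),
            nn.Conv1d(hidden, hidden, kernel_size=5, stride=2, padding=2),
            nn.LeakyReLU(0.2),
            nn.Conv1d(hidden, 1, kernel_size=1),
        )

    def forward(self, audio, pose):
        # audio: (batch, frames, audio_dim); pose: (batch, frames, pose_dim)
        x = torch.cat([audio, pose], dim=-1).transpose(1, 2)  # -> (batch, channels, frames)
        scores = self.net(x)                                   # per-window plausibility logits
        return scores.mean(dim=(1, 2))                         # one scalar score per sequence

# Example: score a random batch of 2-second clips at 30 fps.
d = GesturePlausibilityDiscriminator()
print(d(torch.randn(4, 60, 64), torch.randn(4, 60, 57)).shape)  # torch.Size([4])
```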
Martin, D., Malpica, S., Gutierrez, D., Masia, B., and Serrano, A. 2021. Multimodality in VR: A Survey. https://arxiv.org/abs/2101.07906.
(arXiv: 2101.07906) Abstract
Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed.
Export
BibTeX
@online{Martin2021_VRsurvey,
TITLE = {Multimodality in {VR}: {A} Survey},
AUTHOR = {Martin, Daniel and Malpica, Sandra and Gutierrez, Diego and Masia, Belen and Serrano, Ana},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2101.07906},
EPRINT = {2101.07906},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed.},
}
Endnote
%0 Report
%A Martin, Daniel
%A Malpica, Sandra
%A Gutierrez, Diego
%A Masia, Belen
%A Serrano, Ana
%+ External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Multimodality in VR: A Survey :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-EB00-0
%U https://arxiv.org/abs/2101.07906
%D 2021
%X Virtual reality has the potential to change the way we create and consume content in our everyday life. Entertainment, training, design and manufacturing, communication, or advertising are all applications that already benefit from this new medium reaching consumer level. VR is inherently different from traditional media: it offers a more immersive experience, and has the ability to elicit a sense of presence through the place and plausibility illusions. It also gives the user unprecedented capabilities to explore their environment, in contrast with traditional media. In VR, like in the real world, users integrate the multimodal sensory information they receive to create a unified perception of the virtual world. Therefore, the sensory cues that are available in a virtual environment can be leveraged to enhance the final experience. This may include increasing realism, or the sense of presence; predicting or guiding the attention of the user through the experience; or increasing their performance if the experience involves the completion of certain tasks. In this state-of-the-art report, we survey the body of work addressing multimodality in virtual reality, its role and benefits in the final user experience. The works here reviewed thus encompass several fields of research, including computer graphics, human computer interaction, or psychology and perception. Additionally, we give an overview of different applications that leverage multimodal input in areas such as medicine, training and education, or entertainment; we include works in which the integration of multiple sensory information yields significant improvements, demonstrating how multimodality can play a fundamental role in the way VR systems are designed, and VR experiences created and consumed.
%K Computer Science, Human-Computer Interaction, cs.HC,Computer Science, Graphics, cs.GR
Sarkar, K., Mehta, D., Xu, W., Golyanik, V., and Theobalt, C. 2021. Neural Re-Rendering of Humans from a Single Image. https://arxiv.org/abs/2101.04104.
(arXiv: 2101.04104) Abstract
Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods.
Export
BibTeX
@online{Sarkar_arXiv2101.04104,
TITLE = {Neural Re-Rendering of Humans from a Single Image},
AUTHOR = {Sarkar, Kripasindhu and Mehta, Dushyant and Xu, Weipeng and Golyanik, Vladislav and Theobalt, Christian},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2101.04104},
EPRINT = {2101.04104},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods.},
}
Endnote
%0 Report
%A Sarkar, Kripasindhu
%A Mehta, Dushyant
%A Xu, Weipeng
%A Golyanik, Vladislav
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T Neural Re-Rendering of Humans from a Single Image :
%G eng
%U http://hdl.handle.net/21.11116/0000-0007-CF05-B
%U https://arxiv.org/abs/2101.04104
%D 2021
%X Human re-rendering from a single image is a starkly under-constrained problem, and state-of-the-art algorithms often exhibit undesired artefacts, such as over-smoothing, unrealistic distortions of the body parts and garments, or implausible changes of the texture. To address these challenges, we propose a new method for neural re-rendering of a human under a novel user-defined pose and viewpoint, given one input image. Our algorithm represents body pose and shape as a parametric mesh which can be reconstructed from a single image and easily reposed. Instead of a colour-based UV texture map, our approach further employs a learned high-dimensional UV feature map to encode appearance. This rich implicit representation captures detailed appearance variation across poses, viewpoints, person identities and clothing styles better than learned colour texture maps. The body model with the rendered feature maps is fed through a neural image-translation network that creates the final rendered colour image. The above components are combined in an end-to-end-trained neural network architecture that takes as input a source person image, and images of the parametric body model in the source pose and desired target pose. Experimental evaluation demonstrates that our approach produces higher quality single image re-rendering results than existing methods.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
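One concrete building block mentioned in the abstract above is the lookup of a learned high-dimensional UV feature map at per-pixel UV coordinates produced by the parametric body mesh. The sketch below illustrates such a bilinear lookup in isolation; the function name, tensor shapes, and channel count are assumptions, and the surrounding networks are omitted.

```python
# Minimal sketch of one building block from the abstract: sampling a learned
# high-dimensional UV feature map at per-pixel UV coordinates produced by a
# parametric body mesh. Bilinear lookup only; network parts are omitted, and
# the array shapes below are assumptions.
import numpy as np

def sample_uv_features(feature_map, uv):
    """feature_map: (H, W, C) learned texture; uv: (N, 2) coordinates in [0, 1]."""
    H, W, _ = feature_map.shape
    x = uv[:, 0] * (W - 1)
    y = uv[:, 1] * (H - 1)
    x0, y0 = np.floor(x).astype(int), np.floor(y).astype(int)
    x1, y1 = np.minimum(x0 + 1, W - 1), np.minimum(y0 + 1, H - 1)
    wx, wy = (x - x0)[:, None], (y - y0)[:, None]
    # Bilinear combination of the four neighbouring texels.
    return ((1 - wx) * (1 - wy) * feature_map[y0, x0]
            + wx * (1 - wy) * feature_map[y0, x1]
            + (1 - wx) * wy * feature_map[y1, x0]
            + wx * wy * feature_map[y1, x1])

# Example: a 16-channel feature texture sampled at three UV locations.
tex = np.random.rand(256, 256, 16).astype(np.float32)
print(sample_uv_features(tex, np.array([[0.1, 0.2], [0.5, 0.5], [0.9, 0.9]])).shape)  # (3, 16)
```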
Surace, L., Wernikowski, M., Tursun, O.T., Myszkowski, K., Mantiuk, R., and Didyk, P. 2021. Learning Foveated Reconstruction to Preserve Perceived Image Statistics. https://arxiv.org/abs/2108.03499.
(arXiv: 2108.03499) Abstract
Foveated image reconstruction recovers a full image from a sparse set of samples distributed according to the human visual system's retinal sensitivity, which rapidly drops with eccentricity. Recently, the use of Generative Adversarial Networks was shown to be a promising solution for such a task, as they can successfully hallucinate missing image information. As with other supervised learning approaches, the definition of the loss function and the training strategy heavily influence the output quality. In this work, we pose the question of how to efficiently guide the training of foveated reconstruction techniques such that they are fully aware of the human visual system's capabilities and limitations, and therefore reconstruct visually important image features. Due to the nature of GAN-based solutions, we concentrate on human sensitivity to hallucination for different input sample densities. We present new psychophysical experiments, a dataset, and a procedure for training foveated image reconstruction. The strategy provides flexibility to the generator network by penalizing only perceptually important deviations in the output. As a result, the method aims to preserve perceived image statistics rather than natural image statistics. We evaluate our strategy and compare it to alternative solutions using a newly trained objective metric and user experiments.
Export
BibTeX
@online{Surace2108.03499,
TITLE = {Learning Foveated Reconstruction to Preserve Perceived Image Statistics},
AUTHOR = {Surace, Luca and Wernikowski, Marek and Tursun, Okan Tarhan and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Didyk, Piotr},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2108.03499},
EPRINT = {2108.03499},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Foveated image reconstruction recovers a full image from a sparse set of samples distributed according to the human visual system's retinal sensitivity, which rapidly drops with eccentricity. Recently, the use of Generative Adversarial Networks was shown to be a promising solution for such a task, as they can successfully hallucinate missing image information. As with other supervised learning approaches, the definition of the loss function and the training strategy heavily influence the output quality. In this work, we pose the question of how to efficiently guide the training of foveated reconstruction techniques such that they are fully aware of the human visual system's capabilities and limitations, and therefore reconstruct visually important image features. Due to the nature of GAN-based solutions, we concentrate on human sensitivity to hallucination for different input sample densities. We present new psychophysical experiments, a dataset, and a procedure for training foveated image reconstruction. The strategy provides flexibility to the generator network by penalizing only perceptually important deviations in the output. As a result, the method aims to preserve perceived image statistics rather than natural image statistics. We evaluate our strategy and compare it to alternative solutions using a newly trained objective metric and user experiments.},
}
Endnote
%0 Report
%A Surace, Luca
%A Wernikowski, Marek
%A Tursun, Okan Tarhan
%A Myszkowski, Karol
%A Mantiuk, Radosław
%A Didyk, Piotr
%+ External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Learning Foveated Reconstruction to Preserve Perceived Image Statistics :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-73D9-1
%U https://arxiv.org/abs/2108.03499
%D 2021
%X Foveated image reconstruction recovers a full image from a sparse set of samples distributed according to the human visual system's retinal sensitivity, which rapidly drops with eccentricity. Recently, the use of Generative Adversarial Networks was shown to be a promising solution for such a task, as they can successfully hallucinate missing image information. As with other supervised learning approaches, the definition of the loss function and the training strategy heavily influence the output quality. In this work, we pose the question of how to efficiently guide the training of foveated reconstruction techniques such that they are fully aware of the human visual system's capabilities and limitations, and therefore reconstruct visually important image features. Due to the nature of GAN-based solutions, we concentrate on human sensitivity to hallucination for different input sample densities. We present new psychophysical experiments, a dataset, and a procedure for training foveated image reconstruction. The strategy provides flexibility to the generator network by penalizing only perceptually important deviations in the output. As a result, the method aims to preserve perceived image statistics rather than natural image statistics. We evaluate our strategy and compare it to alternative solutions using a newly trained objective metric and user experiments.
%K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV
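To make the sampling setup in the abstract above concrete, the sketch below generates a binary sampling mask whose keep-probability drops with eccentricity from a gaze point. The falloff curve and constants are illustrative assumptions, not the paper's calibrated retinal-sensitivity model.

```python
# Minimal sketch of the sampling side of foveated reconstruction: keep pixels with a
# probability that falls off with eccentricity from the gaze point, so the periphery
# is sparsely sampled. The falloff curve and constants are illustrative assumptions.
import numpy as np

def foveated_sample_mask(height, width, gaze, fovea_radius=0.1, floor=0.02, seed=0):
    """Return a boolean mask; gaze and fovea_radius are in normalized image units."""
    ys, xs = np.mgrid[0:height, 0:width]
    ecc = np.hypot(xs / width - gaze[0], ys / height - gaze[1])   # eccentricity per pixel
    density = np.clip(fovea_radius / np.maximum(ecc, fovea_radius), floor, 1.0)
    rng = np.random.default_rng(seed)
    return rng.random((height, width)) < density

mask = foveated_sample_mask(480, 640, gaze=(0.5, 0.5))
print(mask.mean())  # fraction of pixels kept; highest density near the gaze point
```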
Van Onzenoodt, C., Singh, G., Ropinski, T., and Ritschel, T. 2021b. Blue Noise Plots. https://arxiv.org/abs/2102.04072.
(arXiv: 2102.04072) Abstract
We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While one-dimensional strip plots are often used to depict such data, one of their main problems is visual clutter, which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the dots representing the data points are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters and often leads to visually unappealing plots in which overlap might still occur. To overcome these shortcomings, we introduce Blue Noise Plots, where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D, i.e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both a quantitative and a qualitative user study.
Export
BibTeX
@online{Onzenoodt_2102.04072,
TITLE = {Blue Noise Plots},
AUTHOR = {van Onzenoodt, Christian and Singh, Gurprit and Ropinski, Timo and Ritschel, Tobias},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2102.04072},
EPRINT = {2102.04072},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While one-dimensional strip plots are often used to depict such data, one of their main problems is visual clutter, which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the dots representing the data points are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters and often leads to visually unappealing plots in which overlap might still occur. To overcome these shortcomings, we introduce Blue Noise Plots, where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D, i.e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both a quantitative and a qualitative user study.},
}
Endnote
%0 Report
%A van Onzenoodt, Christian
%A Singh, Gurprit
%A Ropinski, Timo
%A Ritschel, Tobias
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Blue Noise Plots :
%G eng
%U http://hdl.handle.net/21.11116/0000-0008-01ED-C
%U https://arxiv.org/abs/2102.04072
%D 2021
%X We propose Blue Noise Plots, two-dimensional dot plots that depict data points of univariate data sets. While one-dimensional strip plots are often used to depict such data, one of their main problems is visual clutter, which results from overlap. To reduce this overlap, jitter plots were introduced, whereby an additional, non-encoding plot dimension is introduced, along which the dots representing the data points are randomly perturbed. Unfortunately, this randomness can suggest non-existent clusters and often leads to visually unappealing plots in which overlap might still occur. To overcome these shortcomings, we introduce Blue Noise Plots, where random jitter along the non-encoding plot dimension is replaced by optimizing all dots to keep a minimum distance in 2D, i.e., Blue Noise. We evaluate the effectiveness as well as the aesthetics of Blue Noise Plots through both a quantitative and a qualitative user study.
%K Computer Science, Graphics, cs.GR
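The abstract above contrasts random jitter with a blue-noise placement that enforces a minimum 2D distance between dots. The sketch below approximates this with greedy dart throwing along the non-encoding axis; the paper optimizes all dots jointly, so the function name, radius, and retry count are simplifying assumptions.

```python
# Minimal sketch of the idea behind Blue Noise Plots: instead of random jitter along the
# non-encoding axis, place each dot greedily (dart throwing) so that all dots keep a
# minimum 2D distance. Greedy placement and the radius value are simplifying assumptions.
import numpy as np

def blue_noise_jitter(values, radius=0.03, width=1.0, tries=200, seed=0):
    """values: 1D data mapped to [0, 1]; returns (x=value, y=jitter) dot positions."""
    rng = np.random.default_rng(seed)
    placed = []
    for v in values:
        for _ in range(tries):
            y = rng.uniform(0.0, width)
            if all((v - px) ** 2 + (y - py) ** 2 >= radius ** 2 for px, py in placed):
                break  # found a conflict-free spot
        placed.append((v, y))  # fall back to the last candidate if no free spot was found
    return np.array(placed)

data = np.clip(np.random.default_rng(1).normal(0.5, 0.15, 200), 0, 1)
dots = blue_noise_jitter(data)
print(dots.shape)  # (200, 2) dot positions with approximately blue-noise spacing
```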
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2021. Learning a self-supervised tone mapping operator via feature contrast masking loss. https://arxiv.org/abs/2110.09866.
(arXiv: 2110.09866) Abstract
High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited; therefore, tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods; however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as a training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric); therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge of contrast perception that allows for directly comparing the content in the HDR and tone-mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.
Export
BibTeX
@online{Wang_2110.09866,
TITLE = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2110.09866},
EPRINT = {2110.09866},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited; therefore, tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods; however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as a training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric); therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge of contrast perception that allows for directly comparing the content in the HDR and tone-mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.},
}
Endnote
%0 Report
%A Wang, Chao
%A Chen, Bin
%A Seidel, Hans-Peter
%A Myszkowski, Karol
%A Serrano, Ana
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Learning a self-supervised tone mapping operator via feature contrast masking loss :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-710E-9
%U https://arxiv.org/abs/2110.09866
%D 2021
%X High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited; therefore, tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods; however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as a training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric); therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge of contrast perception that allows for directly comparing the content in the HDR and tone-mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
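The central idea in the abstract above, feature contrast maps that normalize local feature differences by the average feature magnitude in a local neighborhood, can be illustrated with a simple stand-in: plain image gradients replace VGG feature differences, and the window size and epsilon are assumptions.

```python
# Minimal sketch of the "feature contrast map" idea from the abstract: local feature
# differences are normalized by the average feature magnitude in a neighborhood, so a
# loss built on them can account for contrast masking. A plain gradient stands in for
# VGG feature differences here; the window size and epsilon are assumptions.
import numpy as np
from scipy.ndimage import uniform_filter

def feature_contrast_map(feat, window=7, eps=1e-4):
    """feat: (H, W, C) feature map; returns a per-pixel, per-channel contrast map."""
    gy, gx = np.gradient(feat, axis=(0, 1))           # local feature differences
    diff = np.hypot(gx, gy)
    local_mag = uniform_filter(np.abs(feat), size=(window, window, 1))  # neighborhood magnitude
    return diff / (local_mag + eps)                    # contrast relative to local activity

# Example: compare contrast maps of an HDR feature tensor and its tone-mapped counterpart.
hdr_feat = np.random.rand(64, 64, 8)
tm_feat = np.random.rand(64, 64, 8)
loss = np.mean(np.abs(feature_contrast_map(hdr_feat) - feature_contrast_map(tm_feat)))
print(loss)
```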
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2021. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. https://arxiv.org/abs/2105.13168.
(arXiv: 2105.13168) Abstract
The humble loop shrinking property played a central role in the inception of modern topology, but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces, where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface, encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms the state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion, thus avoiding commonly encountered issues when using Morse functions.
Export
BibTeX
@online{Weinrauch_2105.13168,
TITLE = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces},
AUTHOR = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2105.13168},
EPRINT = {2105.13168},
EPRINTTYPE = {arXiv},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
ABSTRACT = {The humble loop shrinking property played a central role in the inception of modern topology, but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces, where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface, encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms the state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion, thus avoiding commonly encountered issues when using Morse functions.},
}
Endnote
%0 Report
%A Weinrauch, Alexander
%A Seidel, Hans-Peter
%A Mlakar, Daniel
%A Steinberger, Markus
%A Zayer, Rhaleb
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-70EE-D
%U https://arxiv.org/abs/2105.13168
%D 2021
%X The humble loop shrinking property played a central role in the inception of modern topology, but it has been eclipsed by more abstract algebraic formalism. This is particularly true in the context of detecting relevant non-contractible loops on surfaces, where elaborate homological and/or graph theoretical constructs are favored in algorithmic solutions. In this work, we devise a variational analogy to the loop shrinking property and show that it yields a simple, intuitive, yet powerful solution allowing a streamlined treatment of the problem of handle and tunnel loop detection. Our formalization tracks the evolution of a diffusion front randomly initiated on a single location on the surface. Capitalizing on a diffuse interface representation combined with a set of rules for concurrent front interactions, we develop a dynamic data structure for tracking the evolution on the surface, encoded as a sparse matrix which serves for performing both diffusion numerics and loop detection and acts as the workhorse of our fully parallel implementation. The substantiated results suggest our approach outperforms the state of the art and robustly copes with highly detailed geometric models. As a byproduct, our approach can be used to construct Reeb graphs by diffusion, thus avoiding commonly encountered issues when using Morse functions.
%K Computer Science, Graphics, cs.GR,Computer Science, Computational Geometry, cs.CG,Mathematics, Algebraic Topology, math.AT
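The abstract above describes tracking a diffusion front initiated at a single vertex, with a sparse-matrix encoding driving the numerics. The sketch below shows only that machinery on a toy edge graph, using implicit Euler steps with a graph Laplacian; the concurrent-front interaction rules and the loop detection itself are not reproduced, and the tetrahedron "mesh" and step size are illustrative assumptions.

```python
# Minimal sketch of the driving machinery described in the abstract: a diffusion front
# grown from a single seed vertex, time-stepped with a sparse graph Laplacian. The
# loop-detection rules for interacting fronts are not reproduced; the tiny edge graph
# and the step size are illustrative assumptions.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve

def graph_laplacian(n_vertices, edges):
    i, j = np.array(edges).T
    w = np.ones(len(edges))
    A = sp.coo_matrix((np.r_[w, w], (np.r_[i, j], np.r_[j, i])), shape=(n_vertices, n_vertices))
    return sp.csr_matrix(sp.diags(np.asarray(A.sum(axis=1)).ravel()) - A)

def diffuse_from_seed(L, seed, steps=10, dt=0.1):
    n = L.shape[0]
    u = np.zeros(n)
    u[seed] = 1.0                              # front initiated at a single vertex
    M = sp.identity(n, format="csc") + dt * L  # implicit Euler step: (I + dt*L) u_next = u
    for _ in range(steps):
        u = spsolve(M, u)
    return u

edges = [(0, 1), (1, 2), (2, 0), (0, 3), (1, 3), (2, 3)]  # a tetrahedron's edge graph
print(diffuse_from_seed(graph_laplacian(4, edges), seed=0))
```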
Thesis
Habermann, M. 2021. Real-time human performance capture and synthesis. nbn:de:bsz:291--ds-349617.
Abstract
Most of the images one finds in the media, such as on the Internet or in textbooks and magazines, contain humans as the main point of attention. Thus, there is an inherent necessity for industry, society, and private persons to be able to thoroughly analyze and synthesize the human-related content in these images. One aspect of this analysis and subject of this thesis is to infer the 3D pose and surface deformation, using only visual information, which is also known as human performance capture. Human performance capture enables the tracking of virtual characters from real-world observations, and this is key for visual effects, games, VR, and AR, to name just a few application areas. However, traditional capture methods usually rely on expensive multi-view (marker-based) systems that are prohibitively expensive for the vast majority of people, or they use depth sensors, which are still not as common as single color cameras. Recently, some approaches have attempted to solve the task by assuming only a single RGB image is given. Nonetheless, they can either not track the dense deforming geometry of the human, such as the clothing layers, or they are far from real time, which is indispensable for many applications. To overcome these shortcomings, this thesis proposes two monocular human performance capture methods, which for the first time allow the real-time capture of the dense deforming geometry as well as an unseen 3D accuracy for pose and surface deformations. At the technical core, this work introduces novel GPU-based and data-parallel optimization strategies in conjunction with other algorithmic design choices that are all geared towards real-time performance at high accuracy. Moreover, this thesis presents a new weakly supervised multiview training strategy combined with a fully differentiable character representation that shows superior 3D accuracy. However, there is more to human-related Computer Vision than only the analysis of people in images. It is equally important to synthesize new images of humans in unseen poses and also from camera viewpoints that have not been observed in the real world. Such tools are essential for the movie industry because they, for example, allow the synthesis of photo-realistic virtual worlds with real-looking humans or of contents that are too dangerous for actors to perform on set. But also video conferencing and telepresence applications can benefit from photo-real 3D characters, as they can enhance the immersive experience of these applications. Here, the traditional Computer Graphics pipeline for rendering photo-realistic images involves many tedious and time-consuming steps that require expert knowledge and are far from real time. Traditional rendering involves character rigging and skinning, the modeling of the surface appearance properties, and physically based ray tracing. Recent learning-based methods attempt to simplify the traditional rendering pipeline and instead learn the rendering function from data resulting in methods that are easier accessible to non-experts. However, most of them model the synthesis task entirely in image space such that 3D consistency cannot be achieved, and/or they fail to model motion- and view-dependent appearance effects. To this end, this thesis presents a method and ongoing work on character synthesis, which allow the synthesis of controllable photoreal characters that achieve motion- and view-dependent appearance effects as well as 3D consistency and which run in real time. 
This is technically achieved by a novel coarse-to-fine geometric character representation for efficient synthesis, which can be solely supervised on multi-view imagery. Furthermore, this work shows how such a geometric representation can be combined with an implicit surface representation to boost synthesis and geometric quality.
Export
BibTeX
@phdthesis{Habermannphd2021,
TITLE = {Real-time human performance capture and synthesis},
AUTHOR = {Habermann, Marc},
LANGUAGE = {eng},
URL = {nbn:de:bsz:291--ds-349617},
DOI = {10.22028/D291-34961},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
ABSTRACT = {Most of the images one finds in the media, such as on the Internet or in textbooks and magazines, contain humans as the main point of attention. Thus, there is an inherent necessity for industry, society, and private persons to be able to thoroughly analyze and synthesize the human-related content in these images. One aspect of this analysis and subject of this thesis is to infer the 3D pose and surface deformation, using only visual information, which is also known as human performance capture. Human performance capture enables the tracking of virtual characters from real-world observations, and this is key for visual effects, games, VR, and AR, to name just a few application areas. However, traditional capture methods usually rely on expensive multi-view (marker-based) systems that are prohibitively expensive for the vast majority of people, or they use depth sensors, which are still not as common as single color cameras. Recently, some approaches have attempted to solve the task by assuming only a single RGB image is given. Nonetheless, they can either not track the dense deforming geometry of the human, such as the clothing layers, or they are far from real time, which is indispensable for many applications. To overcome these shortcomings, this thesis proposes two monocular human performance capture methods, which for the first time allow the real-time capture of the dense deforming geometry as well as an unseen 3D accuracy for pose and surface deformations. At the technical core, this work introduces novel GPU-based and data-parallel optimization strategies in conjunction with other algorithmic design choices that are all geared towards real-time performance at high accuracy. Moreover, this thesis presents a new weakly supervised multiview training strategy combined with a fully differentiable character representation that shows superior 3D accuracy. However, there is more to human-related Computer Vision than only the analysis of people in images. It is equally important to synthesize new images of humans in unseen poses and also from camera viewpoints that have not been observed in the real world. Such tools are essential for the movie industry because they, for example, allow the synthesis of photo-realistic virtual worlds with real-looking humans or of contents that are too dangerous for actors to perform on set. But also video conferencing and telepresence applications can benefit from photo-real 3D characters, as they can enhance the immersive experience of these applications. Here, the traditional Computer Graphics pipeline for rendering photo-realistic images involves many tedious and time-consuming steps that require expert knowledge and are far from real time. Traditional rendering involves character rigging and skinning, the modeling of the surface appearance properties, and physically based ray tracing. Recent learning-based methods attempt to simplify the traditional rendering pipeline and instead learn the rendering function from data resulting in methods that are easier accessible to non-experts. However, most of them model the synthesis task entirely in image space such that 3D consistency cannot be achieved, and/or they fail to model motion- and view-dependent appearance effects. To this end, this thesis presents a method and ongoing work on character synthesis, which allow the synthesis of controllable photoreal characters that achieve motion- and view-dependent appearance effects as well as 3D consistency and which run in real time. 
This is technically achieved by a novel coarse-to-fine geometric character representation for efficient synthesis, which can be solely supervised on multi-view imagery. Furthermore, this work shows how such a geometric representation can be combined with an implicit surface representation to boost synthesis and geometric quality.},
}
Endnote
%0 Thesis
%A Habermann, Marc
%Y Theobalt, Christian
%A referee: Seidel, Hans-Peter
%A referee: Hilton, Adrian
%+ Computer Graphics, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Real-time human performance capture and synthesis :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-7D87-3
%R 10.22028/D291-34961
%U nbn:de:bsz:291--ds-349617
%F OTHER: hdl:20.500.11880/31986
%I Universität des Saarlandes
%C Saarbrücken
%D 2021
%P 153 p.
%V phd
%9 phd
%X Most of the images one finds in the media, such as on the Internet or in textbooks and magazines, contain humans as the main point of attention. Thus, there is an inherent necessity for industry, society, and private persons to be able to thoroughly analyze and synthesize the human-related content in these images. One aspect of this analysis and subject of this thesis is to infer the 3D pose and surface deformation, using only visual information, which is also known as human performance capture. Human performance capture enables the tracking of virtual characters from real-world observations, and this is key for visual effects, games, VR, and AR, to name just a few application areas. However, traditional capture methods usually rely on expensive multi-view (marker-based) systems that are prohibitively expensive for the vast majority of people, or they use depth sensors, which are still not as common as single color cameras. Recently, some approaches have attempted to solve the task by assuming only a single RGB image is given. Nonetheless, they can either not track the dense deforming geometry of the human, such as the clothing layers, or they are far from real time, which is indispensable for many applications. To overcome these shortcomings, this thesis proposes two monocular human performance capture methods, which for the first time allow the real-time capture of the dense deforming geometry as well as an unseen 3D accuracy for pose and surface deformations. At the technical core, this work introduces novel GPU-based and data-parallel optimization strategies in conjunction with other algorithmic design choices that are all geared towards real-time performance at high accuracy. Moreover, this thesis presents a new weakly supervised multiview training strategy combined with a fully differentiable character representation that shows superior 3D accuracy. However, there is more to human-related Computer Vision than only the analysis of people in images. It is equally important to synthesize new images of humans in unseen poses and also from camera viewpoints that have not been observed in the real world. Such tools are essential for the movie industry because they, for example, allow the synthesis of photo-realistic virtual worlds with real-looking humans or of contents that are too dangerous for actors to perform on set. But also video conferencing and telepresence applications can benefit from photo-real 3D characters, as they can enhance the immersive experience of these applications. Here, the traditional Computer Graphics pipeline for rendering photo-realistic images involves many tedious and time-consuming steps that require expert knowledge and are far from real time. Traditional rendering involves character rigging and skinning, the modeling of the surface appearance properties, and physically based ray tracing. Recent learning-based methods attempt to simplify the traditional rendering pipeline and instead learn the rendering function from data resulting in methods that are easier accessible to non-experts. However, most of them model the synthesis task entirely in image space such that 3D consistency cannot be achieved, and/or they fail to model motion- and view-dependent appearance effects. To this end, this thesis presents a method and ongoing work on character synthesis, which allow the synthesis of controllable photoreal characters that achieve motion- and view-dependent appearance effects as well as 3D consistency and which run in real time. 
This is technically achieved by a novel coarse-to-fine geometric character representation for efficient synthesis, which can be solely supervised on multi-view imagery. Furthermore, this work shows how such a geometric representation can be combined with an implicit surface representation to boost synthesis and geometric quality.
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/31986
Tewari, A. 2021. Self-supervised reconstruction and synthesis of faces. nbn:de:bsz:291--ds-345982.
Export
BibTeX
@phdthesis{Tewariphd2021,
TITLE = {Self-supervised reconstruction and synthesis of faces},
AUTHOR = {Tewari, Ayush},
LANGUAGE = {eng},
URL = {nbn:de:bsz:291--ds-345982},
DOI = {10.22028/D291-34598},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2021},
MARGINALMARK = {$\bullet$},
DATE = {2021},
}
Endnote
%0 Thesis
%A Tewari, Ayush
%Y Theobalt, Christian
%A referee: Zollhöfer, Michael
%A referee: Wonka, Peter
%+ Computer Graphics, MPI for Informatics, Max Planck Society
International Max Planck Research School, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Self-supervised reconstruction and synthesis of faces :
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-9CD2-A
%R 10.22028/D291-34598
%U nbn:de:bsz:291--ds-345982
%F OTHER: hdl:20.500.11880/31754
%I Universität des Saarlandes
%C Saarbrücken
%D 2021
%P 173 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/31754