Current Year
Article
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2023. Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model. ACM Transactions on Applied Perception 20, 1.
BibTeX
@article{Arabadzhiyska23,
TITLE = {Practical Saccade Prediction for Head-Mounted Displays: {T}owards a Comprehensive Model},
AUTHOR = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr},
LANGUAGE = {eng},
ISSN = {1544-3558},
DOI = {10.1145/3568311},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Applied Perception},
VOLUME = {20},
NUMBER = {1},
PAGES = {1--23},
EID = {2},
}
Endnote
%0 Journal Article
%A Arabadzhiyska, Elena
%A Tursun, Cara
%A Seidel, Hans-Peter
%A Didyk, Piotr
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-B76B-E
%R 10.1145/3568311
%7 2023
%D 2023
%J ACM Transactions on Applied Perception
%V 20
%N 1
%& 1
%P 1 - 23
%Z sequence number: 2
%I ACM
%C New York, NY
%@ false
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2023. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. Computer Graphics Forum (Proc. EUROGRAPHICS 2023) 42, 2.
BibTeX
@article{Cogalan_Eurographics23,
TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors},
AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.14748},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
VOLUME = {42},
NUMBER = {2},
PAGES = {119--131},
BOOKTITLE = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2023)},
}
Endnote
%0 Journal Article
%A Çoğalan, Uğur
%A Bemana, Mojtaba
%A Seidel, Hans-Peter
%A Myszkowski, Karol
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-F953-E
%R 10.1111/cgf.14748
%7 2023
%D 2023
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 42
%N 2
%& 119
%P 119 - 131
%I Blackwell-Wiley
%C Oxford
%@ false
%B The European Association for Computer Graphics 43rd Annual Conference
%O EUROGRAPHICS 2023 EG 2023 Saarbrücken, Germany, May 8-12, 2023
Habermann, M., Xu, W., Zollhöfer, M., Pons-Moll, G., and Theobalt, C. 2023. A Deeper Look into DeepCap. IEEE Transactions on Pattern Analysis and Machine Intelligence 45, 4.
Abstract
Human performance capture is a highly important computer vision problem with many applications in movie production and virtual/augmented reality. Many previous performance capture approaches either required expensive multi-view setups or did not recover dense space-time coherent geometry with frame-to-frame correspondences. We propose a novel deep learning approach for monocular dense human performance capture. Our method is trained in a weakly supervised manner based on multi-view supervision completely removing the need for training data with 3D ground truth annotations. The network architecture is based on two separate networks that disentangle the task into a pose estimation and a non-rigid surface deformation step. Extensive qualitative and quantitative evaluations show that our approach outperforms the state of the art in terms of quality and robustness. This work is an extended version of DeepCap where we provide more detailed explanations, comparisons and results as well as applications.
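As a purely illustrative reading of the two-network disentanglement described in the abstract (one network predicting skeletal pose, a second predicting non-rigid surface deformation), the sketch below shows what such a split could look like in PyTorch. The class name, backbone layers, and output dimensions are assumptions for illustration only, not the authors' DeepCap implementation.

import torch
import torch.nn as nn

class TwoStageCaptureSketch(nn.Module):
    """Hypothetical two-network split: pose estimation + non-rigid deformation.
    Placeholder layers and dimensions; not the paper's code."""
    def __init__(self, num_joints: int = 24, num_graph_nodes: int = 500):
        super().__init__()
        def backbone(out_dim: int) -> nn.Module:
            # Tiny image encoder ending in a fully connected regressor.
            return nn.Sequential(
                nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(),
                nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(32, out_dim))
        self.pose_net = backbone(num_joints * 3)          # skeletal pose parameters
        self.deform_net = backbone(num_graph_nodes * 3)   # per-node non-rigid displacements

    def forward(self, image: torch.Tensor):
        return self.pose_net(image), self.deform_net(image)

# Example: a single RGB frame yields a pose vector and a deformation vector.
pose, deformation = TwoStageCaptureSketch()(torch.randn(1, 3, 256, 256))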
BibTeX
@article{Habermann2111.10563,
TITLE = {A Deeper Look into {DeepCap}},
AUTHOR = {Habermann, Marc and Xu, Weipeng and Zollh{\"o}fer, Michael and Pons-Moll, Gerard and Theobalt, Christian},
LANGUAGE = {eng},
ISSN = {0162-8828},
DOI = {10.1109/TPAMI.2021.3093553},
PUBLISHER = {IEEE},
ADDRESS = {Piscataway, NJ},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
DATE = {2023},
ABSTRACT = {Human performance capture is a highly important computer vision problem with many applications in movie production and virtual/augmented reality. Many previous performance capture approaches either required expensive multi-view setups or did not recover dense space-time coherent geometry with frame-to-frame correspondences. We propose a novel deep learning approach for monocular dense human performance capture. Our method is trained in a weakly supervised manner based on multi-view supervision completely removing the need for training data with 3D ground truth annotations. The network architecture is based on two separate networks that disentangle the task into a pose estimation and a non-rigid surface deformation step. Extensive qualitative and quantitative evaluations show that our approach outperforms the state of the art in terms of quality and robustness. This work is an extended version of DeepCap where we provide more detailed explanations, comparisons and results as well as applications.},
JOURNAL = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
VOLUME = {45},
NUMBER = {4},
PAGES = {4009--4022},
}
Endnote
%0 Journal Article
%A Habermann, Marc
%A Xu, Weipeng
%A Zollhöfer, Michael
%A Pons-Moll, Gerard
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T A Deeper Look into DeepCap : (Invited Paper)
%G eng
%U http://hdl.handle.net/21.11116/0000-0009-8C33-0
%R 10.1109/TPAMI.2021.3093553
%7 2021
%D 2023
%X Human performance capture is a highly important computer vision problem with many applications in movie production and virtual/augmented reality. Many previous performance capture approaches either required expensive multi-view setups or did not recover dense space-time coherent geometry with frame-to-frame correspondences. We propose a novel deep learning approach for monocular dense human performance capture. Our method is trained in a weakly supervised manner based on multi-view supervision completely removing the need for training data with 3D ground truth annotations. The network architecture is based on two separate networks that disentangle the task into a pose estimation and a non-rigid surface deformation step. Extensive qualitative and quantitative evaluations show that our approach outperforms the state of the art in terms of quality and robustness. This work is an extended version of DeepCap where we provide more detailed explanations, comparisons and results as well as applications.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
%J IEEE Transactions on Pattern Analysis and Machine Intelligence
%O IEEE Trans. Pattern Anal. Mach. Intell.
%V 45
%N 4
%& 4009
%P 4009 - 4022
%I IEEE
%C Piscataway, NJ
%@ false
Jambon, C., Kerbl, B., Kopanas, G., Diolatzis, S., Leimkühler, T., and Drettakis, G. NeRFshop: Interactive Editing of Neural Radiance Fields. Proceedings of the ACM on Computer Graphics and Interactive Techniques 6, 1.
(Accepted/in press)
BibTeX
@article{JKKDLD23,
TITLE = {{NeRFshop}: {I}nteractive Editing of Neural Radiance Fields},
AUTHOR = {Jambon, Cl{\'e}ment and Kerbl, Bernhard and Kopanas, Georgios and Diolatzis, Stavros and Leimk{\"u}hler, Thomas and Drettakis, George},
LANGUAGE = {eng},
ISSN = {2577-6193},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
JOURNAL = {Proceedings of the ACM on Computer Graphics and Interactive Techniques},
VOLUME = {6},
NUMBER = {1},
}
Endnote
%0 Journal Article
%A Jambon, Clément
%A Kerbl, Bernhard
%A Kopanas, Georgios
%A Diolatzis, Stavros
%A Leimkühler, Thomas
%A Drettakis, George
%+ External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T NeRFshop: Interactive Editing of Neural Radiance Fields :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-BEE0-1
%D 2023
%J Proceedings of the ACM on Computer Graphics and Interactive Techniques
%V 6
%N 1
%I ACM
%C New York, NY
%@ false
%U http://www-sop.inria.fr/reves/Basilic/2023/JKKDLD23
Surace, L., Wernikowski, M., Tursun, C., Myszkowski, K., Mantiuk, R., and Didyk, P. 2023. Learning GAN-based Foveated Reconstruction to Recover Perceptually Important Image Features. ACM Transactions on Applied Perception.
BibTeX
@article{Surace23,
TITLE = {Learning {GAN}-based Foveated Reconstruction to Recover Perceptually Important Image Features},
AUTHOR = {Surace, Luca and Wernikowski, Marek and Tursun, Cara and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Didyk, Piotr},
LANGUAGE = {eng},
ISSN = {1544-3558},
DOI = {10.1145/3583072},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Applied Perception},
}
Endnote
%0 Journal Article
%A Surace, Luca
%A Wernikowski, Marek
%A Tursun, Cara
%A Myszkowski, Karol
%A Mantiuk, Radosław
%A Didyk, Piotr
%+ External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Learning GAN-based Foveated Reconstruction to Recover Perceptually Important Image Features :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A00D-1
%R 10.1145/3583072
%7 2023
%D 2023
%J ACM Transactions on Applied Perception
%I ACM
%C New York, NY
%@ false
Wang, C., Serrano, A., Pan, X., et al. A Neural Implicit Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023).
(Accepted/in press)
BibTeX
@article{Wang_SIGGRAPHASIA23,
TITLE = {A Neural Implicit Representation for the Image Stack: {D}epth, All in Focus, and High Dynamic Range},
AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Wolski, Krzysztof and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
LANGUAGE = {eng},
ISSN = {0730-0301},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2023},
}
Endnote
%0 Journal Article
%A Wang, Chao
%A Serrano, Ana
%A Pan, Xingang
%A Wolski, Krzysztof
%A Chen, Bin
%A Seidel, Hans-Peter
%A Theobalt, Christian
%A Myszkowski, Karol
%A Leimkühler, Thomas
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T A Neural Implicit Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range :
%G eng
%U http://hdl.handle.net/21.11116/0000-000D-B80B-8
%D 2023
%J ACM Transactions on Graphics
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2023
%O ACM SIGGRAPH Asia 2023 Sydney, Australia, 12-15 December 2023 SA '23 SA 2023
Weinrauch, A., Seidel, H.-P., Mlakar, D., Steinberger, M., and Zayer, R. 2023. A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces. Computer Graphics Forum 42, 2.
BibTeX
@article{Weinrauch_CGF23,
TITLE = {A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and {Reeb} Graph Construction on Surfaces},
AUTHOR = {Weinrauch, Alexander and Seidel, Hans-Peter and Mlakar, Daniel and Steinberger, Markus and Zayer, Rhaleb},
LANGUAGE = {eng},
ISSN = {0167-7055},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computer Graphics Forum},
VOLUME = {42},
NUMBER = {2},
}
Endnote
%0 Journal Article
%A Weinrauch, Alexander
%A Seidel, Hans-Peter
%A Mlakar, Daniel
%A Steinberger, Markus
%A Zayer, Rhaleb
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T A Variational Loop Shrinking Analogy for Handle and Tunnel Detection and Reeb Graph Construction on Surfaces :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-B851-9
%7 2023
%D 2023
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 42
%N 2
%I Blackwell-Wiley
%C Oxford
%@ false
Conference Paper
Haynes, A., Reed, C.N., Nordmoen, C., and Skach, S. 2023. Being Meaningful: Weaving Soma-Reflective Technological Mediations into the Fabric of Daily Life. TEI ’23, Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction, ACM.
BibTeX
@inproceedings{Haynes_TEI23,
TITLE = {Being Meaningful: {W}eaving Soma-Reflective Technological Mediations into the Fabric of Daily Life},
AUTHOR = {Haynes, Alice and Reed, Courtney N. and Nordmoen, Charlotte and Skach, Sophie},
LANGUAGE = {eng},
ISBN = {978-1-4503-9977-7},
DOI = {10.1145/3569009.3571844},
PUBLISHER = {ACM},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {TEI '23, Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction},
PAGES = {1--5},
EID = {68},
ADDRESS = {Warsaw, Poland},
}
Endnote
%0 Conference Proceedings
%A Haynes, Alice
%A Reed, Courtney N.
%A Nordmoen, Charlotte
%A Skach, Sophie
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Being Meaningful: Weaving Soma-Reflective Technological Mediations into the Fabric of Daily Life :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-BDEB-7
%R 10.1145/3569009.3571844
%D 2023
%B Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction
%Z date of event: 2023-02-26 - 2023-03-01
%C Warsaw, Poland
%B TEI '23
%P 1 - 5
%Z sequence number: 68
%I ACM
%@ 978-1-4503-9977-7
Liao, K., Tricard, T., Piovarči, M., Seidel, H.-P., and Babaei, V. Learning Deposition Policies for Fused Multi-Material 3D Printing. IEEE International Conference on Robotics and Automation (ICRA 2023), IEEE.
(Accepted/in press)
BibTeX
@inproceedings{Liao_ICRA2023,
TITLE = {Learning Deposition Policies for Fused Multi-Material {3D} Printing},
AUTHOR = {Liao, Kang and Tricard, Thibault and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Babaei, Vahid},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE International Conference on Robotics and Automation (ICRA 2023)},
ADDRESS = {London, UK},
}
Endnote
%0 Conference Proceedings
%A Liao, Kang
%A Tricard, Thibault
%A Piovarči, Michal
%A Seidel, Hans-Peter
%A Babaei, Vahid
%+ External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Learning Deposition Policies for Fused Multi-Material 3D Printing :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-44C2-C
%D 2023
%B IEEE International Conference on Robotics and Automation
%Z date of event: 2023-05-29 - 2023-06-02
%C London, UK
%B IEEE International Conference on Robotics and Automation
%I IEEE
Reed, C.N., Strohmeier, P., and McPherson, A. 2023. Negotiating Experience and Communicating Information Through Abstract Metaphor. CHI ’23, CHI Conference on Human Factors in Computing Systems, ACM.
BibTeX
@inproceedings{Reed_CHI2023,
TITLE = {Negotiating Experience and Communicating Information Through Abstract Metaphor},
AUTHOR = {Reed, Courtney N. and Strohmeier, Paul and McPherson, Andrew},
LANGUAGE = {eng},
ISBN = {978-1-4503-9421-5},
DOI = {10.1145/3544548.3580700},
PUBLISHER = {ACM},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '23, CHI Conference on Human Factors in Computing Systems},
PAGES = {1--16},
EID = {185},
ADDRESS = {Hamburg, Germany},
}
Endnote
%0 Conference Proceedings
%A Reed, Courtney N.
%A Strohmeier, Paul
%A McPherson, Andrew
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Negotiating Experience and Communicating Information Through Abstract Metaphor :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A035-3
%R 10.1145/3544548.3580700
%D 2023
%B CHI Conference on Human Factors in Computing Systems
%Z date of event: 2023-04-23 - 2023-04-28
%C Hamburg, Germany
%B CHI '23
%P 1 - 16
%Z sequence number: 185
%I ACM
%@ 978-1-4503-9421-5
Reed, C.N. and McPherson, A.P. 2023. The Body as Sound: Unpacking Vocal Embodiment through Auditory Biofeedback. TEI ’23, Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction, ACM.
BibTeX
@inproceedings{Reed_TEI23,
TITLE = {The Body as Sound: {U}npacking Vocal Embodiment through Auditory Biofeedback},
AUTHOR = {Reed, Courtney N. and McPherson, Andrew P.},
LANGUAGE = {eng},
ISBN = {978-1-4503-9977-7},
DOI = {10.1145/3569009.3572738},
PUBLISHER = {ACM},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {TEI '23, Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction},
PAGES = {1--15},
EID = {7},
ADDRESS = {Warsaw, Poland},
}
Endnote
%0 Conference Proceedings
%A Reed, Courtney N.
%A McPherson, Andrew P.
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T The Body as Sound: Unpacking Vocal Embodiment through Auditory Biofeedback :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A02B-F
%R 10.1145/3569009.3572738
%D 2023
%B Seventeenth International Conference on Tangible, Embedded, and Embodied Interaction
%Z date of event: 2023-02-26 - 2023-03-01
%C Warsaw, Poland
%B TEI '23
%P 1 - 15
%Z sequence number: 7
%I ACM
%@ 978-1-4503-9977-7
Reed, C.N. As the Luthiers Do: Designing with a Living, Growing, Changing Body-Material. CHI ’23 Workshop - Body x Materials.
(Accepted/in press)
BibTeX
@inproceedings{reed2023bodyx,
TITLE = {As the Luthiers Do: {D}esigning with a Living, Growing, Changing Body-Material},
AUTHOR = {Reed, Courtney N.},
LANGUAGE = {eng},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '23 Workshop -- Body x Materials},
ADDRESS = {Hamburg, Germany},
}
Endnote
%0 Conference Proceedings
%A Reed, Courtney N.
%+ Computer Graphics, MPI for Informatics, Max Planck Society
%T As the Luthiers Do: Designing with a Living, Growing, Changing Body-Material :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-BDD8-C
%D 2023
%B ACM CHI Workshop on Body X Materials
%Z date of event: 2023-04-23 - 2023-04-23
%C Hamburg, Germany
%B CHI ’23 Workshop - Body x Materials
Sabnis, N., Wittchen, D., Reed, C.N., Pourjafarian, N., Steimle, J., and Strohmeier, P. Haptic Servos: Self-Contained Vibrotactile Rendering System for Creating or Augmenting Material Experiences. CHI ’23, CHI Conference on Human Factors in Computing Systems, ACM.
(Accepted/in press)
BibTeX
@inproceedings{Sabnis_CHI2023,
TITLE = {Haptic Servos: {S}elf-Contained Vibrotactile Rendering System for Creating or Augmenting Material Experiences},
AUTHOR = {Sabnis, Nihar and Wittchen, Dennis and Reed, Courtney N. and Pourjafarian, Narjes and Steimle, J{\"u}rgen and Strohmeier, Paul},
LANGUAGE = {eng},
DOI = {10.1145/3544548.3580716},
PUBLISHER = {ACM},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '23, CHI Conference on Human Factors in Computing Systems},
ADDRESS = {Hamburg, Germany},
}
Endnote
%0 Conference Proceedings
%A Sabnis, Nihar
%A Wittchen, Dennis
%A Reed, Courtney N.
%A Pourjafarian, Narjes
%A Steimle, Jürgen
%A Strohmeier, Paul
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Haptic Servos: Self-Contained Vibrotactile Rendering System for Creating or Augmenting Material Experiences :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A03C-C
%R 10.1145/3544548.3580716
%D 2023
%B CHI Conference on Human Factors in Computing Systems
%Z date of event: 2023-04-23 - 2023-04-28
%C Hamburg, Germany
%B CHI '23
%I ACM
Sabnis, N., Wittchen, D., Vega, G., Reed, C.N., and Strohmeier, P. Tactile Symbols with Continuous and Motion-Coupled Vibration: An Exploration of using Embodied Experiences for Hermeneutic Design. CHI ’23, CHI Conference on Human Factors in Computing Systems, ACM.
(Accepted/in press)
BibTeX
@inproceedings{Sabnis_CHI2023B,
TITLE = {Tactile Symbols with Continuous and Motion-Coupled Vibration: {A}n Exploration of using Embodied Experiences for Hermeneutic Design},
AUTHOR = {Sabnis, Nihar and Wittchen, Dennis and Vega, Gabriela and Reed, Courtney N. and Strohmeier, Paul},
LANGUAGE = {eng},
DOI = {10.1145/3544548.3581356},
PUBLISHER = {ACM},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '23, CHI Conference on Human Factors in Computing Systems},
ADDRESS = {Hamburg, Germany},
}
Endnote
%0 Conference Proceedings
%A Sabnis, Nihar
%A Wittchen, Dennis
%A Vega, Gabriela
%A Reed, Courtney N.
%A Strohmeier, Paul
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Tactile Symbols with Continuous and Motion-Coupled Vibration: An Exploration of using Embodied Experiences for Hermeneutic Design :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A042-4
%R 10.1145/3544548.3581356
%D 2023
%B CHI Conference on Human Factors in Computing Systems
%Z date of event: 2023-04-23 - 2023-04-28
%C Hamburg, Germany
%B CHI '23
%I ACM
Wang, C., Serrano, A., Pan, X., et al. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV 2023), IEEE.
(Accepted/in press)
BibTeX
@inproceedings{wang2023glowgan,
TITLE = {{GlowGAN}: {U}nsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
LANGUAGE = {eng},
PUBLISHER = {IEEE},
YEAR = {2023},
PUBLREMARK = {Accepted},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV 2023)},
ADDRESS = {Paris, France},
}
Endnote
%0 Conference Proceedings
%A Wang, Chao
%A Serrano, Ana
%A Pan, Xingang
%A Chen, Bin
%A Seidel, Hans-Peter
%A Theobalt, Christian
%A Myszkowski, Karol
%A Leimkühler, Thomas
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild :
%G eng
%U http://hdl.handle.net/21.11116/0000-000D-B7FC-9
%D 2023
%B IEEE/CVF International Conference on Computer Vision
%Z date of event: 2023-10-02 - 2023-10-06
%C Paris, France
%B Proceedings of the IEEE/CVF International Conference on Computer Vision
%I IEEE
Wittchen, D., Martinez-Missir, V., Mavali, S., Sabnis, N., Reed, C.N., and Strohmeier, P. 2023. Designing Interactive Shoes for Tactile Augmented Reality. AHs ’23, Augmented Humans International Conference, ACM.
BibTeX
@inproceedings{Wittchen_AHs2023,
TITLE = {Designing Interactive Shoes for Tactile Augmented Reality},
AUTHOR = {Wittchen, Dennis and Martinez-Missir, Valentin and Mavali, Sina and Sabnis, Nihar and Reed, Courtney N. and Strohmeier, Paul},
LANGUAGE = {eng},
ISBN = {978-1-4503-9984-5},
DOI = {10.1145/3582700.3582728},
PUBLISHER = {ACM},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {AHs '23, Augmented Humans International Conference},
PAGES = {1--14},
ADDRESS = {Glasgow, UK},
}
Endnote
%0 Conference Proceedings
%A Wittchen, Dennis
%A Martinez-Missir, Valentin
%A Mavali, Sina
%A Sabnis, Nihar
%A Reed, Courtney N.
%A Strohmeier, Paul
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Designing Interactive Shoes for Tactile Augmented Reality :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A04D-9
%R 10.1145/3582700.3582728
%D 2023
%B Augmented Humans International Conference
%Z date of event: 2023-03-12 - 2023-03-14
%C Glasgow, UK
%B AHs '23
%P 1 - 14
%I ACM
%@ 978-1-4503-9984-5
Paper
Ruan, L., Bemana, M., Seidel, H.-P., Myszkowski, K., and Chen, B. 2023. Revisiting Image Deblurring with an Efficient ConvNet. https://arxiv.org/abs/2302.02234.
(arXiv: 2302.02234) Abstract
Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.
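As a purely illustrative aside on the abstract above: a CNN block that pairs a large-kernel depth-wise convolution with point-wise spatial-channel mixing can be sketched in PyTorch roughly as follows. The class name, kernel size, and layer ordering are assumptions made for illustration, not the paper's LaKD implementation.

import torch
import torch.nn as nn

class LargeKernelDepthwiseBlock(nn.Module):
    """Hypothetical sketch: large-kernel depth-wise conv followed by
    1x1 convolutions that mix information across channels."""
    def __init__(self, channels: int, kernel_size: int = 31):
        super().__init__()
        # Depth-wise convolution: one large spatial kernel per channel,
        # giving a wide receptive field at modest parameter cost.
        self.depthwise = nn.Conv2d(
            channels, channels, kernel_size,
            padding=kernel_size // 2, groups=channels)
        # Point-wise (1x1) convolutions perform the channel mixing.
        self.pointwise = nn.Sequential(
            nn.Conv2d(channels, channels, 1),
            nn.GELU(),
            nn.Conv2d(channels, channels, 1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual connection keeps the block easy to stack into a deeper network.
        return x + self.pointwise(self.depthwise(x))

# Example: a 64-channel feature map passes through with unchanged shape.
y = LargeKernelDepthwiseBlock(64)(torch.randn(1, 64, 256, 256))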
BibTeX
@online{ruan2023revisiting,
TITLE = {Revisiting Image Deblurring with an Efficient {ConvNet}},
AUTHOR = {Ruan, Lingyan and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2302.02234},
EPRINT = {2302.02234},
EPRINTTYPE = {arXiv},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.},
}
Endnote
%0 Report
%A Ruan, Lingyan
%A Bemana, Mojtaba
%A Seidel, Hans-Peter
%A Myszkowski, Karol
%A Chen, Bin
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Revisiting Image Deblurring with an Efficient ConvNet :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-C7B9-3
%U https://arxiv.org/abs/2302.02234
%D 2023
%X Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Thesis
Arabadzhiyska, E. 2023. Perceptually driven methods for improved gaze-contingent rendering. urn:nbn:de:bsz:291--ds-402489.
BibTeX
@phdthesis{Arabadzhiyska_PhD2023,
TITLE = {Perceptually driven methods for improved gaze-contingent rendering},
AUTHOR = {Arabadzhiyska, Elena},
URL = {urn:nbn:de:bsz:291--ds-402489},
DOI = {10.22028/D291-40248},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
DATE = {2023},
}
Endnote
%0 Thesis
%A Arabadzhiyska, Elena
%Y Didyk, Piotr
%A referee: Seidel, Hans-Peter
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Perceptually driven methods for improved gaze-contingent rendering :
%U http://hdl.handle.net/21.11116/0000-000D-87B0-3
%U urn:nbn:de:bsz:291--ds-402489
%F OTHER: hdl:20.500.11880/36181
%R 10.22028/D291-40248
%I Universität des Saarlandes
%C Saarbrücken
%D 2023
%P xxiv, 119 p.
%V phd
%9 phd
%U https://publikationen.sulb.uni-saarland.de/handle/20.500.11880/36181
Habibie, I. 2023. Learning-based 3D human motion capture and animation synthesis. urn:nbn:de:bsz:291--ds-400122.
BibTeX
@phdthesis{Habibie_PhD2023,
TITLE = {Learning-based {3D} human motion capture and animation synthesis},
AUTHOR = {Habibie, Ikhsanul},
LANGUAGE = {eng},
URL = {urn:nbn:de:bsz:291--ds-400122},
DOI = {10.22028/D291-40012},
SCHOOL = {Universit{\"a}t des Saarlandes},
ADDRESS = {Saarbr{\"u}cken},
YEAR = {2023},
MARGINALMARK = {$\bullet$},
DATE = {2023},
}
Endnote
%0 Thesis
%A Habibie, Ikhsanul
%Y Theobalt, Christian
%A referee: Neff, Michael Paul
%A referee: Krüger, Antonio
%A referee: Pons-Moll, Gerard
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
%T Learning-based 3D human motion capture and animation synthesis :
%G eng
%U http://hdl.handle.net/21.11116/0000-000D-7336-5
%R 10.22028/D291-40012
%U urn:nbn:de:bsz:291--ds-400122
%F OTHER: hdl:20.500.11880/36046
%I Universität des Saarlandes
%C Saarbrücken
%D 2023
%P xviii, 118 p.
%V phd
%9 phd
%U https://scidok.sulb.uni-saarland.de/handle/20.500.11880/36046