D4
Computer Graphics

Current Year

Article
Chizhov, V., Georgiev, I., Myszkowski, K., and Singh, G. 2022. Perceptual Error Optimization for Monte Carlo Rendering. ACM Transactions on Graphics 41, 3.
Export
BibTeX
@article{ChizhovTOG22,
  TITLE        = {Perceptual Error Optimization for {Monte Carlo} Rendering},
  AUTHOR       = {Chizhov, Vassillen and Georgiev, Iliyan and Myszkowski, Karol and Singh, Gurprit},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3504002},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics},
  VOLUME       = {41},
  NUMBER       = {3},
  PAGES        = {1--17},
  EID          = {26},
}
Endnote
%0 Journal Article %A Chizhov, Vassillen %A Georgiev, Iliyan %A Myszkowski, Karol %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Error Optimization for Monte Carlo Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA49-3 %R 10.1145/3504002 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 3 %& 1 %P 1 - 17 %Z sequence number: 26 %I ACM %C New York, NY %@ false
Chu, M., Liu, L., Zheng, Q., et al. 2022. Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data. ACM Transactions on Graphics 41, 4.
Export
BibTeX
@article{Chu2022,
  TITLE        = {Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data},
  AUTHOR       = {Chu, Mengyu and Liu, Lingjie and Zheng, Quan and Franz, Erik and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3528223.3530169},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics},
  VOLUME       = {41},
  NUMBER       = {4},
  PAGES        = {1--14},
  EID          = {119},
}
Endnote
%0 Journal Article %A Chu, Mengyu %A Liu, Lingjie %A Zheng, Quan %A Franz, Erik %A Seidel, Hans-Peter %A Theobalt, Christian %A Zayer, Rhaleb %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data : %G eng %U http://hdl.handle.net/21.11116/0000-000B-6561-6 %R 10.1145/3528223.3530169 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 14 %Z sequence number: 119 %I ACM %C New York, NY %@ false %U https://people.mpi-inf.mpg.de/~mchu/projects/PI-NeRF/
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2022. Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures. Computers and Graphics 105.
Export
BibTeX
@article{Cogalan2022,
  TITLE        = {Learning {HDR} Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures},
  AUTHOR       = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE     = {eng},
  ISSN         = {0097-8493},
  DOI          = {10.1016/j.cag.2022.04.008},
  PUBLISHER    = {Elsevier},
  ADDRESS      = {Amsterdam},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {Computers and Graphics},
  VOLUME       = {105},
  PAGES        = {57--72},
}
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures : %G eng %U http://hdl.handle.net/21.11116/0000-000A-9D95-D %R 10.1016/j.cag.2022.04.008 %7 2022 %D 2022 %J Computers and Graphics %V 105 %& 57 %P 57 - 72 %I Elsevier %C Amsterdam %@ false
Panetta, J., Mohammadian, H., Luci, E., and Babaei, V. Shape from Release: Inverse Design and Fabrication of Controlled Release Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
(Accepted/in press)
Export
BibTeX
@article{PanettalSIGGRAPHAsia22,
  TITLE        = {Shape from Release: Inverse Design and Fabrication of Controlled Release Structures},
  AUTHOR       = {Panetta, Julian and Mohammadian, Haleh and Luci, Emiliano and Babaei, Vahid},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  DOI          = {10.1145/3550454.3555518},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
  VOLUME       = {41},
  NUMBER       = {6},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article %A Panetta, Julian %A Mohammadian, Haleh %A Luci, Emiliano %A Babaei, Vahid %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Shape from Release: Inverse Design and Fabrication of Controlled Release Structures : %G eng %U http://hdl.handle.net/21.11116/0000-000B-5E7D-1 %R 10.1145/3550454.3555518 %D 2022 %J ACM Transactions on Graphics %V 41 %N 6 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2022 %O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2022a. Learning a self-supervised tone mapping operator via feature contrast masking loss. Computer Graphics Forum (Proc. EUROGRAPHICS 2022) 41, 2.
Export
BibTeX
@article{Wang2022,
  TITLE        = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
  AUTHOR       = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
  LANGUAGE     = {eng},
  ISSN         = {0167-7055},
  DOI          = {10.1111/cgf.14459},
  PUBLISHER    = {Blackwell-Wiley},
  ADDRESS      = {Oxford},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
  VOLUME       = {41},
  NUMBER       = {2},
  PAGES        = {71--84},
  BOOKTITLE    = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2022)},
  EDITOR       = {Chaine, Rapha{\"e}lle and Kim, Min H.},
}
Endnote
%0 Journal Article %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a self-supervised tone mapping operator via feature contrast masking loss : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA09-B %R 10.1111/cgf.14459 %7 2022 %D 2022 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 41 %N 2 %& 71 %P 71 - 84 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 43rd Annual Conference %O EUROGRAPHICS 2022 EG 2022 Reims, France, April 25 - 29, 2022
Wolski, K., Zhong, F., Myszkowski, K., and Mantiuk, R.K. Dark Stereo: Improving Depth Perception Under Low Luminance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
(Accepted/in press)
Export
BibTeX
@article{Wolski_SIGGRAPH22,
  TITLE        = {Dark Stereo: {I}mproving Depth Perception Under Low Luminance},
  AUTHOR       = {Wolski, Krzysztof and Zhong, Fangcheng and Myszkowski, Karol and Mantiuk, Rafa{\l} K.},
  LANGUAGE     = {eng},
  ISSN         = {0730-0301},
  PUBLISHER    = {ACM},
  ADDRESS      = {New York, NY},
  YEAR         = {2022},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  JOURNAL      = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
  VOLUME       = {41},
  NUMBER       = {4},
  BOOKTITLE    = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Zhong, F. %A Myszkowski, Karol %A Mantiuk, Rafał K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dark Stereo: Improving Depth Perception Under Low Luminance : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA6D-B %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2022 %O ACM SIGGRAPH 2022
Conference Paper
Bemana, M., Myszkowski, K., Frisvad, J.R., Seidel, H.-P., and Ritschel, T. 2022. Eikonal Fields for Refractive Novel-View Synthesis. SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022), ACM.
Export
BibTeX
@inproceedings{Bemana_SIGGRAPH22,
  TITLE        = {Eikonal Fields for Refractive Novel-View Synthesis},
  AUTHOR       = {Bemana, Mojtaba and Myszkowski, Karol and Frisvad, Jeppe Revall and Seidel, Hans-Peter and Ritschel, Tobias},
  LANGUAGE     = {eng},
  ISBN         = {978-1-4503-9337-9},
  DOI          = {10.1145/3528233.3530706},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022)},
  EDITOR       = {Nandigjav, Munkhtsetseg and Mitra, Niloy J. and Hertzmann, Aaron},
  PAGES        = {1--9},
  EID          = {39},
  ADDRESS      = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings %A Bemana, Mojtaba %A Myszkowski, Karol %A Frisvad, Jeppe Revall %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Eikonal Fields for Refractive Novel-View Synthesis : %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA61-7 %R 10.1145/3528233.3530706 %D 2022 %B ACM SIGGRAPH %Z date of event: 2022-08-07 - 2022-08-11 %C Vancouver, Canada %B Proceedings SIGGRAPH 2022 Conference Papers Proceedings %E Nandigjav, Munkhtsetseg; Mitra, Niloy J.; Hertzmann, Aaron %P 1 - 9 %Z sequence number: 39 %I ACM %@ 978-1-4503-9337-9
Pourjafarian, N., Koelle, M., Mjaku, F., Strohmeier, P., and Steimle, J. Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces. CHI ’22, CHI Conference on Human Factors in Computing Systems, ACM.
(Accepted/in press)
Export
BibTeX
@inproceedings{Pourjafarian_CHI2022,
  TITLE        = {{Print-A-Sketch}: {A} Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces},
  AUTHOR       = {Pourjafarian, Narjes and Koelle, Marion and Mjaku, Fjolla and Strohmeier, Paul and Steimle, J{\"u}rgen},
  LANGUAGE     = {eng},
  DOI          = {10.1145/3491102.3502074},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {CHI '22, CHI Conference on Human Factors in Computing Systems},
  ADDRESS      = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings %A Pourjafarian, Narjes %A Koelle, Marion %A Mjaku, Fjolla %A Strohmeier, Paul %A Steimle, Jürgen %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces : %G eng %U http://hdl.handle.net/21.11116/0000-000A-215D-9 %R 10.1145/3491102.3502074 %D 2022 %B CHI Conference on Human Factors in Computing Systems %Z date of event: 2022-04-29 - 2022-05-05 %C New Orleans, LA, USA %B CHI '22 %I ACM
Rao, S., Böhle, M., and Schiele, B. Towards Better Understanding Attribution Methods. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022), IEEE.
(Accepted/in press)
Export
BibTeX
@inproceedings{Rao_CVPR2022,
  TITLE        = {Towards Better Understanding Attribution Methods},
  AUTHOR       = {Rao, Sukrut and B{\"o}hle, Moritz and Schiele, Bernt},
  LANGUAGE     = {eng},
  PUBLISHER    = {IEEE},
  YEAR         = {2022},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022)},
  ADDRESS      = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings %A Rao, Sukrut %A Böhle, Moritz %A Schiele, Bernt %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society %T Towards Better Understanding Attribution Methods : %G eng %U http://hdl.handle.net/21.11116/0000-000A-6F91-6 %D 2022 %B 35th IEEE/CVF Conference on Computer Vision and Pattern Recognition %Z date of event: 2022-06-19 - 2022-06-24 %C New Orleans, LA, USA %B IEEE/CVF Conference on Computer Vision and Pattern Recognition %I IEEE
Reed, C.N., Skach, S., Strohmeier, P., and McPherson, A.P. Singing Knit: Soft Knit Biosensing for Augmenting Vocal Performances. AHs ’22, Augmented Humans International Conference, ACM.
(Accepted/in press)
Export
BibTeX
@inproceedings{Reed_AHs2022,
  TITLE        = {Singing Knit: {S}oft Knit Biosensing for Augmenting Vocal Performances},
  AUTHOR       = {Reed, Courtney N. and Skach, Sophie and Strohmeier, Paul and McPherson, Andrew P.},
  LANGUAGE     = {eng},
  DOI          = {10.1145/3519391.3519412},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  PUBLREMARK   = {Accepted},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {AHs '22, Augmented Humans International Conference},
  ADDRESS      = {Munich, Germany (Hybrid)},
}
Endnote
%0 Conference Proceedings %A Reed, Courtney N. %A Skach, Sophie %A Strohmeier, Paul %A McPherson, Andrew P. %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Singing Knit: Soft Knit Biosensing for Augmenting Vocal Performances : %G eng %U http://hdl.handle.net/21.11116/0000-000A-2178-A %R 10.1145/3519391.3519412 %D 2022 %B Augmented Humans International Conference %Z date of event: 2022-03-13 - 2022-03-15 %C Munich, Germany (Hybrid) %B AHs '22 %I ACM
Shimada, S., Golyanik, V., Li, Z., Pérez, P., Xu, W., and Theobalt, C. 2022. HULC: 3D HUman Motion Capture with Pose Manifold SampLing and Dense Contact Guidance. Computer Vision -- ECCV 2022, Springer.
Export
BibTeX
@inproceedings{Shimada_ECCV2022,
  TITLE        = {{HULC}: {3D} {HU}man Motion Capture with Pose Manifold Samp{Li}ng and Dense {C}ontact Guidance},
  AUTHOR       = {Shimada, Soshi and Golyanik, Vladislav and Li, Zhi and P{\'e}rez, Patrick and Xu, Weipeng and Theobalt, Christian},
  LANGUAGE     = {eng},
  ISBN         = {978-3-031-20046-5},
  DOI          = {10.1007/978-3-031-20047-2_30},
  PUBLISHER    = {Springer},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  DATE         = {2022},
  BOOKTITLE    = {Computer Vision -- ECCV 2022},
  EDITOR       = {Avidan, Shai and Brostow, Gabriel and Ciss{\'e}, Moustapha and Farinella, Giovanni Maria and Hassner, Tal},
  PAGES        = {516--533},
  SERIES       = {Lecture Notes in Computer Science},
  VOLUME       = {13682},
  ADDRESS      = {Tel Aviv, Israel},
}
Endnote
%0 Conference Proceedings %A Shimada, Soshi %A Golyanik, Vladislav %A Li, Zhi %A Pérez, Patrick %A Xu, Weipeng %A Theobalt, Christian %+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T HULC: 3D HUman Motion Capture with Pose Manifold SampLing and Dense Contact Guidance : %G eng %U http://hdl.handle.net/21.11116/0000-000B-7918-3 %R 10.1007/978-3-031-20047-2_30 %D 2022 %B 17th European Conference on Computer Vision %Z date of event: 2022-10-23 - 2022-10-27 %C Tel Aviv, Israel %B Computer Vision -- ECCV 2022 %E Avidan, Shai; Brostow, Gabriel; Cissé, Moustapha; Farinella, Giovanni Maria; Hassner, Tal %P 516 - 533 %I Springer %@ 978-3-031-20046-5 %B Lecture Notes in Computer Science %N 13682 %U https://rdcu.be/c0aoZ
Wittchen, D., Spiel, K., Fruchard, B., et al. 2022. TactJam: An End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback. TEI ’22, Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction, ACM.
Export
BibTeX
@inproceedings{Wittchen_TEI22,
  TITLE        = {{TactJam}: {A}n End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback},
  AUTHOR       = {Wittchen, Dennis and Spiel, Katta and Fruchard, Bruno and Degraen, Donald and Schneider, Oliver and Freitag, Georg and Strohmeier, Paul},
  LANGUAGE     = {eng},
  ISBN         = {978-1-4503-9147-4},
  DOI          = {10.1145/3490149.3501307},
  PUBLISHER    = {ACM},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  BOOKTITLE    = {TEI '22, Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction},
  PAGES        = {1--13},
  EID          = {1},
  ADDRESS     = {Daejeon, Republic of Korea (Online)},
}
Endnote
%0 Conference Proceedings %A Wittchen, Dennis %A Spiel, Katta %A Fruchard, Bruno %A Degraen, Donald %A Schneider, Oliver %A Freitag, Georg %A Strohmeier, Paul %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T TactJam: An End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback : %G eng %U http://hdl.handle.net/21.11116/0000-000A-20B9-1 %R 10.1145/3490149.3501307 %D 2022 %B Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction %Z date of event: 2022-02-13 - 2022-02-16 %C Daejeon, Republic of Korea (Online) %B TEI '22 %P 1 - 13 %Z sequence number: 1 %I ACM %@ 978-1-4503-9147-4
Paper
Wang, C., Serrano, A., Pan, X., et al. 2022b. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. https://arxiv.org/abs/2211.12352.
(arXiv: 2211.12352)
Abstract
Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.
Export
BibTeX
@online{Wang2211.12352,
  TITLE        = {{GlowGAN}: Unsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
  AUTHOR       = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
  LANGUAGE     = {eng},
  URL          = {https://arxiv.org/abs/2211.12352},
  EPRINT       = {2211.12352},
  EPRINTTYPE   = {arXiv},
  EPRINTCLASS  = {cs.CV},
  YEAR         = {2022},
  MARGINALMARK = {$\bullet$},
  ABSTRACT     = {Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.},
}
Endnote
%0 Report %A Wang, Chao %A Serrano, Ana %A Pan, X. %A Chen, Bin %A Seidel, Hans-Peter %A Theobalt, Christian %A Myszkowski, Karol %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild : %G eng %U http://hdl.handle.net/21.11116/0000-000B-9D08-C %U https://arxiv.org/abs/2211.12352 %D 2022 %X Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images.
We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV