Last Year
Article
Ansari, N., Seidel, H.-P., and Babaei, V. 2022a. Mixed Integer Neural Inverse Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
BibTeX
@article{Ansari22,
TITLE = {Mixed Integer Neural Inverse Design},
AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Babaei, Vahid},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3528223.3530083},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {41},
NUMBER = {4},
PAGES = {1--14},
EID = {151},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article
%A Ansari, Navid
%A Seidel, Hans-Peter
%A Babaei, Vahid
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Mixed Integer Neural Inverse Design :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1678-5
%R 10.1145/3528223.3530083
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 4
%& 1
%P 1 - 14
%Z sequence number: 151
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2022
%O ACM SIGGRAPH 2022
Chizhov, V., Georgiev, I., Myszkowski, K., and Singh, G. 2022. Perceptual Error Optimization for Monte Carlo Rendering. ACM Transactions on Graphics 41, 3.
BibTeX
@article{ChizhovTOG22,
TITLE = {Perceptual Error Optimization for {Monte Carlo} Rendering},
AUTHOR = {Chizhov, Vassillen and Georgiev, Iliyan and Myszkowski, Karol and Singh, Gurprit},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3504002},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics},
VOLUME = {41},
NUMBER = {3},
PAGES = {1--17},
EID = {26},
}
Endnote
%0 Journal Article
%A Chizhov, Vassillen
%A Georgiev, Iliyan
%A Myszkowski, Karol
%A Singh, Gurprit
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Perceptual Error Optimization for Monte Carlo Rendering :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-BA49-3
%R 10.1145/3504002
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 3
%& 1
%P 1 - 17
%Z sequence number: 26
%I ACM
%C New York, NY
%@ false
Chu, M., Liu, L., Zheng, Q., et al. 2022. Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data. ACM Transactions on Graphics 41, 4.
BibTeX
@article{Chu2022,
TITLE = {Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data},
AUTHOR = {Chu, Mengyu and Liu, Lingjie and Zheng, Quan and Franz, Erik and Seidel, Hans-Peter and Theobalt, Christian and Zayer, Rhaleb},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3528223.3530169},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics},
VOLUME = {41},
NUMBER = {4},
PAGES = {1--14},
EID = {119},
}
Endnote
%0 Journal Article
%A Chu, Mengyu
%A Liu, Lingjie
%A Zheng, Quan
%A Franz, Erik
%A Seidel, Hans-Peter
%A Theobalt, Christian
%A Zayer, Rhaleb
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Physics Informed Neural Fields for Smoke Reconstruction with Sparse Data :
%G eng
%U http://hdl.handle.net/21.11116/0000-000B-6561-6
%R 10.1145/3528223.3530169
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 4
%& 1
%P 1 - 14
%Z sequence number: 119
%I ACM
%C New York, NY
%@ false
%U https://people.mpi-inf.mpg.de/~mchu/projects/PI-NeRF/
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2022a. Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures. Computers and Graphics 105.
BibTeX
@article{Cogalan2022,
TITLE = {Learning {HDR} Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures},
AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias},
LANGUAGE = {eng},
ISSN = {0097-8493},
DOI = {10.1016/j.cag.2022.04.008},
PUBLISHER = {Elsevier},
ADDRESS = {Amsterdam},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computers and Graphics},
VOLUME = {105},
PAGES = {57--72},
}
Endnote
%0 Journal Article
%A Çoğalan, Uğur
%A Bemana, Mojtaba
%A Myszkowski, Karol
%A Seidel, Hans-Peter
%A Ritschel, Tobias
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-9D95-D
%R 10.1016/j.cag.2022.04.008
%7 2022
%D 2022
%J Computers and Graphics
%V 105
%& 57
%P 57 - 72
%I Elsevier
%C Amsterdam
%@ false
Hladký, J., Stengel, M., Vining, N., Kerbl, B., Seidel, H.-P., and Steinberger, M. 2022. QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{HladkySIGGRAPHAsia22,
TITLE = {QuadStream: {A} Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction},
AUTHOR = {Hladk{\'y}, Jozef and Stengel, Michael and Vining, Nicholas and Kerbl, Bernhard and Seidel, Hans-Peter and Steinberger, Markus},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3550454.3555524},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {41},
NUMBER = {6},
PAGES = {1--13},
EID = {233},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article
%A Hladký, Jozef
%A Stengel, Michael
%A Vining, Nicholas
%A Kerbl, Bernhard
%A Seidel, Hans-Peter
%A Steinberger, Markus
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-208B-3
%R 10.1145/3550454.3555524
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 6
%& 1
%P 1 - 13
%Z sequence number: 233
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2022
%O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Huang, X., Memari, P., Seidel, H.-P., and Singh, G. 2022. Point-Pattern Synthesis using Gabor and Random Filters. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2022) 41, 4.
BibTeX
@article{Huang_EGSR2022,
TITLE = {Point-Pattern Synthesis using {Gabor} and Random Filters},
AUTHOR = {Huang, Xingchang and Memari, Pooran and Seidel, Hans-Peter and Singh, Gurprit},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.14596},
PUBLISHER = {Wiley-Blackwell},
ADDRESS = {Oxford},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
DATE = {2022},
JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)},
VOLUME = {41},
NUMBER = {4},
PAGES = {169--179},
BOOKTITLE = {Eurographics Symposium on Rendering 2022},
EDITOR = {Ghosh, Abhijeet and Wei, Li-Yi and Wilkie, Alexander},
}
Endnote
%0 Journal Article
%A Huang, Xingchang
%A Memari, Pooran
%A Seidel, Hans-Peter
%A Singh, Gurprit
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Point-Pattern Synthesis using Gabor and Random Filters :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1675-8
%R 10.1111/cgf.14596
%7 2022
%D 2022
%J Computer Graphics Forum
%V 41
%N 4
%& 169
%P 169 - 179
%I Wiley-Blackwell
%C Oxford
%@ false
%B Eurographics Symposium on Rendering 2022
%O Eurographics Symposium on Rendering 2022 EGSR 2022 Prague, Czech Republic & Virtual ; 4 - 6 July 2022
%U https://onlinelibrary.wiley.com/share/X44DPUPXHCYNCUKSEBEE?target=10.1111/cgf.14596
Kopanas, G., Leimkühler, T., Rainer, G., Jambon, C., and Drettakis, G. 2022. Neural Point Catacaustics for Novel-View Synthesis of Reflections. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{KopanasSIGGRAPHAsia22,
TITLE = {Neural Point Catacaustics for Novel-View Synthesis of Reflections},
AUTHOR = {Kopanas, Georgios and Leimk{\"u}hler, Thomas and Rainer, Gilles and Jambon, Cl{\'e}ment and Drettakis, George},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3550454.3555497},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {41},
NUMBER = {6},
PAGES = {1--15},
EID = {201},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article
%A Kopanas, Georgios
%A Leimkühler, Thomas
%A Rainer, Gilles
%A Jambon, Clément
%A Drettakis, George
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T Neural Point Catacaustics for Novel-View Synthesis of Reflections :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-209B-1
%R 10.1145/3550454.3555497
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 6
%& 1
%P 1 - 15
%Z sequence number: 201
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2022
%O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Mueller, F., Semertzidis, N., Andres, J., et al. 2022. Human–Computer Integration: Towards Integrating the Human Body with the Computational Machine. Foundations and Trends in Human-Computer Interaction 16, 1.
BibTeX
@article{Mueller22,
TITLE = {Human--Computer Integration: {T}owards Integrating the Human Body with the Computational Machine},
AUTHOR = {Mueller, Florian and Semertzidis, Nathan and Andres, Josh and Weigel, Martin and Nanayakkara, Suranga and Patibanda, Rakesh and Li, Zhuying and Strohmeier, Paul and Knibbe, Jarrod and Greuter, Stefan and Obrist, Marianna and Maes, Pattie and Wang, Dakuo and Wolf, Katrin and Gerber, Liz and Marshall, Joe and Kunze, Kai and Grudin, Jonathan and Reiterer, Harald and Byrne, Richard},
LANGUAGE = {eng},
ISSN = {1551-3955},
ISBN = {978-1-63828-068-2},
DOI = {10.1561/1100000086},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {Foundations and Trends in Human-Computer Interaction},
VOLUME = {16},
NUMBER = {1},
PAGES = {1--64},
}
Endnote
%0 Journal Article
%A Mueller, Florian
%A Semertzidis, Nathan
%A Andres, Josh
%A Weigel, Martin
%A Nanayakkara, Suranga
%A Patibanda, Rakesh
%A Li, Zhuying
%A Strohmeier, Paul
%A Knibbe, Jarrod
%A Greuter, Stefan
%A Obrist, Marianna
%A Maes, Pattie
%A Wang, Dakuo
%A Wolf, Katrin
%A Gerber, Liz
%A Marshall, Joe
%A Kunze, Kai
%A Grudin, Jonathan
%A Reiterer, Harald
%A Byrne, Richard
%+ External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
%T Human–Computer Integration: Towards Integrating the Human Body with the Computational Machine :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1734-0
%R 10.1561/1100000086
%@ 978-1-63828-068-2
%7 2022
%D 2022
%J Foundations and Trends in Human-Computer Interaction
%O Foundations and Trends® in Human-Computer Interaction
%V 16
%N 1
%& 1
%P 1 - 64
%@ false
Panetta, J., Mohammadian, H., Luci, E., and Babaei, V. 2022. Shape from Release: Inverse Design and Fabrication of Controlled Release Structures. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{PanettalSIGGRAPHAsia22,
TITLE = {Shape from Release: Inverse Design and Fabrication of Controlled Release Structures},
AUTHOR = {Panetta, Julian and Mohammadian, Haleh and Luci, Emiliano and Babaei, Vahid},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3550454.3555518},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {41},
NUMBER = {6},
PAGES = {1--14},
EID = {274},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article
%A Panetta, Julian
%A Mohammadian, Haleh
%A Luci, Emiliano
%A Babaei, Vahid
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Shape from Release: Inverse Design and Fabrication of Controlled Release Structures :
%G eng
%U http://hdl.handle.net/21.11116/0000-000B-5E7D-1
%R 10.1145/3550454.3555518
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 6
%& 1
%P 1 - 14
%Z sequence number: 274
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2022
%O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Piovarči, M., Foshey, M., Xu, J., et al. 2022. Closed-Loop Control of Direct Ink Writing via Reinforcement Learning. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
BibTeX
@article{PiovarciSIGGRAPH22,
TITLE = {Closed-Loop Control of Direct Ink Writing via Reinforcement Learning},
AUTHOR = {Piovar{\v c}i, Michal and Foshey, Michael and Xu, Jie and Erps, Timmothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3528223.3530144},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {41},
NUMBER = {4},
PAGES = {1--10},
EID = {112},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article
%A Piovarči, Michal
%A Foshey, Michael
%A Xu, Jie
%A Erps, Timmothy
%A Babaei, Vahid
%A Didyk, Piotr
%A Rusinkiewicz, Szymon
%A Matusik, Wojciech
%A Bickel, Bernd
%+ External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Closed-Loop Control of Direct Ink Writing via Reinforcement Learning :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-178D-C
%R 10.1145/3528223.3530144
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 4
%& 1
%P 1 - 10
%Z sequence number: 112
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2022
%O ACM SIGGRAPH 2022
Salaün, C., Georgiev, I., Seidel, H.-P., and Singh, G. 2022a. Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{SalauenSIGGRAPHAsia22,
TITLE = {Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport},
AUTHOR = {Sala{\"u}n, Corentin and Georgiev, Iliyan and Seidel, Hans-Peter and Singh, Gurprit},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3550454.3555484},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {41},
NUMBER = {6},
PAGES = {1--14},
EID = {261},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article
%A Salaün, Corentin
%A Georgiev, Iliyan
%A Seidel, Hans-Peter
%A Singh, Gurprit
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Scalable Multi-Class Sampling via Filtered Sliced Optimal Transport :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1716-2
%R 10.1145/3550454.3555484
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 6
%& 1
%P 1 - 14
%Z sequence number: 261
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2022
%O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Salaün, C., Gruson, A., Hua, B.-S., Hachisuka, T., and Singh, G. 2022b. Regression-based Monte Carlo integration. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
BibTeX
@article{Salauen_SIGGRAPH22,
TITLE = {Regression-based {Monte Carlo} integration},
AUTHOR = {Sala{\"u}n, Corentin and Gruson, Adrien and Hua, Binh-Son and Hachisuka, Toshiya and Singh, Gurprit},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3528223.3530095},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {41},
NUMBER = {4},
PAGES = {1--14},
EID = {79},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article
%A Salaün, Corentin
%A Gruson, Adrien
%A Hua, Binh-Son
%A Hachisuka, Toshiya
%A Singh, Gurprit
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Regression-based Monte Carlo integration :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1793-4
%R 10.1145/3528223.3530095
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 4
%& 1
%P 1 - 14
%Z sequence number: 79
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2022
%O ACM SIGGRAPH 2022
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2022a. Learning a self-supervised tone mapping operator via feature contrast masking loss. Computer Graphics Forum (Proc. EUROGRAPHICS 2022) 41, 2.
BibTeX
@article{Wang2022,
TITLE = {Learning a self-supervised tone mapping operator via feature contrast masking loss},
AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana},
LANGUAGE = {eng},
ISSN = {0167-7055},
DOI = {10.1111/cgf.14459},
PUBLISHER = {Blackwell-Wiley},
ADDRESS = {Oxford},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)},
VOLUME = {41},
NUMBER = {2},
PAGES = {71--84},
BOOKTITLE = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2022)},
EDITOR = {Chaine, Rapha{\"e}lle and Kim, Min H.},
}
Endnote
%0 Journal Article
%A Wang, Chao
%A Chen, Bin
%A Seidel, Hans-Peter
%A Myszkowski, Karol
%A Serrano, Ana
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Learning a self-supervised tone mapping operator via feature contrast masking loss :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-BA09-B
%R 10.1111/cgf.14459
%7 2022
%D 2022
%J Computer Graphics Forum
%O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum
%V 41
%N 2
%& 71
%P 71 - 84
%I Blackwell-Wiley
%C Oxford
%@ false
%B The European Association for Computer Graphics 43rd Annual Conference
%O EUROGRAPHICS 2022 EG 2022 Reims, France, April 25 - 29, 2022
Wolski, K., Trutoiu, L., Dong, Z., Shen, Z., Mackenzie, K., and Chapiro, A. 2022a. Geo-Metric: A Perceptual Dataset of Distortions on Faces. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2022) 41, 6.
BibTeX
@article{WolskiSIGGRAPHAsia22,
TITLE = {Geo-Metric: {A} Perceptual Dataset of Distortions on Faces},
AUTHOR = {Wolski, Krzysztof and Trutoiu, Laura and Dong, Zhao and Shen, Zhengyang and Mackenzie, Kevin and Chapiro, Alexandre},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3550454.3555475},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)},
VOLUME = {41},
NUMBER = {6},
PAGES = {1--13},
EID = {215},
BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2022},
}
Endnote
%0 Journal Article
%A Wolski, Krzysztof
%A Trutoiu, Laura
%A Dong, Zhao
%A Shen, Zhengyang
%A Mackenzie, Kevin
%A Chapiro, Alexandre
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
%T Geo-Metric: A Perceptual Dataset of Distortions on Faces :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-209F-D
%R 10.1145/3550454.3555475
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 6
%& 1
%P 1 - 13
%Z sequence number: 215
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH Asia 2022
%O ACM SIGGRAPH Asia 2022 SA '22 SA 2022
Wolski, K., Zhong, F., Myszkowski, K., and Mantiuk, R.K. 2022b. Dark Stereo: Improving Depth Perception Under Low Luminance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
BibTeX
@article{Wolski_SIGGRAPH22,
TITLE = {Dark Stereo: {I}mproving Depth Perception Under Low Luminance},
AUTHOR = {Wolski, Krzysztof and Zhong, Fangcheng and Myszkowski, Karol and Mantiuk, Rafa{\l} K.},
LANGUAGE = {eng},
ISSN = {0730-0301},
DOI = {10.1145/3528223.3530136},
PUBLISHER = {ACM},
ADDRESS = {New York, NY},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)},
VOLUME = {41},
NUMBER = {4},
PAGES = {1--12},
EID = {146},
BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022},
}
Endnote
%0 Journal Article
%A Wolski, Krzysztof
%A Zhong, Fangcheng
%A Myszkowski, Karol
%A Mantiuk, Rafał K.
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Dark Stereo: Improving Depth Perception Under Low Luminance :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-BA6D-B
%R 10.1145/3528223.3530136
%7 2022
%D 2022
%J ACM Transactions on Graphics
%V 41
%N 4
%& 1
%P 1 - 12
%Z sequence number: 146
%I ACM
%C New York, NY
%@ false
%B Proceedings of ACM SIGGRAPH 2022
%O ACM SIGGRAPH 2022
Book Item
Strohmeier, P., Mottelson, A., Pohl, H., et al. 2022. Body-based user interfaces. In: The Routledge Handbook of Bodily Awareness. Routledge, London.
BibTeX
@incollection{strohmeier2022body,
TITLE = {Body-based user interfaces},
AUTHOR = {Strohmeier, Paul and Mottelson, Aske and Pohl, Henning and McIntosh, Jess and Knibbe, Jarrod and Bergstr{\"o}m, Joanna and Jansen, Yvonne and Hornb{\ae}k, Kasper},
LANGUAGE = {eng},
ISBN = {9780429321542},
DOI = {10.4324/9780429321542},
PUBLISHER = {Routledge},
ADDRESS = {London},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
DATE = {2022},
BOOKTITLE = {The Routledge Handbook of Bodily Awareness},
EDITOR = {Alsmith, Adrian J. T. and Longo, Matthew R.},
PAGES = {478--502},
}
Endnote
%0 Book Section
%A Strohmeier, Paul
%A Mottelson, Aske
%A Pohl, Henning
%A McIntosh, Jess
%A Knibbe, Jarrod
%A Bergström, Joanna
%A Jansen, Yvonne
%A Hornbæk, Kasper
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
%T Body-based user interfaces :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A013-9
%D 2022
%B The Routledge Handbook of Bodily Awareness
%E Alsmith, Adrian J. T.; Longo, Matthew R.
%P 478 - 502
%I Routledge
%C London
%@ 9780429321542
Conference Paper
Ansari, N., Seidel, H.-P., Vahidi Ferdowsi, N., and Babaei, V. 2022b. Autoinverse: Uncertainty Aware Inversion of Neural Networks. Advances in Neural Information Processing Systems 35 (NeurIPS 2022), Curran Associates, Inc.
BibTeX
@inproceedings{Ansari_Neurips22,
TITLE = {Autoinverse: {U}ncertainty Aware Inversion of Neural Networks},
AUTHOR = {Ansari, Navid and Seidel, Hans-Peter and Vahidi Ferdowsi, Nima and Babaei, Vahid},
LANGUAGE = {eng},
PUBLISHER = {Curran Associates, Inc.},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)},
EDITOR = {Koyejo, S. and Mohamed, S. and Agarwal, A. and Belgrave, D. and Cho, K. and Oh, A.},
PAGES = {8675--8686},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Ansari, Navid
%A Seidel, Hans-Peter
%A Vahidi Ferdowsi, Nima
%A Babaei, Vahid
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Autoinverse: Uncertainty Aware Inversion of Neural Networks :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-16F6-6
%D 2022
%B 36th Conference on Neural Information Processing Systems
%Z date of event: 2022-11-28 - 2022-12-09
%C New Orleans, LA, USA
%B Advances in Neural Information Processing Systems 35
%E Koyejo, S.; Mohamed, S.; Agarwal, A.; Belgrave, D.; Cho, K.; Oh, A.
%P 8675 - 8686
%I Curran Associates, Inc.
%U https://openreview.net/pdf?id=dNyCj1AbOb
Bemana, M., Myszkowski, K., Frisvad, J.R., Seidel, H.-P., and Ritschel, T. 2022. Eikonal Fields for Refractive Novel-View Synthesis. SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022), ACM.
BibTeX
@inproceedings{Bemana_SIGGRAPH22,
TITLE = {Eikonal Fields for Refractive Novel-View Synthesis},
AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Frisvad, Jeppe Revall and Seidel, Hans-Peter and Ritschel, Tobias},
LANGUAGE = {eng},
ISBN = {978-1-4503-9337-9},
DOI = {10.1145/3528233.3530706},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {SIGGRAPH 2022 Conference Papers Proceedings (ACM SIGGRAPH 2022)},
EDITOR = {Nandigjav, Munkhtsetseg and Mitra, Niloy J. and Hertzmann, Aaron},
PAGES = {1--9},
EID = {39},
ADDRESS = {Vancouver, Canada},
}
Endnote
%0 Conference Proceedings
%A Bemana, Mojtaba
%A Myszkowski, Karol
%A Frisvad, Jeppe Revall
%A Seidel, Hans-Peter
%A Ritschel, Tobias
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Eikonal Fields for Refractive Novel-View Synthesis :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-BA61-7
%R 10.1145/3528233.3530706
%D 2022
%B ACM SIGGRAPH
%Z date of event: 2022-08-07 - 2022-08-11
%C Vancouver, Canada
%B SIGGRAPH 2022 Conference Papers Proceedings
%E Nandigjav, Munkhtsetseg; Mitra, Niloy J.; Hertzmann, Aaron
%P 1 - 9
%Z sequence number: 39
%I ACM
%@ 978-1-4503-9337-9
Chen, B., Piovarči, M., Wang, C., et al. 2022. Gloss Management for Consistent Reproduction of Real and Virtual Objects. Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022), ACM.
BibTeX
@inproceedings{ChenSA22,
TITLE = {Gloss Management for Consistent Reproduction of Real and Virtual Objects},
AUTHOR = {Chen, Bin and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana},
LANGUAGE = {eng},
ISBN = {978-1-4503-9470-3},
DOI = {10.1145/3550469.3555406},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022)},
EDITOR = {Jung, Soon Ki and Lee, Jehee and Bargteil, Adam},
PAGES = {1--9},
EID = {35},
}
Endnote
%0 Conference Proceedings
%A Chen, Bin
%A Piovarči, Michal
%A Wang, Chao
%A Seidel, Hans-Peter
%A Didyk, Piotr
%A Myszkowski, Karol
%A Serrano, Ana
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Gloss Management for Consistent Reproduction of Real and Virtual Objects :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-167F-E
%R 10.1145/3550469.3555406
%D 2022
%B Proceedings SIGGRAPH Asia 2022
%E Jung, Soon Ki; Lee, Jehee; Bargteil, Adam
%P 1 - 9
%Z sequence number: 35
%I ACM
%@ 978-1-4503-9470-3
Pourjafarian, N., Koelle, M., Mjaku, F., Strohmeier, P., and Steimle, J. 2022. Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces. CHI ’22, CHI Conference on Human Factors in Computing Systems, ACM.
BibTeX
@inproceedings{Pourjafarian_CHI2022,
TITLE = {{Print-A-Sketch}: {A} Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces},
AUTHOR = {Pourjafarian, Narjes and Koelle, Marion and Mjaku, Fjolla and Strohmeier, Paul and Steimle, J{\"u}rgen},
LANGUAGE = {eng},
ISBN = {9781450391573},
DOI = {10.1145/3491102.3502074},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '22, CHI Conference on Human Factors in Computing Systems},
PAGES = {1--17},
EID = {270},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Pourjafarian, Narjes
%A Koelle, Marion
%A Mjaku, Fjolla
%A Strohmeier, Paul
%A Steimle, Jürgen
%+ External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Print-A-Sketch: A Handheld Printer for Physical Sketching of Circuits and Sensors on Everyday Surfaces :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-215D-9
%R 10.1145/3491102.3502074
%D 2022
%B CHI Conference on Human Factors in Computing Systems
%Z date of event: 2022-04-29 - 2022-05-05
%C New Orleans, LA, USA
%B CHI '22
%P 1 - 17
%Z sequence number: 270
%I ACM
%@ 9781450391573
Rao, S., Böhle, M., and Schiele, B. 2022. Towards Better Understanding Attribution Methods. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022), IEEE.
BibTeX
@inproceedings{Rao_CVPR2022,
TITLE = {Towards Better Understanding Attribution Methods},
AUTHOR = {Rao, Sukrut and B{\"o}hle, Moritz and Schiele, Bernt},
LANGUAGE = {eng},
ISBN = {978-1-6654-6946-3},
DOI = {10.1109/CVPR52688.2022.00998},
PUBLISHER = {IEEE},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022)},
PAGES = {10213--10222},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Rao, Sukrut
%A Böhle, Moritz
%A Schiele, Bernt
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
%T Towards Better Understanding Attribution Methods :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-6F91-6
%R 10.1109/CVPR52688.2022.00998
%D 2022
%B 35th IEEE/CVF Conference on Computer Vision and Pattern Recognition
%Z date of event: 2022-06-19 - 2022-06-24
%C New Orleans, LA, USA
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 10213 - 10222
%I IEEE
%@ 978-1-6654-6946-3
Reed, C.N., Nordmoen, C., Martelloni, A., et al. 2022a. Exploring Experiences with New Musical Instruments through Micro-phenomenology. NIME 2022, International Conference on New Interfaces for Musical Expression, PubPub.
BibTeX
@inproceedings{Reed2022Exploring,
TITLE = {Exploring Experiences with New Musical Instruments through Micro-phenomenology},
AUTHOR = {Reed, Courtney N. and Nordmoen, Charlotte and Martelloni, Andrea and Lepri, Giacomo and Robson, Nicole and Zayas-Garin, Eevee and Cotton, Kelsey and Mice, Lia and McPherson, Andrew},
LANGUAGE = {eng},
DOI = {10.21428/92fbeb44.b304e4b1},
PUBLISHER = {PubPub},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {NIME 2022, International Conference on New Interfaces for Musical Expression},
ADDRESS = {Auckland, New Zealand},
}
Endnote
%0 Conference Proceedings
%A Reed, Courtney N.
%A Nordmoen, Charlotte
%A Martelloni, Andrea
%A Lepri, Giacomo
%A Robson, Nicole
%A Zayas-Garin, Eevee
%A Cotton, Kelsey
%A Mice, Lia
%A McPherson, Andrew
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
%T Exploring Experiences with New Musical Instruments through Micro-phenomenology :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-A023-7
%R 10.21428/92fbeb44.b304e4b1
%D 2022
%B International Conference on New Interfaces for Musical Expression
%Z date of event: 2022-06-28 - 2022-07-01
%C Auckland, New Zealand
%B NIME 2022
%I PubPub
Reed, C.N., Skach, S., Strohmeier, P., and McPherson, A.P. 2022b. Singing Knit: Soft Knit Biosensing for Augmenting Vocal Performances. AHs ’22, Augmented Humans International Conference, ACM.
BibTeX
@inproceedings{Reed_AHs2022,
TITLE = {Singing Knit: {S}oft Knit Biosensing for Augmenting Vocal Performances},
AUTHOR = {Reed, Courtney N. and Skach, Sophie and Strohmeier, Paul and McPherson, Andrew P.},
LANGUAGE = {eng},
DOI = {10.1145/3519391.3519412},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {AHs '22, Augmented Humans International Conference},
PAGES = {170--183},
ADDRESS = {Munich, Germany (Hybrid)},
}
Endnote
%0 Conference Proceedings
%A Reed, Courtney N.
%A Skach, Sophie
%A Strohmeier, Paul
%A McPherson, Andrew P.
%+ External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Singing Knit: Soft Knit Biosensing for Augmenting Vocal Performances :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-2178-A
%R 10.1145/3519391.3519412
%D 2022
%B Augmented Humans International Conference
%Z date of event: 2022-03-13 - 2022-03-15
%C Munich, Germany (Hybrid)
%B AHs '22
%P 170 - 183
%I ACM
Ruan, L., Chen, B., Li, J., and Lam, M. 2022. Learning to Deblur using Light Field Generated and Real Defocus Images. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022), IEEE.
Abstract
Although considerable progress has been made in semantic scene understanding under clear weather, it is still a tough problem under adverse weather conditions, such as dense fog, due to the uncertainty caused by imperfect observations. Besides, difficulties in collecting and labeling foggy images hinder the progress of this field. Considering the success in semantic scene understanding under clear weather, we think it is reasonable to transfer knowledge learned from clear images to the foggy domain. As such, the problem becomes to bridge the domain gap between clear images and foggy images. Unlike previous methods that mainly focus on closing the domain gap caused by fog -- defogging the foggy images or fogging the clear images, we propose to alleviate the domain gap by considering fog influence and style variation simultaneously. The motivation is based on our finding that the style-related gap and the fog-related gap can be divided and closed respectively, by adding an intermediate domain. Thus, we propose a new pipeline to cumulatively adapt style, fog and the dual-factor (style and fog). Specifically, we devise a unified framework to disentangle the style factor and the fog factor separately, and then the dual-factor from images in different domains. Furthermore, we collaborate the disentanglement of three factors with a novel cumulative loss to thoroughly disentangle these three factors. Our method achieves the state-of-the-art performance on three benchmarks and shows generalization ability in rainy and snowy scenes.
BibTeX
@inproceedings{Ruan_CVPR2022,
TITLE = {Learning to Deblur using Light Field Generated and Real Defocus Images},
AUTHOR = {Ruan, Lingyan and Chen, Bin and Li, Jizhou and Lam, Miuling},
LANGUAGE = {eng},
ISBN = {978-1-6654-6946-3},
DOI = {10.1109/CVPR52688.2022.01582},
PUBLISHER = {IEEE},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Although considerable progress has been made in semantic scene understanding under clear weather, it is still a tough problem under adverse weather conditions, such as dense fog, due to the uncertainty caused by imperfect observations. Besides, difficulties in collecting and labeling foggy images hinder the progress of this field. Considering the success in semantic scene understanding under clear weather, we think it is reasonable to transfer knowledge learned from clear images to the foggy domain. As such, the problem becomes to bridge the domain gap between clear images and foggy images. Unlike previous methods that mainly focus on closing the domain gap caused by fog -- defogging the foggy images or fogging the clear images, we propose to alleviate the domain gap by considering fog influence and style variation simultaneously. The motivation is based on our finding that the style-related gap and the fog-related gap can be divided and closed respectively, by adding an intermediate domain. Thus, we propose a new pipeline to cumulatively adapt style, fog and the dual-factor (style and fog). Specifically, we devise a unified framework to disentangle the style factor and the fog factor separately, and then the dual-factor from images in different domains. Furthermore, we collaborate the disentanglement of three factors with a novel cumulative loss to thoroughly disentangle these three factors. Our method achieves the state-of-the-art performance on three benchmarks and shows generalization ability in rainy and snowy scenes.},
BOOKTITLE = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR 2022)},
PAGES = {16283--16292},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Ruan, Lingyan
%A Chen, Bin
%A Li, Jizhou
%A Lam, Miuling
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
%T Learning to Deblur using Light Field Generated and Real Defocus Images :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-4354-A
%R 10.1109/CVPR52688.2022.01582
%D 2022
%B 35th IEEE/CVF Conference on Computer Vision and Pattern Recognition
%Z date of event: 2022-06-19 - 2022-06-24
%C New Orleans, LA, USA
%X Although considerable progress has been made in semantic scene understanding under clear weather, it is still a tough problem under adverse weather conditions, such as dense fog, due to the uncertainty caused by imperfect observations. Besides, difficulties in collecting and labeling foggy images hinder the progress of this field. Considering the success in semantic scene understanding under clear weather, we think it is reasonable to transfer knowledge learned from clear images to the foggy domain. As such, the problem becomes to bridge the domain gap between clear images and foggy images. Unlike previous methods that mainly focus on closing the domain gap caused by fog -- defogging the foggy images or fogging the clear images, we propose to alleviate the domain gap by considering fog influence and style variation simultaneously. The motivation is based on our finding that the style-related gap and the fog-related gap can be divided and closed respectively, by adding an intermediate domain. Thus, we propose a new pipeline to cumulatively adapt style, fog and the dual-factor (style and fog). Specifically, we devise a unified framework to disentangle the style factor and the fog factor separately, and then the dual-factor from images in different domains. Furthermore, we collaborate the disentanglement of three factors with a novel cumulative loss to thoroughly disentangle these three factors. Our method achieves the state-of-the-art performance on three benchmarks and shows generalization ability in rainy and snowy scenes.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
%B IEEE/CVF Conference on Computer Vision and Pattern Recognition
%P 16283 - 16292
%I IEEE
%@ 978-1-6654-6946-3
Schneider, O., Fruchard, B., Wittchen, D., et al. 2022. Sustainable Haptic Design: Improving Collaboration, Sharing, and Reuse in Haptic Design Research. CHI ’22, CHI Conference on Human Factors in Computing Systems, ACM.
BibTeX
@inproceedings{Schneider_CHIEA22,
TITLE = {Sustainable Haptic Design: {I}mproving Collaboration, Sharing, and Reuse in Haptic Design Research},
AUTHOR = {Schneider, Oliver and Fruchard, Bruno and Wittchen, Dennis and Joshi, Bibhushan Raj and Freitag, Georg and Degraen, Donald and Strohmeier, Paul},
LANGUAGE = {eng},
ISBN = {9781450391566},
DOI = {10.1145/3491101.3503734},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {CHI '22, CHI Conference on Human Factors in Computing Systems},
EDITOR = {Barbosa, Simone and Lampe, Cliff and Appert, Caroline and Shamma, David A.},
PAGES = {1--5},
EID = {79},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Schneider, Oliver
%A Fruchard, Bruno
%A Wittchen, Dennis
%A Joshi, Bibhushan Raj
%A Freitag, Georg
%A Degraen, Donald
%A Strohmeier, Paul
%+ External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T Sustainable Haptic Design: Improving Collaboration, Sharing, and Reuse in Haptic Design Research :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-1748-A
%R 10.1145/3491101.3503734
%D 2022
%B CHI Conference on Human Factors in Computing Systems
%Z date of event: 2022-04-30 - 2022-05-05
%C New Orleans, LA, USA
%B CHI '22
%E Barbosa, Simone; Lampe, Cliff; Appert, Caroline; Shamma, David A.
%P 1 - 5
%Z sequence number: 79
%I ACM
%@ 9781450391566
Shen, Z., Lin, C., Liao, K., Nie, L., Zheng, Z., and Zhao, Y. 2022. PanoFormer: Panorama Transformer for Indoor 360° Depth Estimation. Computer Vision – ECCV 2022, Springer.
BibTeX
@inproceedings{Shen_ECCV2022,
TITLE = {{PanoFormer}: {P}anorama Transformer for Indoor 360$^{\circ}$ Depth Estimation},
AUTHOR = {Shen, Zhijie and Lin, Chunyu and Liao, Kang and Nie, Lang and Zheng, Zishuo and Zhao, Yao},
LANGUAGE = {eng},
ISBN = {978-3-031-19768-0},
DOI = {10.1007/978-3-031-19769-7_12},
PUBLISHER = {Springer},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
DATE = {2022},
BOOKTITLE = {Computer Vision -- ECCV 2022},
EDITOR = {Avidan, Shai and Brostow, Gabriel and Ciss{\'e}, Moustapha and Farinella, Giovanni Maria and Hassner, Tal},
PAGES = {195--211},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {13661},
ADDRESS = {Tel Aviv, Israel},
}
Endnote
%0 Conference Proceedings
%A Shen, Zhijie
%A Lin, Chunyu
%A Liao, Kang
%A Nie, Lang
%A Zheng, Zishuo
%A Zhao, Yao
%+ External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
%T PanoFormer: Panorama Transformer for Indoor 360° Depth Estimation :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-9585-5
%R 10.1007/978-3-031-19769-7_12
%D 2022
%B 17th European Conference on Computer Vision
%Z date of event: 2022-10-23 - 2022-10-27
%C Tel Aviv, Israel
%B Computer Vision – ECCV 2022
%E Avidan, Shai; Brostow, Gabriel; Cissé, Moustapha; Farinella, Giovanni Maria; Hassner, Tal
%P 195 - 211
%I Springer
%@ 978-3-031-19768-0
%B Lecture Notes in Computer Science
%N 13661
%U https://rdcu.be/c5Ays
Shimada, S., Golyanik, V., Li, Z., Pérez, P., Xu, W., and Theobalt, C. 2022. HULC: 3D HUman Motion Capture with Pose Manifold SampLing and Dense Contact Guidance. Computer Vision – ECCV 2022, Springer.
BibTeX
@inproceedings{Shimada_ECCV2022,
TITLE = {{HULC}: {3D} {HU}man Motion Capture with Pose Manifold Samp{Li}ng and Dense {C}ontact Guidance},
AUTHOR = {Shimada, Soshi and Golyanik, Vladislav and Li, Zhi and P{\'e}rez, Patrick and Xu, Weipeng and Theobalt, Christian},
LANGUAGE = {eng},
ISBN = {978-3-031-20046-5},
DOI = {10.1007/978-3-031-20047-2_30},
PUBLISHER = {Springer},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
DATE = {2022},
BOOKTITLE = {Computer Vision -- ECCV 2022},
EDITOR = {Avidan, Shai and Brostow, Gabriel and Ciss{\'e}, Moustapha and Farinella, Giovanni Maria and Hassner, Tal},
PAGES = {516--533},
SERIES = {Lecture Notes in Computer Science},
VOLUME = {13682},
ADDRESS = {Tel Aviv, Israel},
}
Endnote
%0 Conference Proceedings
%A Shimada, Soshi
%A Golyanik, Vladislav
%A Li, Zhi
%A Pérez, Patrick
%A Xu, Weipeng
%A Theobalt, Christian
%+ Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T HULC: 3D HUman Motion Capture with Pose Manifold SampLing and Dense Contact Guidance :
%G eng
%U http://hdl.handle.net/21.11116/0000-000B-7918-3
%R 10.1007/978-3-031-20047-2_30
%D 2022
%B 17th European Conference on Computer Vision
%Z date of event: 2022-10-23 - 2022-10-27
%C Tel Aviv, Israel
%B Computer Vision – ECCV 2022
%E Avidan, Shai; Brostow, Gabriel; Cissé, Moustapha; Farinella, Giovanni Maria; Hassner, Tal
%P 516 - 533
%I Springer
%@ 978-3-031-20046-5
%B Lecture Notes in Computer Science
%N 13682
%U https://rdcu.be/c0aoZ
Wang, Y., Chen, H., Fan, Y., et al. 2022b. USB: A Unified Semi-supervised Learning Benchmark for Classification. Advances in Neural Information Processing Systems 35 (NeurIPS 2022), Curran Associates, Inc.
BibTeX
@inproceedings{Wang_Neurips22,
TITLE = {{USB}: {A} Unified Semi-supervised Learning Benchmark for Classification},
AUTHOR = {Wang, Yidong and Chen, Hao and Fan, Yue and Sun, Wang and Tao, Ran and Hou, Wenxin and Wang, Renjie and Yang, Linyi and Zhou, Zhi and Guo, Lan-Zhe and Qi, Heli and Wu, Zhen and Li, Yu-Feng and Nakamura, Satoshi and Ye, Wei and Savvides, Marios and Raj, Bhiksha and Shinozaki, Takahiro and Schiele, Bernt and Wang, Jindong and Xie, Xing and Zhang, Yue},
LANGUAGE = {eng},
PUBLISHER = {Curran Associates, Inc.},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)},
EDITOR = {Koyejo, S. and Mohamed, S. and Agarwal, A. and Belgrave, D. and Cho, K. and Oh, A.},
PAGES = {3938--3961},
ADDRESS = {New Orleans, LA, USA},
}
Endnote
%0 Conference Proceedings
%A Wang, Yidong
%A Chen, Hao
%A Fan, Yue
%A Sun, Wang
%A Tao, Ran
%A Hou, Wenxin
%A Wang, Renjie
%A Yang, Linyi
%A Zhou, Zhi
%A Guo, Lan-Zhe
%A Qi, Heli
%A Wu, Zhen
%A Li, Yu-Feng
%A Nakamura, Satoshi
%A Ye, Wei
%A Savvides, Marios
%A Raj, Bhiksha
%A Shinozaki, Takahiro
%A Schiele, Bernt
%A Wang, Jindong
%A Xie, Xing
%A Zhang, Yue
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Vision and Machine Learning, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T USB: A Unified Semi-supervised Learning Benchmark for Classification :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-184A-7
%D 2022
%B 36th Conference on Neural Information Processing Systems
%Z date of event: 2022-11-28 - 2022-12-09
%C New Orleans, LA, USA
%B Advances in Neural Information Processing Systems 35
%E Koyejo, S.; Mohamed, S.; Agarwal, A.; Belgrave, D.; Cho, K.; Oh, A.
%P 3938 - 3961
%I Curran Associates, Inc.
%U https://openreview.net/pdf?id=QeuwINa96C
Wittchen, D., Spiel, K., Fruchard, B., et al. 2022. TactJam: An End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback. TEI ’22, Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction, ACM.
BibTeX
@inproceedings{Wittchen_TEI22,
TITLE = {{TactJam}: {A}n End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback},
AUTHOR = {Wittchen, Dennis and Spiel, Katta and Fruchard, Bruno and Degraen, Donald and Schneider, Oliver and Freitag, Georg and Strohmeier, Paul},
LANGUAGE = {eng},
ISBN = {978-1-4503-9147-4},
DOI = {10.1145/3490149.3501307},
PUBLISHER = {ACM},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
BOOKTITLE = {TEI '22, Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction},
PAGES = {1--13},
EID = {1},
ADDRESS = {Daejeon, Republic of Korea (Online)},
}
Endnote
%0 Conference Proceedings
%A Wittchen, Dennis
%A Spiel, Katta
%A Fruchard, Bruno
%A Degraen, Donald
%A Schneider, Oliver
%A Freitag, Georg
%A Strohmeier, Paul
%+ External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
%T TactJam: An End-to-End Prototyping Suite for Collaborative Design of On-Body Vibrotactile Feedback :
%G eng
%U http://hdl.handle.net/21.11116/0000-000A-20B9-1
%R 10.1145/3490149.3501307
%D 2022
%B Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction
%Z date of event: 2022-02-13 - 2022-02-16
%C Daejeon, Republic of Korea (Online)
%B TEI '22
%P 1 - 13
%Z sequence number: 1
%I ACM
%@ 978-1-4503-9147-4
Paper
Arabadzhiyska, E., Tursun, C., Seidel, H.-P., and Didyk, P. 2022. Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model. https://arxiv.org/abs/2205.01624.
(arXiv: 2205.01624)
Abstract
Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.
BibTeX
@online{Arabadzhiyska2205.01624,
TITLE = {Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model},
AUTHOR = {Arabadzhiyska, Elena and Tursun, Cara and Seidel, Hans-Peter and Didyk, Piotr},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2205.01624},
EPRINT = {2205.01624},
EPRINTTYPE = {arXiv},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.},
}
Endnote
%0 Report
%A Arabadzhiyska, Elena
%A Tursun, Cara
%A Seidel, Hans-Peter
%A Didyk, Piotr
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
%T Practical Saccade Prediction for Head-Mounted Displays: Towards a Comprehensive Model :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-16E3-B
%U https://arxiv.org/abs/2205.01624
%D 2022
%X Eye-tracking technology is an integral component of new display devices such as virtual and augmented reality headsets. Applications of gaze information range from new interaction techniques exploiting eye patterns to gaze-contingent digital content creation. However, system latency is still a significant issue in many of these applications because it breaks the synchronization between the current and measured gaze positions. Consequently, it may lead to unwanted visual artifacts and degradation of user experience. In this work, we focus on foveated rendering applications where the quality of an image is reduced towards the periphery for computational savings. In foveated rendering, the presence of latency leads to delayed updates to the rendered frame, making the quality degradation visible to the user. To address this issue and to combat system latency, recent work proposes to use saccade landing position prediction to extrapolate the gaze information from delayed eye-tracking samples. While the benefits of such a strategy have already been demonstrated, the solutions range from simple and efficient ones, which make several assumptions about the saccadic eye movements, to more complex and costly ones, which use machine learning techniques. Yet, it is unclear to what extent the prediction can benefit from accounting for additional factors. This paper presents a series of experiments investigating the importance of different factors for saccades prediction in common virtual and augmented reality applications. In particular, we investigate the effects of saccade orientation in 3D space and smooth pursuit eye-motion (SPEM) and how their influence compares to the variability across users. We also present a simple yet efficient correction method that adapts the existing saccade prediction methods to handle these factors without performing extensive data collection.
%K Computer Science, Human-Computer Interaction, cs.HC,Computer Science, Graphics, cs.GR
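The entry above discusses extrapolating delayed gaze samples to a saccade landing position but, being an abstract, gives no concrete recipe. As a minimal, hypothetical sketch of the general idea (a plain quadratic extrapolation of recent samples, not the authors' model), landing-position prediction could look like this:

```python
import numpy as np

def predict_landing(t, x, horizon_ms):
    """Extrapolate 1-D gaze position from delayed eye-tracker samples.

    A deliberately simple stand-in for the saccade-landing predictors
    discussed in the entry above: fit a quadratic (constant-acceleration
    model) to the most recent samples and evaluate it `horizon_ms`
    past the last one. t: timestamps in ms, x: gaze position in degrees.
    """
    coeffs = np.polyfit(t, x, deg=2)
    return np.polyval(coeffs, t[-1] + horizon_ms)

# Toy usage: five samples from the rising phase of a saccade,
# compensating for roughly 20 ms of eye-tracker latency.
t = np.array([0.0, 4.0, 8.0, 12.0, 16.0])   # ms
x = np.array([0.0, 0.3, 1.1, 2.4, 4.1])     # degrees
print(predict_landing(t, x, horizon_ms=20.0))
```

Predictors in the cited literature replace the quadratic with models of the saccadic velocity profile; the paper additionally corrects for saccade orientation in 3D and SPEM.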
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2022b. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. https://arxiv.org/abs/2206.09485.
(arXiv: 2206.09485) Abstract
Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We focus on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore the possible advantages of dual-exposure sensors, which readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction, which so far has been considered only for the originally captured frames, not for in-between interpolated frames. We design a neural network trained on these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at test time.
Export
BibTeX
@online{Cogalan2206.09485,
TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors},
AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2206.09485},
EPRINT = {2206.09485},
EPRINTTYPE = {arXiv},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We focus on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore the possible advantages of dual-exposure sensors, which readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction, which so far has been considered only for the originally captured frames, not for in-between interpolated frames. We design a neural network trained on these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at test time.},
}
Endnote
%0 Report
%A Çoğalan, Uğur
%A Bemana, Mojtaba
%A Seidel, Hans-Peter
%A Myszkowski, Karol
%+ Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
%T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-16E8-6
%U https://arxiv.org/abs/2206.09485
%D 2022
%X Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We focus on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore the possible advantages of dual-exposure sensors, which readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction, which so far has been considered only for the originally captured frames, not for in-between interpolated frames. We design a neural network trained on these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at test time.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV
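The abstract above hinges on dual-exposure sensors producing a sharp short exposure and a blurry long exposure that end at the same instant. A toy simulation of such a registered pair from a high-frame-rate stack (an assumption made purely for illustration; the real sensor readout is hardware-defined) might look as follows:

```python
import numpy as np

def dual_exposure_pair(frames):
    """Simulate a spatially registered dual-exposure readout.

    frames: (N, H, W) stack covering one long-exposure interval.
    Returns (short_exp, long_exp): a sharp short exposure taken at the
    end of the interval, and a blurry long exposure integrating all N
    frames, so the ends of both exposures are temporally aligned, as
    described in the entry above.
    """
    short_exp = frames[-1]            # sharp reference at interval end
    long_exp = frames.mean(axis=0)    # motion blur integrates scene motion
    return short_exp, long_exp

frames = np.random.rand(8, 64, 64).astype(np.float32)
short_exp, long_exp = dual_exposure_pair(frames)
```

The blur in `long_exp` is what encodes the temporally continuous motion information that the paper's network exploits alongside the sharp reference.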
Li, J., Chen, B., Zan, G., Qian, G., Pianetta, P., and Liu, Y. 2022. Subspace Modeling for Fast and High-sensitivity X-ray Chemical Imaging. https://arxiv.org/abs/2201.00259.
(arXiv: 2201.00259) Abstract
Resolving morphological chemical phase transformations at the nanoscale is of vital importance to many scientific and industrial applications across various disciplines. The TXM-XANES imaging technique, which combines full-field transmission X-ray microscopy (TXM) and X-ray absorption near edge structure (XANES), is an emerging tool that operates by acquiring a series of microscopy images with multi-energy X-rays and fitting them to obtain the chemical map. Its capability, however, is limited by poor signal-to-noise ratios due to system errors and the low-exposure illumination used for fast acquisition. In this work, by exploiting the intrinsic properties and subspace modeling of TXM-XANES imaging data, we introduce a simple and robust denoising approach that improves image quality and enables fast and high-sensitivity chemical imaging. Extensive experiments on both synthetic and real datasets demonstrate the superior performance of the proposed method.
Export
BibTeX
@online{li2022subspace,
TITLE = {Subspace Modeling for Fast and High-sensitivity X-ray Chemical Imaging},
AUTHOR = {Li, Jizhou and Chen, Bin and Zan, Guibin and Qian, Guannan and Pianetta, Piero and Liu, Yijin},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2201.00259},
EPRINT = {2201.00259},
EPRINTTYPE = {arXiv},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Resolving morphological chemical phase transformations at the nanoscale is of vital importance to many scientific and industrial applications across various disciplines. The TXM-XANES imaging technique, which combines full-field transmission X-ray microscopy (TXM) and X-ray absorption near edge structure (XANES), is an emerging tool that operates by acquiring a series of microscopy images with multi-energy X-rays and fitting them to obtain the chemical map. Its capability, however, is limited by poor signal-to-noise ratios due to system errors and the low-exposure illumination used for fast acquisition. In this work, by exploiting the intrinsic properties and subspace modeling of TXM-XANES imaging data, we introduce a simple and robust denoising approach that improves image quality and enables fast and high-sensitivity chemical imaging. Extensive experiments on both synthetic and real datasets demonstrate the superior performance of the proposed method.},
}
Endnote
%0 Report
%A Li, Jizhou
%A Chen, Bin
%A Zan, Guibin
%A Qian, Guannan
%A Pianetta, Piero
%A Liu, Yijin
%+ External Organizations
Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
External Organizations
External Organizations
External Organizations
%T Subspace Modeling for Fast and High-sensitivity X-ray Chemical Imaging :
%G eng
%U http://hdl.handle.net/21.11116/0000-000C-C7BE-E
%U https://arxiv.org/abs/2201.00259
%D 2022
%X Resolving morphological chemical phase transformations at the nanoscale is of vital importance to many scientific and industrial applications across various disciplines. The TXM-XANES imaging technique, which combines full-field transmission X-ray microscopy (TXM) and X-ray absorption near edge structure (XANES), is an emerging tool that operates by acquiring a series of microscopy images with multi-energy X-rays and fitting them to obtain the chemical map. Its capability, however, is limited by poor signal-to-noise ratios due to system errors and the low-exposure illumination used for fast acquisition. In this work, by exploiting the intrinsic properties and subspace modeling of TXM-XANES imaging data, we introduce a simple and robust denoising approach that improves image quality and enables fast and high-sensitivity chemical imaging. Extensive experiments on both synthetic and real datasets demonstrate the superior performance of the proposed method.
%K eess.IV,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Multimedia, cs.MM
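The denoising idea in the entry above rests on multi-energy TXM-XANES stacks lying near a low-dimensional spectral subspace. A generic low-rank projection via truncated SVD (an illustrative stand-in, not the authors' exact algorithm) captures the gist:

```python
import numpy as np

def subspace_denoise(stack, rank):
    """Generic low-rank denoising of a multi-energy image stack.

    stack: (E, H, W) array, one image per X-ray energy. Per-pixel
    spectra across energies are assumed to lie near a rank-`rank`
    subspace, so projecting onto the top singular vectors suppresses
    noise while preserving the spectral signal.
    """
    E, H, W = stack.shape
    X = stack.reshape(E, H * W)                      # energies x pixels
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    X_lr = U[:, :rank] @ np.diag(s[:rank]) @ Vt[:rank]
    return X_lr.reshape(E, H, W)

noisy = np.random.rand(40, 32, 32).astype(np.float32)
clean = subspace_denoise(noisy, rank=4)
```

Choosing `rank` just above the number of chemical phases present is the usual heuristic for such low-rank models; the paper's method additionally exploits intrinsic properties of the TXM-XANES data.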
Wang, C., Serrano, A., Pan, X., et al. 2022c. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. https://arxiv.org/abs/2211.12352.
(arXiv: 2211.12352) Abstract
Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.
Export
BibTeX
@online{Wang2211.12352,
TITLE = {{GlowGAN}: Unsupervised Learning of {HDR} Images from {LDR} Images in the Wild},
AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas},
LANGUAGE = {eng},
URL = {https://arxiv.org/abs/2211.12352},
EPRINT = {2211.12352},
EPRINTTYPE = {arXiv},
YEAR = {2022},
MARGINALMARK = {$\bullet$},
ABSTRACT = {Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.},
}
Endnote
%0 Report
%A Wang, Chao
%A Serrano, Ana
%A Pan, Xingang
%A Chen, Bin
%A Seidel, Hans-Peter
%A Theobalt, Christian
%A Myszkowski, Karol
%A Leimkühler, Thomas
%+ Computer Graphics, MPI for Informatics, Max Planck Society
External Organizations
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
Computer Graphics, MPI for Informatics, Max Planck Society
Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society
%T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild :
%G eng
%U http://hdl.handle.net/21.11116/0000-000B-9D08-C
%U https://arxiv.org/abs/2211.12352
%D 2022
%X Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.
%K Computer Science, Computer Vision and Pattern Recognition, cs.CV,eess.IV
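GlowGAN's training signal, per the abstract above, comes from projecting generated HDR images to LDR with a stochastic camera model. A minimal sketch of such a projection (with made-up exposure and response-curve ranges; the paper's camera model and its parameters are its own) could be:

```python
import numpy as np

def project_to_ldr(hdr, rng):
    """Project an HDR image to LDR with a stochastic camera model.

    Mirrors the projection step described in the entry above at a toy
    level: random exposure, a simple gamma-style response curve with a
    randomized exponent, clipping, and 8-bit quantization.
    """
    log_exposure = rng.uniform(-3.0, 3.0)      # random exposure, in stops
    exposed = hdr * (2.0 ** log_exposure)
    gamma = rng.uniform(1.8, 2.4)              # randomized response curve
    ldr = np.clip(exposed, 0.0, 1.0) ** (1.0 / gamma)
    return (ldr * 255.0).astype(np.uint8)      # quantize to 8 bits

rng = np.random.default_rng(0)
hdr = np.random.rand(64, 64, 3).astype(np.float32) * 10.0  # toy HDR input
ldr = project_to_ldr(hdr, rng)
```

In the adversarial setup the discriminator only ever sees such LDR projections, which is what lets the generator learn an HDR distribution without any HDR supervision.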