2023
Balint, M., Wolski, K., Myszkowski, K., Seidel, H.-P., and Mantiuk, R. 2023. Neural Partitioning Pyramids for Denoising Monte Carlo Renderings. Proceedings SIGGRAPH 2023 Conference Papers, ACM.
BibTeX
@inproceedings{Balint_SIGGRAPH23, TITLE = {Neural Partitioning Pyramids for Denoising {Monte Carlo} Renderings}, AUTHOR = {Balint, Martin and Wolski, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter and Mantiuk, Rafa{\l}}, LANGUAGE = {eng}, ISBN = {979-8-4007-0159-7}, DOI = {10.1145/3588432.3591562}, PUBLISHER = {ACM}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, DATE = {2023}, BOOKTITLE = {Proceedings SIGGRAPH 2023 Conference Papers}, EDITOR = {Brunvand, Erik and Sheffer, Alla and Wimmer, Michael}, PAGES = {1--11}, EID = {60}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Balint, Martin %A Wolski, Krzysztof %A Myszkowski, Karol %A Seidel, Hans-Peter %A Mantiuk, Rafał %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Neural Partitioning Pyramids for Denoising Monte Carlo Renderings %G eng %U http://hdl.handle.net/21.11116/0000-000E-3740-C %R 10.1145/3588432.3591562 %D 2023 %B ACM SIGGRAPH Conference %Z date of event: 2023-08-06 - 2023-08-10 %C Los Angeles, CA, USA %B Proceedings SIGGRAPH 2023 Conference Papers %E Brunvand, Erik; Sheffer, Alla; Wimmer, Michael %P 1 - 11 %Z sequence number: 60 %I ACM %@ 979-8-4007-0159-7
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2023. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. Computer Graphics Forum (Proc. EUROGRAPHICS 2023) 42, 2.
BibTeX
@article{Cogalan_Eurographics23, TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.14748}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {42}, NUMBER = {2}, PAGES = {119--131}, BOOKTITLE = {The European Association for Computer Graphics 44th Annual Conference (EUROGRAPHICS 2023)}, }
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors %G eng %U http://hdl.handle.net/21.11116/0000-000C-F953-E %R 10.1111/cgf.14748 %7 2023 %D 2023 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 42 %N 2 %& 119 %P 119 - 131 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 44th Annual Conference %O EUROGRAPHICS 2023 EG 2023 Saarbrücken, Germany, May 8-12, 2023
Ruan, L., Bemana, M., Seidel, H.-P., Myszkowski, K., and Chen, B. 2023. Revisiting Image Deblurring with an Efficient ConvNet. https://arxiv.org/abs/2302.02234.
(arXiv: 2302.02234)
Abstract
Image deblurring aims to recover the latent sharp image from its blurry counterpart and has a wide range of applications in computer vision. The Convolution Neural Networks (CNNs) have performed well in this domain for many years, and until recently an alternative network architecture, namely Transformer, has demonstrated even stronger performance. One can attribute its superiority to the multi-head self-attention (MHSA) mechanism, which offers a larger receptive field and better input content adaptability than CNNs. However, as MHSA demands high computational costs that grow quadratically with respect to the input resolution, it becomes impractical for high-resolution image deblurring tasks. In this work, we propose a unified lightweight CNN network that features a large effective receptive field (ERF) and demonstrates comparable or even better performance than Transformers while bearing less computational costs. Our key design is an efficient CNN block dubbed LaKD, equipped with a large kernel depth-wise convolution and spatial-channel mixing structure, attaining comparable or larger ERF than Transformers but with a smaller parameter scale. Specifically, we achieve +0.17dB / +0.43dB PSNR over the state-of-the-art Restormer on defocus / motion deblurring benchmark datasets with 32% fewer parameters and 39% fewer MACs. Extensive experiments demonstrate the superior performance of our network and the effectiveness of each module. Furthermore, we propose a compact and intuitive ERFMeter metric that quantitatively characterizes ERF, and shows a high correlation to the network performance. We hope this work can inspire the research community to further explore the pros and cons of CNN and Transformer architectures beyond image deblurring tasks.
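The abstract is specific enough to sketch the core LaKD design: a depth-wise convolution with a large kernel supplies the large effective receptive field, and point-wise convolutions mix information across channels. The following PyTorch block is a hypothetical reconstruction from the abstract alone; the class name, kernel size, expansion factor, and residual connection are assumptions, not the authors' code.

import torch
import torch.nn as nn

class LaKDBlock(nn.Module):
    """Hypothetical sketch of the LaKD block described in the abstract:
    a large-kernel depth-wise convolution (spatial mixing) followed by
    point-wise convolutions (channel mixing). All sizes are guesses."""

    def __init__(self, channels: int, kernel_size: int = 31):
        super().__init__()
        # Depth-wise: one large kernel per channel; parameter count grows
        # linearly with channels, unlike a dense large-kernel convolution.
        self.spatial = nn.Conv2d(channels, channels, kernel_size,
                                 padding=kernel_size // 2, groups=channels)
        # Point-wise 1x1 convolutions mix information across channels.
        self.channel = nn.Sequential(
            nn.Conv2d(channels, 2 * channels, 1),
            nn.GELU(),
            nn.Conv2d(2 * channels, channels, 1),
        )

    def forward(self, x):
        return x + self.channel(self.spatial(x))  # assumed residual form

x = torch.randn(1, 64, 128, 128)
print(LaKDBlock(64)(x).shape)  # torch.Size([1, 64, 128, 128])

Because the large kernel is depth-wise, its cost scales linearly with the channel count rather than quadratically, which is what keeps such a block lightweight despite its Transformer-like receptive field.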
BibTeX
@online{ruan2023revisiting, TITLE = {Revisiting Image Deblurring with an Efficient {ConvNet}}, AUTHOR = {Ruan, Lingyan and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol and Chen, Bin}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2302.02234}, EPRINT = {2302.02234}, EPRINTTYPE = {arXiv}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, }
Endnote
%0 Report %A Ruan, Lingyan %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %A Chen, Bin %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Revisiting Image Deblurring with an Efficient ConvNet %G eng %U http://hdl.handle.net/21.11116/0000-000C-C7B9-3 %U https://arxiv.org/abs/2302.02234 %D 2023 %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Surace, L., Wernikowski, M., Tursun, C., Myszkowski, K., Mantiuk, R., and Didyk, P. 2023. Learning GAN-based Foveated Reconstruction to Recover Perceptually Important Image Features. ACM Transactions on Applied Perception.
BibTeX
@article{Surace23, TITLE = {Learning {GAN}-based Foveated Reconstruction to Recover Perceptually Important Image Features}, AUTHOR = {Surace, Luca and Wernikowski, Marek and Tursun, Cara and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {1544-3558}, DOI = {10.1145/3583072}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Applied Perception}, }
Endnote
%0 Journal Article %A Surace, Luca %A Wernikowski, Marek %A Tursun, Cara %A Myszkowski, Karol %A Mantiuk, Radosław %A Didyk, Piotr %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Learning GAN-based Foveated Reconstruction to Recover Perceptually Important Image Features %G eng %U http://hdl.handle.net/21.11116/0000-000C-A00D-1 %R 10.1145/3583072 %7 2023 %D 2023 %J ACM Transactions on Applied Perception %I ACM %C New York, NY %@ false
Wang, C., Serrano, A., Pan, X., et al. 2023a. An Implicit Neural Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2023) 42, 6.
BibTeX
@article{Wang_SIGGRAPHASIA23, TITLE = {An Implicit Neural Representation for the Image Stack: {D}epth, All in Focus, and High Dynamic Range}, AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Wolski, Krzysztof and Chen, Bin and Myszkowski, Karol and Seidel, Hans-Peter and Theobalt, Christian and Leimk{\"u}hler, Thomas}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3618367}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, DATE = {2023}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {42}, NUMBER = {6}, PAGES = {1--11}, EID = {221}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2023}, }
Endnote
%0 Journal Article %A Wang, Chao %A Serrano, Ana %A Pan, Xingang %A Wolski, Krzysztof %A Chen, Bin %A Myszkowski, Karol %A Seidel, Hans-Peter %A Theobalt, Christian %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T An Implicit Neural Representation for the Image Stack: Depth, All in Focus, and High Dynamic Range %G eng %U http://hdl.handle.net/21.11116/0000-000D-B80B-8 %R 10.1145/3618367 %7 2023 %D 2023 %J ACM Transactions on Graphics %V 42 %N 6 %& 1 %P 1 - 11 %Z sequence number: 221 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2023 %O ACM SIGGRAPH Asia 2023 Sydney, Australia, 12-15 December 2023 SA '23 SA 2023
Wang, C., Serrano, A., Pan, X., et al. 2023b. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. IEEE/CVF International Conference on Computer Vision (ICCV 2023), IEEE.
BibTeX
@inproceedings{wang2023glowgan, TITLE = {{GlowGAN}: {U}nsupervised Learning of {HDR} Images from {LDR} Images in the Wild}, AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas}, LANGUAGE = {eng}, ISBN = {979-8-3503-0718-4}, DOI = {10.1109/ICCV51070.2023.00964}, PUBLISHER = {IEEE}, YEAR = {2023}, MARGINALMARK = {$\bullet$}, DATE = {2023}, BOOKTITLE = {IEEE/CVF International Conference on Computer Vision (ICCV 2023)}, PAGES = {10475--10485}, ADDRESS = {Paris, France}, }
Endnote
%0 Conference Proceedings %A Wang, Chao %A Serrano, Ana %A Pan, Xingang %A Chen, Bin %A Seidel, Hans-Peter %A Theobalt, Christian %A Myszkowski, Karol %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild %G eng %U http://hdl.handle.net/21.11116/0000-000D-B7FC-9 %R 10.1109/ICCV51070.2023.00964 %D 2023 %B IEEE/CVF International Conference on Computer Vision %Z date of event: 2023-10-02 - 2023-10-06 %C Paris, France %B IEEE/CVF International Conference on Computer Vision %P 10475 - 10485 %I IEEE %@ 979-8-3503-0718-4
2022
Bemana, M., Myszkowski, K., Frisvad, J.R., Seidel, H.-P., and Ritschel, T. 2022. Eikonal Fields for Refractive Novel-View Synthesis. Proceedings SIGGRAPH 2022 Conference Papers (ACM SIGGRAPH 2022), ACM.
BibTeX
@inproceedings{Bemana_SIGGRAPH22, TITLE = {Eikonal Fields for Refractive Novel-View Synthesis}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Frisvad, Jeppe Revall and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISBN = {978-1-4503-9337-9}, DOI = {10.1145/3528233.3530706}, PUBLISHER = {ACM}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings SIGGRAPH 2022 Conference Papers (ACM SIGGRAPH 2022)}, EDITOR = {Nandigjav, Munkhtsetseg and Mitra, Niloy J. and Hertzmann, Aaron}, PAGES = {1--9}, EID = {39}, ADDRESS = {Vancouver, Canada}, }
Endnote
%0 Conference Proceedings %A Bemana, Mojtaba %A Myszkowski, Karol %A Frisvad, Jeppe Revall %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Eikonal Fields for Refractive Novel-View Synthesis %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA61-7 %R 10.1145/3528233.3530706 %D 2022 %B ACM SIGGRAPH %Z date of event: 2022-08-07 - 2022-08-11 %C Vancouver, Canada %B Proceedings SIGGRAPH 2022 Conference Papers %E Nandigjav, Munkhtsetseg; Mitra, Niloy J.; Hertzmann, Aaron %P 1 - 9 %Z sequence number: 39 %I ACM %@ 978-1-4503-9337-9
Chen, B., Piovarči, M., Wang, C., et al. 2022. Gloss Management for Consistent Reproduction of Real and Virtual Objects. Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022), ACM.
BibTeX
@inproceedings{ChenSA22, TITLE = {Gloss Management for Consistent Reproduction of Real and Virtual Objects}, AUTHOR = {Chen, Bin and Piovar{\v c}i, Michal and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, ISBN = {978-1-4503-9470-3}, DOI = {10.1145/3550469.3555406}, PUBLISHER = {ACM}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, BOOKTITLE = {Proceedings SIGGRAPH Asia 2022 (ACM SIGGRAPH Asia 2022)}, EDITOR = {Jung, Soon Ki and Lee, Jehee and Bargteil, Adam}, PAGES = {1--9}, EID = {35}, }
Endnote
%0 Conference Proceedings %A Chen, Bin %A Piovarči, Michal %A Wang, Chao %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Gloss Management for Consistent Reproduction of Real and Virtual Objects %G eng %U http://hdl.handle.net/21.11116/0000-000C-167F-E %R 10.1145/3550469.3555406 %D 2022 %B Proceedings SIGGRAPH Asia 2022 %E Jung, Soon Ki; Lee, Jehee; Bargteil, Adam %P 1 - 9 %Z sequence number: 35 %I ACM %@ 978-1-4503-9470-3
Chizhov, V., Georgiev, I., Myszkowski, K., and Singh, G. 2022. Perceptual Error Optimization for Monte Carlo Rendering. ACM Transactions on Graphics 41, 3.
BibTeX
@article{ChizhovTOG22, TITLE = {Perceptual Error Optimization for {Monte Carlo} Rendering}, AUTHOR = {Chizhov, Vassillen and Georgiev, Iliyan and Myszkowski, Karol and Singh, Gurprit}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3504002}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {41}, NUMBER = {3}, PAGES = {1--17}, EID = {26}, }
Endnote
%0 Journal Article %A Chizhov, Vassillen %A Georgiev, Iliyan %A Myszkowski, Karol %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Error Optimization for Monte Carlo Rendering %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA49-3 %R 10.1145/3504002 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 3 %& 1 %P 1 - 17 %Z sequence number: 26 %I ACM %C New York, NY %@ false
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2022a. Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures. Computers and Graphics 105.
BibTeX
@article{Cogalan2022, TITLE = {Learning {HDR} Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2022.04.008}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computers and Graphics}, VOLUME = {105}, PAGES = {57--72}, }
Endnote
%0 Journal Article %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning HDR Video Reconstruction for Dual-Exposure Sensors with Temporally-alternating Exposures %G eng %U http://hdl.handle.net/21.11116/0000-000A-9D95-D %R 10.1016/j.cag.2022.04.008 %7 2022 %D 2022 %J Computers and Graphics %V 105 %& 57 %P 57 - 72 %I Elsevier %C Amsterdam %@ false
Çoğalan, U., Bemana, M., Seidel, H.-P., and Myszkowski, K. 2022b. Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors. https://arxiv.org/abs/2206.09485.
(arXiv: 2206.09485)
Abstract
Video frame interpolation (VFI) enables many important applications that might involve the temporal domain, such as slow motion playback, or the spatial domain, such as stop motion sequences. We are focusing on the former task, where one of the key challenges is handling high dynamic range (HDR) scenes in the presence of complex motion. To this end, we explore possible advantages of dual-exposure sensors that readily provide sharp short and blurry long exposures that are spatially registered and whose ends are temporally aligned. This way, motion blur registers temporally continuous information on the scene motion that, combined with the sharp reference, enables more precise motion sampling within a single camera shot. We demonstrate that this facilitates a more complex motion reconstruction in the VFI task, as well as HDR frame reconstruction that so far has been considered only for the originally captured frames, not in-between interpolated frames. We design a neural network trained in these tasks that clearly outperforms existing solutions. We also propose a metric for scene motion complexity that provides important insights into the performance of VFI methods at the test time.
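To make the sensor setup concrete: per shot, a dual-exposure sensor returns a sharp short exposure and a blurry long exposure that are spatially registered, with the short exposure aligned to the end of the long one. The NumPy snippet below merely emulates such a pair from a high-frame-rate clip for experimentation; real dual-exposure sensors interleave the two exposures on the chip, and all names here are invented.

import numpy as np

def emulate_dual_exposure(frames: np.ndarray):
    """Emulate a dual-exposure capture from a stack of N sharp frames
    (N, H, W, 3) in [0, 1]. Illustrative only. The long exposure
    integrates (averages) all N frames, so its motion blur encodes the
    scene motion over the whole interval; the short exposure is the
    last frame, temporally aligned with the *end* of the long one."""
    long_exposure = frames.mean(axis=0)  # blurry, motion-encoding
    short_exposure = frames[-1]          # sharp reference
    return short_exposure, long_exposure

frames = np.random.rand(8, 64, 64, 3)    # stand-in for a captured burst
short, long_ = emulate_dual_exposure(frames)
print(short.shape, long_.shape)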
BibTeX
@online{Cogalan2206.09485, TITLE = {Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Seidel, Hans-Peter and Myszkowski, Karol}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2206.09485}, EPRINT = {2206.09485}, EPRINTTYPE = {arXiv}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, }
Endnote
%0 Report %A Çoğalan, Uğur %A Bemana, Mojtaba %A Seidel, Hans-Peter %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Frame Interpolation for High Dynamic Range Sequences Captured with Dual-exposure Sensors %G eng %U http://hdl.handle.net/21.11116/0000-000C-16E8-6 %U https://arxiv.org/abs/2206.09485 %D 2022 %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
Wang, C., Serrano, A., Pan, X., et al. 2022a. GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild. https://arxiv.org/abs/2211.12352.
(arXiv: 2211.12352)
Abstract
Most in-the-wild images are stored in Low Dynamic Range (LDR) form, serving as a partial observation of the High Dynamic Range (HDR) visual world. Despite limited dynamic range, these LDR images are often captured with different exposures, implicitly containing information about the underlying HDR image distribution. Inspired by this intuition, in this work we present, to the best of our knowledge, the first method for learning a generative model of HDR images from in-the-wild LDR image collections in a fully unsupervised manner. The key idea is to train a generative adversarial network (GAN) to generate HDR images which, when projected to LDR under various exposures, are indistinguishable from real LDR images. The projection from HDR to LDR is achieved via a camera model that captures the stochasticity in exposure and camera response function. Experiments show that our method GlowGAN can synthesize photorealistic HDR images in many challenging cases such as landscapes, lightning, or windows, where previous supervised generative models produce overexposed images. We further demonstrate the new application of unsupervised inverse tone mapping (ITM) enabled by GlowGAN. Our ITM method does not need HDR images or paired multi-exposure images for training, yet it reconstructs more plausible information for overexposed regions than state-of-the-art supervised learning models trained on such data.
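The adversarial setup hinges on the HDR-to-LDR projection: a generated HDR image is scaled by a random exposure, clipped to the displayable range, and passed through a camera response function before the discriminator compares it to real LDR photos. A toy version of that projection follows; the log-uniform exposure range and the fixed gamma curve are stand-ins for the paper's stochastic camera model, not its actual implementation.

import torch

def hdr_to_ldr(hdr, exp_range=(-4.0, 2.0), gamma=2.2):
    """Toy HDR->LDR camera model: random per-image exposure, sensor
    saturation (clipping), and a gamma curve standing in for the
    camera response function. Parameter values are assumptions."""
    log_exposure = torch.empty(hdr.shape[0], 1, 1, 1).uniform_(*exp_range)
    exposed = hdr * torch.exp2(log_exposure)  # random exposure in f-stops
    clipped = exposed.clamp(0.0, 1.0)         # saturation to display range
    return clipped ** (1.0 / gamma)           # simple response curve

hdr = torch.rand(4, 3, 64, 64) * 100.0        # fake linear-radiance batch
print(hdr_to_ldr(hdr).shape)                  # torch.Size([4, 3, 64, 64])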
BibTeX
@online{Wang2211.12352, TITLE = {{GlowGAN}: Unsupervised Learning of {HDR} Images from {LDR} Images in the Wild}, AUTHOR = {Wang, Chao and Serrano, Ana and Pan, Xingang and Chen, Bin and Seidel, Hans-Peter and Theobalt, Christian and Myszkowski, Karol and Leimk{\"u}hler, Thomas}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2211.12352}, EPRINT = {2211.12352}, EPRINTTYPE = {arXiv}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, }
Endnote
%0 Report %A Wang, Chao %A Serrano, Ana %A Pan, Xingang %A Chen, Bin %A Seidel, Hans-Peter %A Theobalt, Christian %A Myszkowski, Karol %A Leimkühler, Thomas %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Visual Computing and Artificial Intelligence, MPI for Informatics, Max Planck Society %T GlowGAN: Unsupervised Learning of HDR Images from LDR Images in the Wild %G eng %U http://hdl.handle.net/21.11116/0000-000B-9D08-C %U https://arxiv.org/abs/2211.12352 %D 2022 %K Computer Science, Computer Vision and Pattern Recognition, cs.CV, eess.IV
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2022b. Learning a self-supervised tone mapping operator via feature contrast masking loss. Computer Graphics Forum (Proc. EUROGRAPHICS 2022) 41, 2.
BibTeX
@article{Wang2022, TITLE = {Learning a self-supervised tone mapping operator via feature contrast masking loss}, AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.14459}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {41}, NUMBER = {2}, PAGES = {71--84}, BOOKTITLE = {The European Association for Computer Graphics 43rd Annual Conference (EUROGRAPHICS 2022)}, EDITOR = {Chaine, Rapha{\"e}lle and Kim, Min H.}, }
Endnote
%0 Journal Article %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a self-supervised tone mapping operator via feature contrast masking loss %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA09-B %R 10.1111/cgf.14459 %7 2022 %D 2022 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 41 %N 2 %& 71 %P 71 - 84 %I Blackwell-Wiley %C Oxford %@ false %B The European Association for Computer Graphics 43rd Annual Conference %O EUROGRAPHICS 2022 EG 2022 Reims, France, April 25 - 29, 2022
Wolski, K., Zhong, F., Myszkowski, K., and Mantiuk, R.K. 2022. Dark Stereo: Improving Depth Perception Under Low Luminance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2022) 41, 4.
BibTeX
@article{Wolski_SIGGRAPH22, TITLE = {Dark Stereo: {I}mproving Depth Perception Under Low Luminance}, AUTHOR = {Wolski, Krzysztof and Zhong, Fangcheng and Myszkowski, Karol and Mantiuk, Rafa{\l} K.}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3528223.3530136}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2022}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {41}, NUMBER = {4}, PAGES = {1--12}, EID = {146}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2022}, }
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Zhong, Fangcheng %A Myszkowski, Karol %A Mantiuk, Rafał K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dark Stereo: Improving Depth Perception Under Low Luminance %G eng %U http://hdl.handle.net/21.11116/0000-000A-BA6D-B %R 10.1145/3528223.3530136 %7 2022 %D 2022 %J ACM Transactions on Graphics %V 41 %N 4 %& 1 %P 1 - 12 %Z sequence number: 146 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2022 %O ACM SIGGRAPH 2022
2021
Chen, B., Wang, C., Piovarči, M., et al. 2021. The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories. The Visual Computer 37.
BibTeX
@article{Chen2021b, TITLE = {The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories}, AUTHOR = {Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-021-02227-x}, PUBLISHER = {Springer}, ADDRESS = {Berlin}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, JOURNAL = {The Visual Computer}, VOLUME = {37}, PAGES = {2975--2987}, }
Endnote
%0 Journal Article %A Chen, Bin %A Wang, Chao %A Piovarči, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T The Effect of Geometry and Illumination on Appearance Perception of Different Material Categories %G eng %U http://hdl.handle.net/21.11116/0000-0008-F05C-2 %R 10.1007/s00371-021-02227-x %7 2021 %D 2021 %J The Visual Computer %V 37 %& 2975 %P 2975 - 2987 %I Springer %C Berlin %@ false
Elek, O., Zhang, R., Sumin, D., et al. 2021. Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing. Optics Express 29, 5.
BibTeX
@article{Elek2021, TITLE = {Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for {3D} Printing}, AUTHOR = {Elek, Oskar and Zhang, Ran and Sumin, Denis and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Krivanek, Jaroslav and Weyrich, Tim}, LANGUAGE = {eng}, ISSN = {1094-4087}, DOI = {10.1364/OE.406095}, PUBLISHER = {Optical Society of America}, ADDRESS = {Washington, DC}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, DATE = {2021}, JOURNAL = {Optics Express}, VOLUME = {29}, NUMBER = {5}, PAGES = {7568--7588}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Zhang, Ran %A Sumin, Denis %A Myszkowski, Karol %A Bickel, Bernd %A Wilkie, Alexander %A Krivanek, Jaroslav %A Weyrich, Tim %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations %T Robust and Practical Measurement of Volume Transport Parameters in Solid Photo-polymer Materials for 3D Printing %G eng %U http://hdl.handle.net/21.11116/0000-0007-E013-6 %R 10.1364/OE.406095 %7 2021 %D 2021 %J Optics Express %O Opt. Express %V 29 %N 5 %& 7568 %P 7568 - 7588 %I Optical Society of America %C Washington, DC %@ false
Jindal, A., Wolski, K., Mantiuk, R.K., and Myszkowski, K. 2021. Perceptual Model for Adaptive Local Shading and Refresh Rate. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2021) 40, 6.
BibTeX
@article{JindalSIGGRAPHAsia21, TITLE = {Perceptual Model for Adaptive Local Shading and Refresh Rate}, AUTHOR = {Jindal, Akshay and Wolski, Krzysztof and Mantiuk, Rafa{\l} K. and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3478513.3480514}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {40}, NUMBER = {6}, PAGES = {1--18}, EID = {281}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2021}, }
Endnote
%0 Journal Article %A Jindal, Akshay %A Wolski, Krzysztof %A Mantiuk, Rafał K. %A Myszkowski, Karol %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Model for Adaptive Local Shading and Refresh Rate %G eng %U http://hdl.handle.net/21.11116/0000-0009-9B45-B %R 10.1145/3478513.3480514 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 6 %& 1 %P 1 - 18 %Z sequence number: 281 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2021 %O ACM SIGGRAPH Asia 2021 SA '21 SA 2021
Rittig, T., Sumin, D., Babaei, V., et al. 2021. Neural Acceleration of Scattering-Aware Color 3D Printing. Computer Graphics Forum (Proc. EUROGRAPHICS 2021) 40, 2.
BibTeX
@article{rittig2021neural, TITLE = {Neural Acceleration of Scattering-Aware Color {3D} Printing}, AUTHOR = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexei and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Krivanek, Jaroslav}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.142626}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, DATE = {2021}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, EDITOR = {Mitra, Niloy and Viola, Ivan}, VOLUME = {40}, NUMBER = {2}, PAGES = {205--219}, BOOKTITLE = {42nd Annual Conference of the European Association for Computer Graphics (EUROGRAPHICS 2021)}, }
Endnote
%0 Journal Article %A Rittig, Tobias %A Sumin, Denis %A Babaei, Vahid %A Didyk, Piotr %A Voloboy, Alexei %A Wilkie, Alexander %A Bickel, Bernd %A Myszkowski, Karol %A Weyrich, Tim %A Krivanek, Jaroslav %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Neural Acceleration of Scattering-Aware Color 3D Printing %G eng %U http://hdl.handle.net/21.11116/0000-0007-F073-8 %R 10.1111/cgf.142626 %7 2021 %D 2021 %J Computer Graphics Forum %O Computer Graphics Forum : journal of the European Association for Computer Graphics Comput. Graph. Forum %V 40 %N 2 %& 205 %P 205 - 219 %I Blackwell-Wiley %C Oxford %@ false %B 42nd Annual Conference of the European Association for Computer Graphics %O EUROGRAPHICS 2021 EG 2021
Serrano, A., Chen, B., Wang, C., et al. 2021. The Effect of Shape and Illumination on Material Perception: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2021) 40, 4.
BibTeX
@article{SIGG2021_Materials, TITLE = {The Effect of Shape and Illumination on Material Perception}, AUTHOR = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovar{\v c}i, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3450626.3459813}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, DATE = {2021}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {40}, NUMBER = {4}, PAGES = {1--16}, EID = {125}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2021}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Chen, Bin %A Wang, Chao %A Piovarči, Michal %A Seidel, Hans-Peter %A Didyk, Piotr %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T The Effect of Shape and Illumination on Material Perception: Model and Applications %G eng %U http://hdl.handle.net/21.11116/0000-0009-0565-0 %R 10.1145/3450626.3459813 %7 2021 %D 2021 %J ACM Transactions on Graphics %V 40 %N 4 %& 1 %P 1 - 16 %Z sequence number: 125 %I Association for Computing Machinery %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2021 %O ACM SIGGRAPH 2021
Surace, L., Wernikowski, M., Tursun, O.T., Myszkowski, K., Mantiuk, R., and Didyk, P. 2021. Learning Foveated Reconstruction to Preserve Perceived Image Statistics. https://arxiv.org/abs/2108.03499.
(arXiv: 2108.03499)
Abstract
Foveated image reconstruction recovers full image from a sparse set of samples distributed according to the human visual system's retinal sensitivity that rapidly drops with eccentricity. Recently, the use of Generative Adversarial Networks was shown to be a promising solution for such a task as they can successfully hallucinate missing image information. Like for other supervised learning approaches, also for this one, the definition of the loss function and training strategy heavily influences the output quality. In this work, we pose the question of how to efficiently guide the training of foveated reconstruction techniques such that they are fully aware of the human visual system's capabilities and limitations, and therefore, reconstruct visually important image features. Due to the nature of GAN-based solutions, we concentrate on the human's sensitivity to hallucination for different input sample densities. We present new psychophysical experiments, a dataset, and a procedure for training foveated image reconstruction. The strategy provides flexibility to the generator network by penalizing only perceptually important deviations in the output. As a result, the method aims to preserve perceived image statistics rather than natural image statistics. We evaluate our strategy and compare it to alternative solutions using a newly trained objective metric and user experiments.
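For intuition, the eccentricity-dependent sparse sampling that the reconstruction starts from can be mimicked with a mask whose keep-probability decays away from the gaze point. The falloff law below is an arbitrary placeholder, not the retinal sensitivity model used in the paper, and all names are invented.

import numpy as np

def foveated_mask(h, w, gaze, p_fovea=1.0, falloff=0.02, seed=0):
    """Binary sampling mask: the probability of keeping a pixel decays
    with eccentricity (distance from the gaze point). Placeholder law."""
    rng = np.random.default_rng(seed)
    ys, xs = np.mgrid[0:h, 0:w]
    ecc = np.hypot(ys - gaze[0], xs - gaze[1])   # eccentricity in pixels
    p = p_fovea / (1.0 + falloff * ecc) ** 2     # assumed density falloff
    return rng.random((h, w)) < p

mask = foveated_mask(256, 256, gaze=(128, 128))
print(mask.mean())  # fraction of pixels sampled across the image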
BibTeX
@online{Surace2108.03499, TITLE = {Learning Foveated Reconstruction to Preserve Perceived Image Statistics}, AUTHOR = {Surace, Luca and Wernikowski, Marek and Tursun, Okan Tarhan and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Didyk, Piotr}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2108.03499}, EPRINT = {2108.03499}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, }
Endnote
%0 Report %A Surace, Luca %A Wernikowski, Marek %A Tursun, Okan Tarhan %A Myszkowski, Karol %A Mantiuk, Radosław %A Didyk, Piotr %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Learning Foveated Reconstruction to Preserve Perceived Image Statistics %G eng %U http://hdl.handle.net/21.11116/0000-0009-73D9-1 %U https://arxiv.org/abs/2108.03499 %D 2021 %K Computer Science, Graphics, cs.GR, Computer Science, Computer Vision and Pattern Recognition, cs.CV
Wang, C., Chen, B., Seidel, H.-P., Myszkowski, K., and Serrano, A. 2021. Learning a self-supervised tone mapping operator via feature contrast masking loss. https://arxiv.org/abs/2110.09866.
(arXiv: 2110.09866)
Abstract
High Dynamic Range (HDR) content is becoming ubiquitous due to the rapid development of capture technologies. Nevertheless, the dynamic range of common display devices is still limited, therefore tone mapping (TM) remains a key challenge for image visualization. Recent work has demonstrated that neural networks can achieve remarkable performance in this task when compared to traditional methods, however, the quality of the results of these learning-based methods is limited by the training data. Most existing works use as training set a curated selection of best-performing results from existing traditional tone mapping operators (often guided by a quality metric), therefore, the quality of newly generated results is fundamentally limited by the performance of such operators. This quality might be even further limited by the pool of HDR content that is used for training. In this work we propose a learning-based self-supervised tone mapping operator that is trained at test time specifically for each HDR image and does not need any data labeling. The key novelty of our approach is a carefully designed loss function built upon fundamental knowledge on contrast perception that allows for directly comparing the content in the HDR and tone mapped images. We achieve this goal by reformulating classic VGG feature maps into feature contrast maps that normalize local feature differences by their average magnitude in a local neighborhood, allowing our loss to account for contrast masking effects. We perform extensive ablation studies and exploration of parameters and demonstrate that our solution outperforms existing approaches with a single set of fixed parameters, as confirmed by both objective and subjective metrics.
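The loss's central transform is described concretely enough to sketch: VGG feature maps become feature contrast maps by normalizing local feature differences by the average feature magnitude in a local neighborhood, which models contrast masking. Below is a direct reading of that sentence in PyTorch; the window size and epsilon are assumptions, and a real implementation would operate on actual VGG activations rather than random tensors.

import torch
import torch.nn.functional as F

def feature_contrast_map(feat, window=5, eps=1e-6):
    """Turn a feature map (B, C, H, W) into a contrast map: deviation
    from the neighborhood mean, normalized by the average feature
    magnitude in that neighborhood (a contrast-masking normalization)."""
    pad = window // 2
    local_mean = F.avg_pool2d(feat, window, stride=1, padding=pad)
    local_mag = F.avg_pool2d(feat.abs(), window, stride=1, padding=pad)
    return (feat - local_mean) / (local_mag + eps)

# The loss would then compare contrast maps of the HDR input and the
# tone-mapped output (random tensors stand in for VGG features here):
a, b = torch.rand(1, 64, 32, 32), torch.rand(1, 64, 32, 32)
loss = (feature_contrast_map(a) - feature_contrast_map(b)).abs().mean()
print(loss.item())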
BibTeX
@online{Wang_2110.09866, TITLE = {Learning a self-supervised tone mapping operator via feature contrast masking loss}, AUTHOR = {Wang, Chao and Chen, Bin and Seidel, Hans-Peter and Myszkowski, Karol and Serrano, Ana}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2110.09866}, EPRINT = {2110.09866}, EPRINTTYPE = {arXiv}, YEAR = {2021}, MARGINALMARK = {$\bullet$}, }
Endnote
%0 Report %A Wang, Chao %A Chen, Bin %A Seidel, Hans-Peter %A Myszkowski, Karol %A Serrano, Ana %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Learning a self-supervised tone mapping operator via feature contrast masking loss %G eng %U http://hdl.handle.net/21.11116/0000-0009-710E-9 %U https://arxiv.org/abs/2110.09866 %D 2021 %K Computer Science, Computer Vision and Pattern Recognition, cs.CV, eess.IV
2020
Ansari, N., Alizadeh-Mousavi, O., Seidel, H.-P., and Babaei, V. 2020. Mixed Integer Ink Selection for Spectral Reproduction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
BibTeX
@article{Ansari_ToG2020, TITLE = {Mixed Integer Ink Selection for Spectral Reproduction}, AUTHOR = {Ansari, Navid and Alizadeh-Mousavi, Omid and Seidel, Hans-Peter and Babaei, Vahid}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417761}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {255}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Ansari, Navid %A Alizadeh-Mousavi, Omid %A Seidel, Hans-Peter %A Babaei, Vahid %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mixed Integer Ink Selection for Spectral Reproduction %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B23-3 %R 10.1145/3414685.3417761 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 255 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020a. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Bemana2020, TITLE = {X-{F}ields: {I}mplicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417827}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {257}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBF0-0 %R 10.1145/3414685.3417827 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 257 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020b. X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation. https://arxiv.org/abs/2010.00450.
(arXiv: 2010.00450)
Abstract
We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map that, for any view, time, or light coordinate and for any pixel, can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.
Export
BibTeX
@online{Bemana_arXiv2010.00450, TITLE = {X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2010.00450}, EPRINT = {2010.00450}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map that, for any view, time, or light coordinate and for any pixel, can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination.}, }
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T X-Fields: Implicit Neural View-, Light- and Time-Image Interpolation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B6EC-2 %U https://arxiv.org/abs/2010.00450 %D 2020 %X We suggest to represent an X-Field (a set of 2D images taken across different view, time or illumination conditions, i.e., video, light field, reflectance fields or combinations thereof) by learning a neural network (NN) to map their view, time or light coordinates to 2D images. Executing this NN at new coordinates results in joint view, time or light interpolation. The key idea to make this workable is a NN that already knows the "basic tricks" of graphics (lighting, 3D projection, occlusion) in a hard-coded and differentiable form. The NN represents the input to that rendering as an implicit map that, for any view, time, or light coordinate and for any pixel, can quantify how it will move if view, time or light coordinates change (Jacobian of pixel position with respect to view, time, illumination, etc.). Our X-Field representation is trained for one scene within minutes, leading to a compact set of trainable parameters and hence real-time navigation in view, time and illumination. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Graphics, cs.GR
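The core ingredient of X-Fields, a small network trained per scene to map view/time/light coordinates to images, can be pictured with a toy PyTorch sketch. The architecture, sizes, and names below are hypothetical stand-ins, and the paper's per-pixel Jacobians and differentiable occlusion-aware warping are omitted.

import torch
import torch.nn as nn

class XFieldToy(nn.Module):
    # Maps an X-Field coordinate (view, time, light) to a small RGB image.
    def __init__(self, coord_dim=3, h=32, w=32):
        super().__init__()
        self.h, self.w = h, w
        self.mlp = nn.Sequential(
            nn.Linear(coord_dim, 256), nn.ReLU(),
            nn.Linear(256, 256), nn.ReLU(),
            nn.Linear(256, 3 * h * w),
        )

    def forward(self, coords):  # coords: (B, coord_dim)
        return self.mlp(coords).view(-1, 3, self.h, self.w).sigmoid()

# Training overfits the network to the sparse input images of one scene;
# evaluating it at in-between coordinates then interpolates view, time and light.
net = XFieldToy()
novel_view = net(torch.tensor([[0.5, 0.25, 0.0]]))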
Chizhov, V., Georgiev, I., Myszkowski, K., and Singh, G. 2020. Perceptual Error Optimization for Monte Carlo Rendering. https://arxiv.org/abs/2012.02344.
(arXiv: 2012.02344)
Abstract
Realistic image synthesis involves computing high-dimensional light transport integrals which in practice are numerically estimated using Monte Carlo integration. The error of this estimation manifests itself in the image as visually displeasing aliasing or noise. To ameliorate this, we develop a theoretical framework for optimizing screen-space error distribution. Our model is flexible and works for arbitrary target error power spectra. We focus on perceptual error optimization by leveraging models of the human visual system's (HVS) point spread function (PSF) from halftoning literature. This results in a specific optimization problem whose solution distributes the error as visually pleasing blue noise in image space. We develop a set of algorithms that provide a trade-off between quality and speed, showing substantial improvements over prior state of the art. We perform evaluations using both quantitative and perceptual error metrics to support our analysis, and provide extensive supplemental material to help evaluate the perceptual improvements achieved by our methods.
Export
BibTeX
@online{Chizhov_arXiv2012.02344, TITLE = {Perceptual Error Optimization for {Monte Carlo} Rendering}, AUTHOR = {Chizhov, Vassillen and Georgiev, Iliyan and Myszkowski, Karol and Singh, Gurprit}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.02344}, EPRINT = {2012.02344}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {Realistic image synthesis involves computing high-dimensional light transport integrals which in practice are numerically estimated using Monte Carlo integration. The error of this estimation manifests itself in the image as visually displeasing aliasing or noise. To ameliorate this, we develop a theoretical framework for optimizing screen-space error distribution. Our model is flexible and works for arbitrary target error power spectra. We focus on perceptual error optimization by leveraging models of the human visual system's (HVS) point spread function (PSF) from halftoning literature. This results in a specific optimization problem whose solution distributes the error as visually pleasing blue noise in image space. We develop a set of algorithms that provide a trade-off between quality and speed, showing substantial improvements over prior state of the art. We perform evaluations using both quantitative and perceptual error metrics to support our analysis, and provide extensive supplemental material to help evaluate the perceptual improvements achieved by our methods.}, }
Endnote
%0 Report %A Chizhov, Vassillen %A Georgiev, Iliyan %A Myszkowski, Karol %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Error Optimization for Monte Carlo Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0007-CEB7-3 %U https://arxiv.org/abs/2012.02344 %D 2020 %X Realistic image synthesis involves computing high-dimensional light transport integrals which in practice are numerically estimated using Monte Carlo integration. The error of this estimation manifests itself in the image as visually displeasing aliasing or noise. To ameliorate this, we develop a theoretical framework for optimizing screen-space error distribution. Our model is flexible and works for arbitrary target error power spectra. We focus on perceptual error optimization by leveraging models of the human visual system's (HVS) point spread function (PSF) from halftoning literature. This results in a specific optimization problem whose solution distributes the error as visually pleasing blue noise in image space. We develop a set of algorithms that provide a trade-off between quality and speed, showing substantial improvements over prior state of the art. We perform evaluations using both quantitative and perceptual error metrics to support our analysis, and provide extensive supplemental material to help evaluate the perceptual improvements achieved by our methods. %K Computer Science, Graphics, cs.GR
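A toy rendition of the screen-space optimization described in the abstract: treat per-pixel error values as exchangeable, and accept random swaps that lower the energy of the error image after filtering with an HVS-like point spread function. The Gaussian PSF stand-in, the greedy swap scheme, and all parameters are illustrative assumptions; the paper derives more principled and much faster algorithms.

import numpy as np
from scipy.ndimage import gaussian_filter

def perceptual_energy(err, sigma=1.0):
    # Energy of the error image as seen through a Gaussian stand-in for the HVS PSF.
    return np.sum(gaussian_filter(err, sigma) ** 2)

def swap_optimize(err, iters=2000, seed=0):
    # Greedy pixel swaps that lower the filtered energy push the
    # error distribution toward visually pleasing blue noise.
    rng = np.random.default_rng(seed)
    err = err.copy()
    best = perceptual_energy(err)
    h, w = err.shape
    for _ in range(iters):
        y0, x0, y1, x1 = rng.integers(0, (h, w, h, w))
        err[y0, x0], err[y1, x1] = err[y1, x1], err[y0, x0]
        e = perceptual_energy(err)
        if e < best:
            best = e
        else:  # revert a swap that did not help
            err[y0, x0], err[y1, x1] = err[y1, x1], err[y0, x0]
    return err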
Çoğalan, U., Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2020. HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models. https://arxiv.org/abs/2012.12009.
(arXiv: 2012.12009)
Abstract
We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as relighting with low noise or blur.
Export
BibTeX
@online{Cogalan_arXiv2012.12009, TITLE = {{HDR} Denoising and Deblurring by Learning Spatio-temporal Distortion Models}, AUTHOR = {{\c C}o{\u g}alan, U{\u g}ur and Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {https://arxiv.org/abs/2012.12009}, EPRINT = {2012.12009}, EPRINTTYPE = {arXiv}, YEAR = {2020}, ABSTRACT = {We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as relighting with low noise or blur.}, }
Endnote
%0 Report %A Çoğalan, Uğur %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HDR Denoising and Deblurring by Learning Spatio-temporal Distortion Models : %G eng %U http://hdl.handle.net/21.11116/0000-0007-B721-5 %U https://arxiv.org/abs/2012.12009 %D 2020 %X We seek to reconstruct sharp and noise-free high-dynamic range (HDR) video from a dual-exposure sensor that records different low-dynamic range (LDR) information in different pixel columns: Odd columns provide low-exposure, sharp, but noisy information; even columns complement this with less noisy, high-exposure, but motion-blurred data. Previous LDR work learns to deblur and denoise (DISTORTED->CLEAN) supervised by pairs of CLEAN and DISTORTED images. Regrettably, capturing DISTORTED sensor readings is time-consuming; as well, there is a lack of CLEAN HDR videos. We suggest a method to overcome those two limitations. First, we learn a different function instead: CLEAN->DISTORTED, which generates samples containing correlated pixel noise, and row and column noise, as well as motion blur from a low number of CLEAN sensor readings. Second, as there is not enough CLEAN HDR video available, we devise a method to learn from LDR video instead. Our approach compares favorably to several strong baselines, and can boost existing methods when they are re-trained on our data. Combined with spatial and temporal super-resolution, it enables applications such as relighting with low noise or blur. %K eess.IV,Computer Science, Computer Vision and Pattern Recognition, cs.CV
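The CLEAN->DISTORTED direction can be illustrated with a toy simulator for the dual-exposure sensor layout described in the abstract. All noise magnitudes, the blur kernel, and the column convention below are made-up placeholders; the paper learns this mapping from a small number of real sensor readings rather than hand-coding it.

import numpy as np

def clean_to_distorted(clean, seed=0):
    # clean: (H, W) frame in [0, 1]; returns a simulated dual-exposure reading.
    rng = np.random.default_rng(seed)
    distorted = clean.copy()
    # High-exposure columns: horizontal motion blur, little noise.
    k = 7
    blurred = np.stack([np.roll(clean, s, axis=1) for s in range(k)]).mean(axis=0)
    distorted[:, 0::2] = blurred[:, 0::2]
    # Low-exposure columns: sharp but noisy, with correlated row noise.
    noisy = clean / 4.0
    noisy = noisy + rng.normal(0.0, 0.02, clean.shape)          # pixel noise
    noisy = noisy + rng.normal(0.0, 0.01, (clean.shape[0], 1))  # row noise
    distorted[:, 1::2] = noisy[:, 1::2]
    return distorted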
Dunn, D., Tursun, O., Yu, H., Didyk, P., Myszkowski, K., and Fuchs, H. 2020. Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays. IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020), IEEE.
Export
BibTeX
@inproceedings{Dunn2020, TITLE = {Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays}, AUTHOR = {Dunn, David and Tursun, Okan and Yu, Hyeonseung and Didyk, Piotr and Myszkowski, Karol and Fuchs, Henry}, LANGUAGE = {eng}, ISBN = {978-1-7281-8508-8}, DOI = {10.1109/ISMAR50242.2020.00029}, PUBLISHER = {IEEE}, YEAR = {2020}, DATE = {2020}, BOOKTITLE = {IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2020)}, PAGES = {90--100}, ADDRESS = {Recife/Porto de Galinhas, Brazil (Virtual Conference)}, }
Endnote
%0 Conference Proceedings %A Dunn, David %A Tursun, Okan %A Yu, Hyeonseung %A Didyk, Piotr %A Myszkowski, Karol %A Fuchs, Henry %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Stimulating the Human Visual System Beyond Real World Performance in Future Augmented Reality Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBDF-5 %R 10.1109/ISMAR50242.2020.00029 %D 2020 %B International Symposium on Mixed and Augmented Reality %Z date of event: 2020-11-09 - 2020-11-13 %C Recife/Porto de Galinhas, Brazil (Virtual Conference) %B IEEE International Symposium on Mixed and Augmented Reality %P 90 - 100 %I IEEE %@ 978-1-7281-8508-8
Elgharib, M., Mendiratta, M., Thies, J., et al. 2020. Egocentric Videoconferencing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Elgharib_ToG2020, TITLE = {Egocentric Videoconferencing}, AUTHOR = {Elgharib, Mohamed and Mendiratta, Mohit and Thies, Justus and Nie{\ss}ner, Matthias and Seidel, Hans-Peter and Tewari, Ayush and Golyanik, Vladislav and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417808}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Elgharib, Mohamed %A Mendiratta, Mohit %A Thies, Justus %A Nießner, Matthias %A Seidel, Hans-Peter %A Tewari, Ayush %A Golyanik, Vladislav %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Egocentric Videoconferencing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B36-E %R 10.1145/3414685.3417808 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Meka, A., Pandey, R., Häne, C., et al. 2020. Deep Relightable Textures: Volumetric Performance Capture with Neural Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Meka_ToG2020, TITLE = {Deep Relightable Textures: Volumetric Performance Capture with Neural Rendering}, AUTHOR = {Meka, Abhimitra and Pandey, Rohit and H{\"a}ne, Christian and Orts-Escolano, Sergio and Barnum, Peter and Davidson, Philip and Erickson, Daniel and Zhang, Yinda and Taylor, Jonathan and Bouaziz, Sofien and Legendre, Chloe and Ma, Wan-Chun and Overbeck, Ryan and Beeler, Thabo and Debevec, Paul and Izadi, Shahram and Theobalt, Christian and Rhemann, Christoph and Fanello, Sean}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417814}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {259}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Meka, Abhimitra %A Pandey, Rohit %A Häne, Christian %A Orts-Escolano, Sergio %A Barnum, Peter %A Davidson, Philip %A Erickson, Daniel %A Zhang, Yinda %A Taylor, Jonathan %A Bouaziz, Sofien %A Legendre, Chloe %A Ma, Wan-Chun %A Overbeck, Ryan %A Beeler, Thabo %A Debevec, Paul %A Izadi, Shahram %A Theobalt, Christian %A Rhemann, Christoph %A Fanello, Sean %+ External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Deep Relightable Textures: Volumetric Performance Capture with Neural Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A6FA-4 %R 10.1145/3414685.3417814 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 259 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020 %U https://dl.acm.org/doi/pdf/10.1145/3414685.3417814
Piovarči, M., Foshey, M., Babaei, V., Rusinkiewicz, S., Matusik, W., and Didyk, P. 2020. Towards Spatially Varying Gloss Reproduction for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Piovarci_ToG2020, TITLE = {Towards Spatially Varying Gloss Reproduction for {3D} Printing}, AUTHOR = {Piovar{\v c}i, Michal and Foshey, Michael and Babaei, Vahid and Rusinkiewicz, Szymon and Matusik, Wojciech and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417850}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {206}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Piovarči, Michal %A Foshey, Michael %A Babaei, Vahid %A Rusinkiewicz, Szymon %A Matusik, Wojciech %A Didyk, Piotr %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Towards Spatially Varying Gloss Reproduction for 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A6FE-0 %R 10.1145/3414685.3417850 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 206 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Serrano, A., Martin, D., Gutierrez, D., Myszkowski, K., and Masia, B. 2020. Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Serrano2020, TITLE = {Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications}, AUTHOR = {Serrano, Ana and Martin, Daniel and Gutierrez, Diego and Myszkowski, Karol and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417773}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {268}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Martin, Daniel %A Gutierrez, Diego %A Myszkowski, Karol %A Masia, Belen %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Imperceptible Manipulation of Lateral Camera Motion for Improved Virtual Reality Applications : %G eng %U http://hdl.handle.net/21.11116/0000-0006-FBE8-A %R 10.1145/3414685.3417773 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 268 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Shimada, S., Golyanik, V., Xu, W., and Theobalt, C. 2020. PhysCap: Physically Plausible Monocular 3D Motion Capture in Real Time. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Shimada_ToG2020, TITLE = {{PhysCap}: {P}hysically Plausible Monocular {3D} Motion Capture in Real Time}, AUTHOR = {Shimada, Soshi and Golyanik, Vladislav and Xu, Weipeng and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417877}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {235}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Shimada, Soshi %A Golyanik, Vladislav %A Xu, Weipeng %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PhysCap: Physically Plausible Monocular 3D Motion Capture in Real Time : %G eng %U http://hdl.handle.net/21.11116/0000-0007-A709-3 %R 10.1145/3414685.3417877 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 235 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Tewari, A., Elgharib, M., Mallikarjun B R, et al. 2020. PIE: Portrait Image Embedding for Semantic Control. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Tewari_ToG2020, TITLE = {{PIE}: {P}ortrait Image Embedding for Semantic Control}, AUTHOR = {Tewari, Ayush and Elgharib, Mohamed and Mallikarjun B R and Bernard, Florian and Seidel, Hans-Peter and P{\'e}rez, Patrick and Zollh{\"o}fer, Michael and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417803}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {223}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Tewari, Ayush %A Elgharib, Mohamed %A Mallikarjun B R, %A Bernard, Florian %A Seidel, Hans-Peter %A Pérez, Patrick %A Zollhöfer, Michael %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T PIE: Portrait Image Embedding for Semantic Control : %G eng %U http://hdl.handle.net/21.11116/0000-0007-9B0C-E %R 10.1145/3414685.3417803 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 223 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Tong, X., Myszkowski, K., and Huang, J. 2020. Foreword to the Special Section on the International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics) 2019. Computers and Graphics 86.
Export
BibTeX
@article{Tong_CAD19, TITLE = {Foreword to the Special Section on the {International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)} 2019}, AUTHOR = {Tong, Xin and Myszkowski, Karol and Huang, Jin}, LANGUAGE = {eng}, ISSN = {0097-8493}, DOI = {10.1016/j.cag.2019.12.002}, PUBLISHER = {Elsevier}, ADDRESS = {Amsterdam}, YEAR = {2020}, DATE = {2020}, JOURNAL = {Computers and Graphics}, VOLUME = {86}, PAGES = {A5--A6}, }
Endnote
%0 Journal Article %A Tong, Xin %A Myszkowski, Karol %A Huang, Jin %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Foreword to the Special Section on the International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics) 2019 : %G eng %U http://hdl.handle.net/21.11116/0000-0007-CEAF-D %R 10.1016/j.cag.2019.12.002 %7 2019 %D 2020 %J Computers and Graphics %V 86 %& A5 %P A5 - A6 %I Elsevier %C Amsterdam %@ false
Wang, J., Mueller, F., Bernard, F., et al. 2020. RGB2Hands: Real-Time Tracking of 3D Hand Interactions from Monocular RGB Video. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Wang_ToG2020, TITLE = {{RGB2Hands}: {R}eal-Time Tracking of {3D} Hand Interactions from Monocular {RGB} Video}, AUTHOR = {Wang, Jiayi and Mueller, Franziska and Bernard, Florian and Sorli, Suzanne and Sotnychenko, Oleksandr and Qian, Neng and Otaduy, Miguel A. and Casas, Dan and Theobalt, Christian}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417852}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {218}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Wang, Jiayi %A Mueller, Franziska %A Bernard, Florian %A Sorli, Suzanne %A Sotnychenko, Oleksandr %A Qian, Neng %A Otaduy, Miguel A. %A Casas, Dan %A Theobalt, Christian %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T RGB2Hands: Real-Time Tracking of 3D Hand Interactions from Monocular RGB Video : %G eng %U http://hdl.handle.net/21.11116/0000-0007-CF20-C %R 10.1145/3414685.3417852 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 218 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2020 %O ACM SIGGRAPH Asia 2020 SA'20 SA 2020
Zheng, Q., Babaei, V., Wetzstein, G., Seidel, H.-P., Zwicker, M., and Singh, G. 2020. Neural Light Field 3D Printing. ACM Transactions on Graphics (Proc. SIGGRAPH Asia 2020) 39, 6.
Export
BibTeX
@article{Zheng_TOG2020, TITLE = {Neural Light Field {3D} Printing}, AUTHOR = {Zheng, Quan and Babaei, Vahid and Wetzstein, Gordon and Seidel, Hans-Peter and Zwicker, Matthias and Singh, Gurprit}, ISSN = {0730-0301}, DOI = {10.1145/3414685.3417879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2020}, JOURNAL = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia)}, VOLUME = {39}, NUMBER = {6}, EID = {207}, BOOKTITLE = {Proceedings of SIGGRAPH Asia 2020}, EDITOR = {Myszkowski, Karol}, }
Endnote
%0 Journal Article %A Zheng, Quan %A Babaei, Vahid %A Wetzstein, Gordon %A Seidel, Hans-Peter %A Zwicker, Matthias %A Singh, Gurprit %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Neural Light Field 3D Printing : %U http://hdl.handle.net/21.11116/0000-0007-9AA8-E %R 10.1145/3414685.3417879 %7 2020 %D 2020 %J ACM Transactions on Graphics %V 39 %N 6 %Z sequence number: 207 %I ACM %C New York, NY %@ false %B Proceedings of SIGGRAPH Asia 2020 %O SIGGRAPH Asia 2020 SA'20 SA 2020
2019
Bemana, M., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019a. Neural View-Interpolation for Sparse Light Field Video. http://arxiv.org/abs/1910.13921.
(arXiv: 1910.13921)
Abstract
We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.
Export
BibTeX
@online{Bemana_arXiv1910.13921, TITLE = {Neural View-Interpolation for Sparse Light Field Video}, AUTHOR = {Bemana, Mojtaba and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1910.13921}, EPRINT = {1910.13921}, EPRINTTYPE = {arXiv}, YEAR = {2019}, ABSTRACT = {We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution.}, }
Endnote
%0 Report %A Bemana, Mojtaba %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Neural View-Interpolation for Sparse Light Field Video : %G eng %U http://hdl.handle.net/21.11116/0000-0005-7B16-9 %U http://arxiv.org/abs/1910.13921 %D 2019 %X We suggest representing light field (LF) videos as "one-off" neural networks (NN), i.e., a learned mapping from view-plus-time coordinates to high-resolution color values, trained on sparse views. Initially, this sounds like a bad idea for three main reasons: First, a NN LF will likely have less quality than a same-sized pixel basis representation. Second, only few training data, e.g., 9 exemplars per frame are available for sparse LF videos. Third, there is no generalization across LFs, but across view and time instead. Consequently, a network needs to be trained for each LF video. Surprisingly, these problems can turn into substantial advantages: Other than the linear pixel basis, a NN has to come up with a compact, non-linear i.e., more intelligent, explanation of color, conditioned on the sparse view and time coordinates. As observed for many NN however, this representation now is interpolatable: if the image output for sparse view coordinates is plausible, it is for all intermediate, continuous coordinates as well. Our specific network architecture involves a differentiable occlusion-aware warping step, which leads to a compact set of trainable parameters and consequently fast learning and fast execution. %K Computer Science, Graphics, cs.GR,Computer Science, Computer Vision and Pattern Recognition, cs.CV,Computer Science, Learning, cs.LG,eess.IV
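The differentiable warping step mentioned in this abstract builds on backward warping with a flow field. Below is a minimal PyTorch sketch of that single building block; occlusion handling and the learned per-pixel motion are omitted, and the names and conventions are assumptions.

import torch
import torch.nn.functional as F

def backward_warp(image, flow):
    # image: (B, C, H, W); flow: (B, 2, H, W), (x, y) displacement in pixels.
    b, _, h, w = image.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    grid = torch.stack((xs, ys)).float().unsqueeze(0) + flow  # sampling positions
    gx = 2.0 * grid[:, 0] / (w - 1) - 1.0  # normalize to [-1, 1] for grid_sample
    gy = 2.0 * grid[:, 1] / (h - 1) - 1.0
    return F.grid_sample(image, torch.stack((gx, gy), dim=-1), align_corners=True)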
Bemana, M., Keinert, J., Myszkowski, K., et al. 2019b. Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image. Computer Graphics Forum (Proc. Pacific Graphics 2019) 38, 7.
Export
BibTeX
@article{Bemana_PG2019, TITLE = {Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image}, AUTHOR = {Bemana, Mojtaba and Keinert, Joachim and Myszkowski, Karol and B{\"a}tz, Michel and Ziegler, Matthias and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13862}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2019}, DATE = {2019}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {38}, NUMBER = {7}, PAGES = {579--589}, BOOKTITLE = {27th Annual International Conference on Computer Graphics and Applications (Pacific Graphics 2019)}, }
Endnote
%0 Journal Article %A Bemana, Mojtaba %A Keinert, Joachim %A Myszkowski, Karol %A Bätz, Michel %A Ziegler, Matthias %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Image-based Rendering Artifacts with Respect to a Hidden Reference Image : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BC5-F %R 10.1111/cgf.13862 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 7 %& 579 %P 579 - 589 %I Wiley-Blackwell %C Oxford, UK %@ false %B 27th Annual International Conference on Computer Graphics and Applications %O Pacific Graphics 2019 PG 2019 Seoul, October 14-17, 2019
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2019. Deep Point Correlation Design. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2019) 38, 6.
Export
BibTeX
@article{Leimkuehler_SA2019, TITLE = {Deep Point Correlation Design}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3355089.3356562}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {38}, NUMBER = {6}, EID = {226}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2019}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Deep Point Correlation Design : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BF3-B %R 10.1145/3355089.3356562 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 6 %Z sequence number: 226 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2019 %O ACM SIGGRAPH Asia 2019 Brisbane, Australia, 17 - 20 November 2019 SA'19 SA 2019
Shekhar, S., Semmo, A., Trapp, M., et al. 2019. Consistent Filtering of Videos and Dense Light-Fields without Optic-Flow. Vision, Modeling and Visualization 2019 (VMV 2019), Eurographics Association.
Export
BibTeX
@inproceedings{Shekhar_VMV2019, TITLE = {Consistent Filtering of Videos and Dense Light-Fields without Optic-Flow}, AUTHOR = {Shekhar, Sumit and Semmo, Amir and Trapp, Matthias and Tursun, Okan Tarhan and Pasewaldt, Sebastian and Myszkowski, Karol and D{\"o}llner, J{\"u}rgen}, LANGUAGE = {eng}, ISBN = {978-3-03868-098-7}, DOI = {10.2312/vmv.20191326}, PUBLISHER = {Eurographics Association}, YEAR = {2019}, DATE = {2019}, BOOKTITLE = {Vision, Modeling and Visualization 2019 (VMV 2019)}, PAGES = {125--134}, ADDRESS = {Rostock, Germany}, }
Endnote
%0 Conference Proceedings %A Shekhar, Sumit %A Semmo, Amir %A Trapp, Matthias %A Tursun, Okan Tarhan %A Pasewaldt, Sebastian %A Myszkowski, Karol %A Döllner, Jürgen %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Consistent Filtering of Videos and Dense Light-Fields without Optic-Flow : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9C10-A %R 10.2312/vmv.20191326 %D 2019 %B 24th International Symposium on Vision, Modeling, and Visualization %Z date of event: 2019-09-30 - 2019-10-02 %C Rostock, Germany %B Vision, Modeling and Visualization 2019 %P 125 - 134 %I Eurographics Association %@ 978-3-03868-098-7
Sumin, D., Rittig, T., Babaei, V., et al. 2019. Geometry-Aware Scattering Compensation for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2019) 38, 4.
Export
BibTeX
@article{SuminRittig2019, TITLE = {Geometry-Aware Scattering Compensation for {3D} Printing}, AUTHOR = {Sumin, Denis and Rittig, Tobias and Babaei, Vahid and Nindel, Thomas and Wilkie, Alexander and Didyk, Piotr and Bickel, Bernd and K{\v r}iv{\'a}nek, Jaroslav and Myszkowski, Karol and Weyrich, Tim}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3306346.3322992}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {38}, NUMBER = {4}, EID = {111}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2019}, }
Endnote
%0 Journal Article %A Sumin, Denis %A Rittig, Tobias %A Babaei, Vahid %A Nindel, Thomas %A Wilkie, Alexander %A Didyk, Piotr %A Bickel, Bernd %A Křivánek, Jaroslav %A Myszkowski, Karol %A Weyrich, Tim %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Geometry-Aware Scattering Compensation for 3D Printing : %G eng %U http://hdl.handle.net/21.11116/0000-0003-7D65-0 %R 10.1145/3306346.3322992 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 4 %Z sequence number: 111 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2019 %O ACM SIGGRAPH 2019 Los Angeles, CA, USA, 28 July - 1 August
Tursun, O.T., Arabadzhiyska, E., Wernikowski, M., et al. 2019. Luminance-Contrast-Aware Foveated Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2019) 38, 4.
Export
BibTeX
@article{Tursun2019Luminance, TITLE = {Luminance-Contrast-Aware Foveated Rendering}, AUTHOR = {Tursun, Okan Tarhan and Arabadzhiyska, Elena and Wernikowski, Marek and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Myszkowski, Karol and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3306346.3322985}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {38}, NUMBER = {4}, EID = {98}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2019}, }
Endnote
%0 Journal Article %A Tursun, Okan Tarhan %A Arabadzhiyska, Elena %A Wernikowski, Marek %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Myszkowski, Karol %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Luminance-Contrast-Aware Foveated Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-0003-75D5-9 %R 10.1145/3306346.3322985 %7 2019 %D 2019 %J ACM Transactions on Graphics %V 38 %N 4 %Z sequence number: 98 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2019 %O ACM SIGGRAPH 2019 Los Angeles, CA, USA, 28 July - 1 August
Wolski, K., Giunchi, D., Kinuwaki, S., et al. 2019. Selecting Texture Resolution Using a Task-specific Visibility Metric. Computer Graphics Forum (Proc. Pacific Graphics 2019) 38, 7.
Export
BibTeX
@article{Wolski_PG2019, TITLE = {Selecting Texture Resolution Using a Task-specific Visibility Metric}, AUTHOR = {Wolski, Krzysztof and Giunchi, Daniele and Kinuwaki, Shinichi and Didyk, Piotr and Myszkowski, Karol and Steed, Anthony and Mantiuk, Rafa{\l} K.}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13871}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2019}, DATE = {2019}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {38}, NUMBER = {7}, PAGES = {685--696}, BOOKTITLE = {27th Annual International Conference on Computer Graphics and Applications (Pacific Graphics 2019)}, }
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Giunchi, Daniele %A Kinuwaki, Shinichi %A Didyk, Piotr %A Myszkowski, Karol %A Steed, Anthony %A Mantiuk, Rafał K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Selecting Texture Resolution Using a Task-specific Visibility Metric : %G eng %U http://hdl.handle.net/21.11116/0000-0004-9BB3-3 %R 10.1111/cgf.13871 %7 2019 %D 2019 %J Computer Graphics Forum %V 38 %N 7 %& 685 %P 685 - 696 %I Wiley-Blackwell %C Oxford, UK %@ false %B 27th Annual International Conference on Computer Graphics and Applications %O Pacific Graphics 2019 PG 2019 Seoul, October 14-17, 2019
Yu, H., Bemana, M., Wernikowski, M., et al. 2019. A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2019) 25, 5.
Export
BibTeX
@article{Yu_VR2019, TITLE = {A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays}, AUTHOR = {Yu, Hyeonseung and Bemana, Mojtaba and Wernikowski, Marek and Chwesiuk, Micha{\l} and Tursun, Okan Tarhan and Singh, Gurprit and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2019.2898821}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2019}, DATE = {2019}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)}, VOLUME = {25}, NUMBER = {5}, PAGES = {1940--1950}, BOOKTITLE = {Selected Proceedings IEEE Virtual Reality 2019 (IEEE VR 2019)}, EDITOR = {Thomas, Bruce and Welch, Greg and Kuhlen, Torsten and Johnson, Kyle}, }
Endnote
%0 Journal Article %A Yu, Hyeonseung %A Bemana, Mojtaba %A Wernikowski, Marek %A Chwesiuk, Michał %A Tursun, Okan Tarhan %A Singh, Gurprit %A Myszkowski, Karol %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays : %G eng %U http://hdl.handle.net/21.11116/0000-0002-DCB5-A %R 10.1109/TVCG.2019.2898821 %7 2019 %D 2019 %J IEEE Transactions on Visualization and Computer Graphics %V 25 %N 5 %& 1940 %P 1940 - 1950 %I IEEE Computer Society %C New York, NY %@ false %B Selected Proceedings IEEE Virtual Reality 2019 %O IEEE VR 2019 Osaka, Japan, 23rd - 27th March
Ziegler, M., Bemana, M., Keinert, J., and Myszkowski, K. 2019. Near Real-time Light Field Reconstruction and Rendering for On-set Capture Quality Evaluation. European Light Field Imaging Workshop (ELFI 2019), EURASIP.
Export
BibTeX
@inproceedings{bemana2019near, TITLE = {Near Real-time Light Field Reconstruction and Rendering for On-set Capture Quality Evaluation}, AUTHOR = {Ziegler, Matthias and Bemana, Mojtaba and Keinert, Joachim and Myszkowski, Karol}, LANGUAGE = {eng}, URL = {https://www.eurasip.org/Proceedings/Ext/ELFI_2019/Proceedings.html}, PUBLISHER = {EURASIP}, YEAR = {2019}, BOOKTITLE = {European Light Field Imaging Workshop (ELFI 2019)}, ADDRESS = {Borovets, Bulgaria}, }
Endnote
%0 Conference Proceedings %A Ziegler, Matthias %A Bemana, Mojtaba %A Keinert, Joachim %A Myszkowski, Karol %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Near Real-time Light Field Reconstruction and Rendering for On-set Capture Quality Evaluation : %G eng %U http://hdl.handle.net/21.11116/0000-0007-DDC0-7 %D 2019 %B European Light Field Imaging Workshop %Z date of event: 2019-06-04 - 2019-06-06 %C Borovets, Bulgaria %B European Light Field Imaging Workshop %I EURASIP %U https://www.eurasip.org/Proceedings/Ext/ELFI_2019/Proceedings.html
2018
Beigpour, S., Shekhar, S., Mansouryar, M., Myszkowski, K., and Seidel, H.-P. 2018. Light-Field Appearance Editing Based on Intrinsic Decomposition. Journal of Perceptual Imaging 1, 1.
Export
BibTeX
@article{Beigpour2018, TITLE = {Light-Field Appearance Editing Based on Intrinsic Decomposition}, AUTHOR = {Beigpour, Shida and Shekhar, Sumit and Mansouryar, Mohsen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.2352/J.Percept.Imaging.2018.1.1.010502}, YEAR = {2018}, JOURNAL = {Journal of Perceptual Imaging}, VOLUME = {1}, NUMBER = {1}, PAGES = {1--15}, EID = {10502}, }
Endnote
%0 Journal Article %A Beigpour, Shida %A Shekhar, Sumit %A Mansouryar, Mohsen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Light-Field Appearance Editing Based on Intrinsic Decomposition : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F88-C %R 10.2352/J.Percept.Imaging.2018.1.1.010502 %7 2018 %D 2018 %J Journal of Perceptual Imaging %O JPI %V 1 %N 1 %& 1 %P 1 - 15 %Z sequence number: 10502
Leimkühler, T., Singh, G., Myszkowski, K., Seidel, H.-P., and Ritschel, T. 2018a. End-to-end Sampling Patterns. http://arxiv.org/abs/1806.06710.
(arXiv: 1806.06710)
Abstract
Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.
Export
BibTeX
@online{Leimkuehler_arXiv1806.06710, TITLE = {End-to-end Sampling Patterns}, AUTHOR = {Leimk{\"u}hler, Thomas and Singh, Gurprit and Myszkowski, Karol and Seidel, Hans-Peter and Ritschel, Tobias}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.06710}, EPRINT = {1806.06710}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties.}, }
Endnote
%0 Report %A Leimkühler, Thomas %A Singh, Gurprit %A Myszkowski, Karol %A Seidel, Hans-Peter %A Ritschel, Tobias %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T End-to-end Sampling Patterns : %G eng %U http://hdl.handle.net/21.11116/0000-0002-1376-4 %U http://arxiv.org/abs/1806.06710 %D 2018 %X Sample patterns have many uses in Computer Graphics, ranging from procedural object placement over Monte Carlo image synthesis to non-photorealistic depiction. Their properties such as discrepancy, spectra, anisotropy, or progressiveness have been analyzed extensively. However, designing methods to produce sampling patterns with certain properties can require substantial hand-crafting effort, both in coding, mathematical derivation and compute time. In particular, there is no systematic way to derive the best sampling algorithm for a specific end-task. Tackling this issue, we suggest another level of abstraction: a toolkit to end-to-end optimize over all sampling methods to find the one producing user-prescribed properties such as discrepancy or a spectrum that best fit the end-task. A user simply implements the forward losses and the sampling method is found automatically -- without coding or mathematical derivation -- by making use of back-propagation abilities of modern deep learning frameworks. While this optimization takes long, at deployment time the sampling method is quick to execute as iterated unstructured non-linear filtering using radial basis functions (RBFs) to represent high-dimensional kernels. Several important previous methods are special cases of this approach, which we compare to previous work and demonstrate its usefulness in several typical Computer Graphics applications. Finally, we propose sampling patterns with properties not shown before, such as high-dimensional blue noise with projective properties. %K Computer Science, Graphics, cs.GR
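The end-to-end idea, writing only a differentiable loss on pattern properties and letting back-propagation find the pattern, can be pictured with a toy point-set optimization. The repulsion loss below is a crude stand-in for the spectrum and discrepancy losses in the paper, which moreover optimizes a reusable sampling method (cascades of RBF-based filters) rather than one fixed point set.

import torch

def repulsion_loss(pts, sigma=0.05):
    # Gaussian repulsion between all point pairs; minimizing it spreads
    # points apart, a crude proxy for a blue-noise target spectrum.
    d2 = torch.cdist(pts, pts).pow(2) + torch.eye(len(pts)) * 1e9
    return torch.exp(-d2 / sigma ** 2).sum()

pts = torch.rand(256, 2, requires_grad=True)   # the pattern itself is the parameter
opt = torch.optim.Adam([pts], lr=1e-2)
for _ in range(500):
    opt.zero_grad()
    loss = repulsion_loss(pts.remainder(1.0))  # keep points in the unit square
    loss.backward()
    opt.step()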
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2018b. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. IEEE Transactions on Visualization and Computer Graphics 24, 6.
Export
BibTeX
@article{Leimkuehler2018, TITLE = {Perceptual real-time {2D}-to-{3D} conversion using cue fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2017.2703612}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {24}, NUMBER = {6}, PAGES = {2037--2050}, }
Endnote
%0 Journal Article %A Leimkühler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/21.11116/0000-0001-409A-9 %R 10.1109/TVCG.2017.2703612 %7 2018 %D 2018 %J IEEE Transactions on Visualization and Computer Graphics %V 24 %N 6 %& 2037 %P 2037 - 2050 %I IEEE Computer Society %C New York, NY %@ false
Myszkowski, K., Tursun, O.T., Kellnhofer, P., et al. 2018. Perceptual Display: Apparent Enhancement of Scene Detail and Depth. Electronic Imaging (Proc. HVEI 2018), SPIE/IS&T.
(Keynote Talk)
Export
BibTeX
@inproceedings{Myszkowski2018Perceptual, TITLE = {Perceptual Display: Apparent Enhancement of Scene Detail and Depth}, AUTHOR = {Myszkowski, Karol and Tursun, Okan Tarhan and Kellnhofer, Petr and Templin, Krzysztof and Arabadzhiyska, Elena and Didyk, Piotr and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {2470-1173}, DOI = {10.2352/ISSN.2470-1173.2018.14.HVEI-501}, PUBLISHER = {SPIE/IS\&T}, YEAR = {2018}, BOOKTITLE = {Human Vision and Electronic Imaging (HVEI 2018)}, PAGES = {1--10}, EID = {501}, JOURNAL = {Electronic Imaging (Proc. HVEI)}, VOLUME = {2018}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tursun, Okan Tarhan %A Kellnhofer, Petr %A Templin, Krzysztof %A Arabadzhiyska, Elena %A Didyk, Piotr %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Display: Apparent Enhancement of Scene Detail and Depth : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F64-5 %R 10.2352/ISSN.2470-1173.2018.14.HVEI-501 %D 2018 %B Human Vision and Electronic Imaging %Z date of event: 2018-01-28 - 2018-02-02 %C San Francisco, CA, USA %B Human Vision and Electronic Imaging %P 1 - 10 %Z sequence number: 501 %I SPIE/IS&T %J Electronic Imaging %V 2018 %@ false
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2018. An Intuitive Control Space for Material Appearance. http://arxiv.org/abs/1806.04950.
(arXiv: 1806.04950)
Abstract
Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.
Export
BibTeX
@online{Serrano_arXiv1806.04950, TITLE = {An Intuitive Control Space for Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, URL = {http://arxiv.org/abs/1806.04950}, EPRINT = {1806.04950}, EPRINTTYPE = {arXiv}, YEAR = {2018}, ABSTRACT = {Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction.}, }
Endnote
%0 Report %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/21.11116/0000-0002-151E-6 %U http://arxiv.org/abs/1806.04950 %D 2018 %X Many different techniques for measuring material appearance have been proposed in the last few years. These have produced large public datasets, which have been used for accurate, data-driven appearance modeling. However, although these datasets have allowed us to reach an unprecedented level of realism in visual appearance, editing the captured data remains a challenge. In this paper, we present an intuitive control space for predictable editing of captured BRDF data, which allows for artistic creation of plausible novel material appearances, bypassing the difficulty of acquiring novel samples. We first synthesize novel materials, extending the existing MERL dataset up to 400 mathematically valid BRDFs. We then design a large-scale experiment, gathering 56,000 subjective ratings on the high-level perceptual attributes that best describe our extended dataset of materials. Using these ratings, we build and train networks of radial basis functions to act as functionals mapping the perceptual attributes to an underlying PCA-based representation of BRDFs. We show that our functionals are excellent predictors of the perceived attributes of appearance. Our control space enables many applications, including intuitive material editing of a wide range of visual properties, guidance for gamut mapping, analysis of the correlation between perceptual attributes, or novel appearance similarity metrics. Moreover, our methodology can be used to derive functionals applicable to classic analytic BRDF representations. We release our code and dataset publicly, in order to support and encourage further research in this direction. %K Computer Science, Graphics, cs.GR
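The pipeline sketched in this abstract, RBF networks acting as functionals from perceptual attribute ratings to a PCA-based BRDF representation, maps naturally onto standard tooling. Below is a minimal, hypothetical Python sketch using scipy's RBFInterpolator; the attribute count, the five PCA components, and the random stand-in data are illustrative assumptions, not the paper's dataset.

```python
# Minimal sketch: an RBF functional from perceptual attributes to BRDF PCA space.
import numpy as np
from scipy.interpolate import RBFInterpolator

rng = np.random.default_rng(0)
attributes = rng.uniform(0, 1, size=(400, 6))  # e.g. ratings for glossiness, ...
pca_coeffs = rng.normal(size=(400, 5))         # stand-in PCA coordinates of BRDFs

# Smoothing makes the fit regress noisy subjective ratings
# instead of interpolating them exactly.
to_brdf = RBFInterpolator(attributes, pca_coeffs,
                          kernel="thin_plate_spline", smoothing=1e-2)

edit = np.array([[0.9, 0.1, 0.5, 0.5, 0.2, 0.3]])  # desired attribute values
print(to_brdf(edit))                               # PCA coefficients of the edit
```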
Shekhar, S., Kunz Beigpour, S., Ziegler, M., et al. 2018. Light-Field Intrinsic Dataset. Proceedings of the British Machine Vision Conference 2018 (BMVC), British Machine Vision Association.
Export
BibTeX
@inproceedings{Shekhar_BMVC2018, TITLE = {Light-Field Intrinsic Dataset}, AUTHOR = {Shekhar, Sumit and Kunz Beigpour, Shida and Ziegler, Matthias and Chwesiuk, Micha{\l} and Pale{\'n}, Dawid and Myszkowski, Karol and Keinert, Joachim and Mantiuk, Rados{\l}aw and Didyk, Piotr}, LANGUAGE = {eng}, URL = {http://bmvc2018.org/programme/BMVC2018.zip}, PUBLISHER = {British Machine Vision Association}, YEAR = {2018}, BOOKTITLE = {Proceedings of the British Machine Vision Conference 2018 (BMVC)}, EID = {0120}, ADDRESS = {Newcastle, UK}, }
Endnote
%0 Conference Proceedings %A Shekhar, Sumit %A Kunz Beigpour, Shida %A Ziegler, Matthias %A Chwesiuk, Michał %A Paleń, Dawid %A Myszkowski, Karol %A Keinert, Joachim %A Mantiuk, Radosław %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Light-Field Intrinsic Dataset : %G eng %U http://hdl.handle.net/21.11116/0000-0002-0E38-1 %D 2018 %B British Machine Vision Conference 2018 (BMVC) %Z date of event: 2018-09-03 - 2018-09-06 %C Newcastle, UK %K Forschungsgruppe Geiger %B Proceedings of the British Machine Vision Conference 2018 (BMVC) %Z sequence number: 0120 %I British Machine Vision Association %U http://bmvc2018.org/programme/BMVC2018.zip
Wolski, K., Giunchi, D., Ye, N., et al. 2018. Dataset and Metrics for Predicting Local Visible Differences. ACM Transactions on Graphics 37, 5.
Export
BibTeX
@article{wolski2018dataset, TITLE = {Dataset and Metrics for Predicting Local Visible Differences}, AUTHOR = {Wolski, Krzysztof and Giunchi, Daniele and Ye, Nanyang and Didyk, Piotr and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Seidel, Hans-Peter and Steed, Anthony and Mantiuk, Rafa{\l} K.}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3196493}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2018}, DATE = {2018}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {37}, NUMBER = {5}, EID = {172}, }
Endnote
%0 Journal Article %A Wolski, Krzysztof %A Giunchi, Daniele %A Ye, Nanyang %A Didyk, Piotr %A Myszkowski, Karol %A Mantiuk, Radosław %A Seidel, Hans-Peter %A Steed, Anthony %A Mantiuk, Rafał K. %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Dataset and Metrics for Predicting Local Visible Differences : %G eng %U http://hdl.handle.net/21.11116/0000-0001-5F75-2 %R 10.1145/3196493 %7 2018 %D 2018 %J ACM Transactions on Graphics %V 37 %N 5 %Z sequence number: 172 %I ACM %C New York, NY %@ false
2017
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017a. Towards a Quality Metric for Dense Light Fields. http://arxiv.org/abs/1704.07576.
(arXiv: 1704.07576)
Abstract
Light fields have become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in the light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.
Export
BibTeX
@online{AdhikarlaArXiv17, TITLE = {Towards a Quality Metric for Dense Light Fields}, AUTHOR = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} K. and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, URL = {http://arxiv.org/abs/1704.07576}, EPRINT = {1704.07576}, EPRINTTYPE = {arXiv}, YEAR = {2017}, ABSTRACT = {Light fields have become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in the light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics.}, }
Endnote
%0 Report %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafał K. %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %U http://hdl.handle.net/11858/00-001M-0000-002D-2C2C-1 %U http://arxiv.org/abs/1704.07576 %D 2017 %X Light fields have become a popular representation of three dimensional scenes, and there is interest in their processing, resampling, and compression. As those operations often result in loss of quality, there is a need to quantify it. In this work, we collect a new dataset of dense reference and distorted light fields as well as the corresponding quality scores which are scaled in perceptual units. The scores were acquired in a subjective experiment using an interactive light-field viewing setup. The dataset contains typical artifacts that occur in the light-field processing chain due to light-field reconstruction, multi-view compression, and limitations of automultiscopic displays. We test a number of existing objective quality metrics to determine how well they can predict the quality of light fields. We find that the existing image quality metrics provide good measures of light-field quality, but require dense reference light fields for optimal performance. For more complex tasks of comparing two distorted light fields, their performance drops significantly, which reveals the need for new, light-field-specific metrics. %K Computer Science, Computer Vision and Pattern Recognition, cs.CV
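One finding reported in this abstract, that existing 2D image metrics measure light-field quality well when a dense reference is available, suggests a simple per-view pooling scheme. The sketch below is a hypothetical illustration (not the paper's evaluation code), assuming grayscale views and using SSIM from scikit-image.

```python
# Minimal sketch: pool a 2D metric over light-field views against a dense reference.
import numpy as np
from skimage.metrics import structural_similarity

def light_field_ssim(reference, distorted):
    """reference, distorted: (n_views, H, W) arrays with values in [0, 1]."""
    scores = [structural_similarity(r, d, data_range=1.0)
              for r, d in zip(reference, distorted)]
    return float(np.mean(scores))  # average per-view quality into one score

# Illustrative data: 64 views, mild noise as the "distortion".
rng = np.random.default_rng(1)
ref = rng.uniform(size=(64, 128, 128))
dst = np.clip(ref + rng.normal(scale=0.05, size=ref.shape), 0.0, 1.0)
print(light_field_ssim(ref, dst))
```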
Adhikarla, V.K., Vinkler, M., Sumin, D., et al. 2017b. Towards a Quality Metric for Dense Light Fields. 30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017), IEEE Computer Society.
Export
BibTeX
@inproceedings{Vamsi2017, TITLE = {Towards a Quality Metric for Dense Light Fields}, AUTHOR = {Adhikarla, Vamsi Kiran and Vinkler, Marek and Sumin, Denis and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISBN = {978-1-5386-0458-8}, DOI = {10.1109/CVPR.2017.396}, PUBLISHER = {IEEE Computer Society}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {30th IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2017)}, PAGES = {3720--3729}, ADDRESS = {Honolulu, HI, USA}, }
Endnote
%0 Conference Proceedings %A Adhikarla, Vamsi Kiran %A Vinkler, Marek %A Sumin, Denis %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Towards a Quality Metric for Dense Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E476-3 %R 10.1109/CVPR.2017.396 %D 2017 %B 30th IEEE Conference on Computer Vision and Pattern Recognition %Z date of event: 2017-07-21 - 2017-07-26 %C Honolulu, HI, USA %B 30th IEEE Conference on Computer Vision and Pattern Recognition %P 3720 - 3729 %I IEEE Computer Society %@ 978-1-5386-0458-8
Arabadzhiyska, E., Tursun, O.T., Myszkowski, K., Seidel, H.-P., and Didyk, P. 2017. Saccade Landing Position Prediction for Gaze-Contingent Rendering. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2017) 36, 4.
Export
BibTeX
@article{ArabadzhiyskaSIGGRAPH2017, TITLE = {Saccade Landing Position Prediction for Gaze-Contingent Rendering}, AUTHOR = {Arabadzhiyska, Elena and Tursun, Okan Tarhan and Myszkowski, Karol and Seidel, Hans-Peter and Didyk, Piotr}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3072959.3073642}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {36}, NUMBER = {4}, PAGES = {1--12}, EID = {50}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2017}, }
Endnote
%0 Journal Article %A Arabadzhiyska, Elena %A Tursun, Okan Tarhan %A Myszkowski, Karol %A Seidel, Hans-Peter %A Didyk, Piotr %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Saccade Landing Position Prediction for Gaze-Contingent Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-7D82-9 %R 10.1145/3072959.3073642 %7 2017 %D 2017 %J ACM Transactions on Graphics %V 36 %N 4 %& 1 %P 1 - 12 %Z sequence number: 50 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2017 %O ACM SIGGRAPH 2017 Los Angeles, California, 30 July - 3 August
Dunn, D., Tippets, C., Torell, K., et al. 2017a. Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors. IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR 2017) 23, 4.
(Best Paper Award)
Export
BibTeX
@article{DunnVR2017, TITLE = {Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors}, AUTHOR = {Dunn, David and Tippets, Cary and Torell, Kent and Kellnhofer, Petr and Ak{\c s}it, Kaan and Didyk, Piotr and Myszkowski, Karol and Luebke, David and Fuchs, Henry}, LANGUAGE = {eng}, ISSN = {1077-2626}, DOI = {10.1109/TVCG.2017.2657058}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics (Proc. IEEE VR)}, VOLUME = {23}, NUMBER = {4}, PAGES = {1322--1331}, BOOKTITLE = {Selected Proceedings IEEE Virtual Reality 2017 (IEEE VR 2017)}, }
Endnote
%0 Journal Article %A Dunn, David %A Tippets, Cary %A Torell, Kent %A Kellnhofer, Petr %A Akşit, Kaan %A Didyk, Piotr %A Myszkowski, Karol %A Luebke, David %A Fuchs, Henry %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors : (Best Paper Award) %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-3095-4 %R 10.1109/TVCG.2017.2657058 %7 2017 %D 2017 %J IEEE Transactions on Visualization and Computer Graphics %V 23 %N 4 %& 1322 %P 1322 - 1331 %I IEEE Computer Society %C New York, NY %@ false %B Selected Proceedings IEEE Virtual Reality 2017 %O IEEE VR 2017 Los Angeles, California on March 18-22, 2017 %U http://telepresence.web.unc.edu/research/dynamic-focus-augmented-reality-display/
Dunn, D., Tippets, C., Torell, K., et al. 2017b. Membrane AR: Varifocal, Wide Field of View Augmented Reality Display from Deformable Membranes. ACM SIGGRAPH 2017 Emerging Technologies, ACM.
(Digital Content Association of Japan Award)
Export
BibTeX
@inproceedings{Dunn2017, TITLE = {Membrane {AR}: {V}arifocal, Wide Field of View Augmented Reality Display from Deformable Membranes}, AUTHOR = {Dunn, David and Tippets, Cary and Torell, Kent and Fuchs, Henry and Kellnhofer, Petr and Myszkowski, Karol and Didyk, Piotr and Ak{\c s}it, Kaan and Luebke, David}, LANGUAGE = {eng}, ISBN = {978-1-4503-5012-9}, DOI = {10.1145/3084822.3084846}, PUBLISHER = {ACM}, YEAR = {2017}, DATE = {2017}, BOOKTITLE = {ACM SIGGRAPH 2017 Emerging Technologies}, PAGES = {1--2}, EID = {15}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Dunn, David %A Tippets, Cary %A Torell, Kent %A Fuchs, Henry %A Kellnhofer, Petr %A Myszkowski, Karol %A Didyk, Piotr %A Akşit, Kaan %A Luebke, David %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Membrane AR: Varifocal, Wide Field of View Augmented Reality Display from Deformable Membranes : (Digital Content Association of Japan Award) %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E47B-A %R 10.1145/3084822.3084846 %D 2017 %B 44th SIGGRAPH Conference on Computer Graphics and Interactive Techniques %Z date of event: 2017-07-30 - 2017-08-03 %C Los Angeles, CA, USA %B ACM SIGGRAPH 2017 Emerging Technologies %P 1 - 2 %Z sequence number: 15 %I ACM %@ 978-1-4503-5012-9
Elek, O., Sumin, D., Zhang, R., et al. 2017. Scattering-aware Texture Reproduction for 3D Printing. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2017) 36, 6.
Export
BibTeX
@article{ElekSIGASIA2017, TITLE = {Scattering-aware Texture Reproduction for {3D} Printing}, AUTHOR = {Elek, Oskar and Sumin, Denis and Zhang, Ran and Weyrich, Tim and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and K{\v r}iv{\'a}nek, Jaroslav}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/3130800.3130890}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2017}, DATE = {2017}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {36}, NUMBER = {6}, EID = {241}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2017}, }
Endnote
%0 Journal Article %A Elek, Oskar %A Sumin, Denis %A Zhang, Ran %A Weyrich, Tim %A Myszkowski, Karol %A Bickel, Bernd %A Wilkie, Alexander %A Křivánek, Jaroslav %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations %T Scattering-aware Texture Reproduction for 3D Printing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002D-E485-1 %R 10.1145/3130800.3130890 %7 2017 %D 2017 %J ACM Transactions on Graphics %O TOG %V 36 %N 6 %Z sequence number: 241 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2017 %O ACM SIGGRAPH Asia 2017 Bangkok, Thailand, 27 - 30 November 2017
Weier, M., Stengel, M., Roth, T., et al. 2017. Perception-driven Accelerated Rendering. Computer Graphics Forum 36, 2.
Export
BibTeX
@article{WeierEG2017STAR, TITLE = {Perception-driven Accelerated Rendering}, AUTHOR = {Weier, Martin and Stengel, Michael and Roth, Thorsten and Didyk, Piotr and Eisemann, Elmar and Eisemann, Martin and Grogorick, Steve and Hinkenjann, Andr{\'e} and Kruijff, Elmar and Magnor, Marcus A. and Myszkowski, Karol and Slusallek, Philipp}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.13150}, PUBLISHER = {Blackwell-Wiley}, ADDRESS = {Oxford}, YEAR = {2017}, DATE = {2017}, JOURNAL = {Computer Graphics Forum}, VOLUME = {36}, NUMBER = {2}, PAGES = {611--643}, }
Endnote
%0 Journal Article %A Weier, Martin %A Stengel, Michael %A Roth, Thorsten %A Didyk, Piotr %A Eisemann, Elmar %A Eisemann, Martin %A Grogorick, Steve %A Hinkenjann, André %A Kruijff, Elmar %A Magnor, Marcus A. %A Myszkowski, Karol %A Slusallek, Philipp %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Perception-driven Accelerated Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-3496-8 %R 10.1111/cgf.13150 %7 2017 %D 2017 %J Computer Graphics Forum %V 36 %N 2 %& 611 %P 611 - 643 %I Blackwell-Wiley %C Oxford %@ false
2016
Dąbała, Ł., Ziegler, M., Didyk, P., et al. 2016. Efficient Multi-image Correspondences for On-line Light Field Video Processing. Computer Graphics Forum (Proc. Pacific Graphics 2016) 35, 7.
Export
BibTeX
@article{DabalaPG2016, TITLE = {Efficient Multi-image Correspondences for On-line Light Field Video Processing}, AUTHOR = {D{\c a}ba{\l}a, {\L}ukasz and Ziegler, Matthias and Didyk, Piotr and Zilly, Frederik and Keinert, Joachim and Myszkowski, Karol and Rokita, Przemyslaw and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {1467-8659}, DOI = {10.1111/cgf.13037}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {35}, NUMBER = {7}, PAGES = {401--410}, BOOKTITLE = {The 24th Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2016)}, }
Endnote
%0 Journal Article %A Dąbała, Łukasz %A Ziegler, Matthias %A Didyk, Piotr %A Zilly, Frederik %A Keinert, Joachim %A Myszkowski, Karol %A Rokita, Przemyslaw %A Ritschel, Tobias %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Multi-image Correspondences for On-line Light Field Video Processing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82BA-5 %R 10.1111/cgf.13037 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 7 %& 401 %P 401 - 410 %I Wiley-Blackwell %C Oxford, UK %@ false %B The 24th Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2016 PG 2016
Gryaditskaya, Y., Masia, B., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Gloss Editing in Light Fields. VMV 2016 Vision, Modeling and Visualization, Eurographics Association.
Export
BibTeX
@inproceedings{jgryadit2016, TITLE = {Gloss Editing in Light Fields}, AUTHOR = {Gryaditskaya, Yulia and Masia, Belen and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-03868-025-3}, DOI = {10.2312/vmv.20161351}, PUBLISHER = {Eurographics Association}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {VMV 2016 Vision, Modeling and Visualization}, EDITOR = {Hullin, Matthias and Stamminger, Marc and Weinkauf, Tino}, PAGES = {127--135}, ADDRESS = {Bayreuth, Germany}, }
Endnote
%0 Conference Proceedings %A Gryaditskaya, Yulia %A Masia, Belen %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Gloss Editing in Light Fields : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C5-B %R 10.2312/vmv.20161351 %D 2016 %B 21st International Symposium on Vision, Modeling and Visualization %Z date of event: 2016-10-10 - 2016-10-12 %C Bayreuth, Germany %B VMV 2016 Vision, Modeling and Visualization %E Hullin, Matthias; Stamminger, Marc; Weinkauf, Tino %P 127 - 135 %I Eurographics Association %@ 978-3-03868-025-3
Havran, V., Filip, J., and Myszkowski, K. 2016. Perceptually Motivated BRDF Comparison using Single Image. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2016) 35, 4.
Export
BibTeX
@article{havran2016perceptually, TITLE = {Perceptually Motivated {BRDF} Comparison using Single Image}, AUTHOR = {Havran, Vlastimil and Filip, Jiri and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12944}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {35}, NUMBER = {4}, PAGES = {1--12}, BOOKTITLE = {Eurographics Symposium on Rendering 2016}, EDITOR = {Eisemann, Elmar and Fiume, Eugene}, }
Endnote
%0 Journal Article %A Havran, Vlastimil %A Filip, Jiri %A Myszkowski, Karol %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Motivated BRDF Comparison using Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82C0-6 %R 10.1111/cgf.12944 %7 2016 %D 2016 %J Computer Graphics Forum %V 35 %N 4 %& 1 %P 1 - 12 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2016 %O Eurographics Symposium on Rendering 2016 EGSR 2016 Dublin, Ireland, 22-24 June 2016
Kellnhofer, P., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2016a. GazeStereo3D: Seamless Disparity Manipulations. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
Export
BibTeX
@article{KellnhoferSIGGRAPH2016, TITLE = {{GazeStereo3D}: {S}eamless Disparity Manipulations}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925866}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {68}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T GazeStereo3D: Seamless Disparity Manipulations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0190-4 %R 10.1145/2897824.2925866 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 68 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016b. Transformation-aware Perceptual Image Metric. Journal of Electronic Imaging 25, 5.
Export
BibTeX
@article{Kellnhofer2016jei, TITLE = {Transformation-aware Perceptual Image Metric}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1017-9909}, DOI = {10.1117/1.JEI.25.5.053014}, PUBLISHER = {SPIE}, ADDRESS = {Bellingham, WA}, YEAR = {2016}, DATE = {2016}, JOURNAL = {Journal of Electronic Imaging}, VOLUME = {25}, NUMBER = {5}, EID = {053014}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B3-4 %R 10.1117/1.JEI.25.5.053014 %7 2016 %D 2016 %J Journal of Electronic Imaging %V 25 %N 5 %Z sequence number: 053014 %I SPIE %C Bellingham, WA %@ false
Kellnhofer, P., Didyk, P., Ritschel, T., Masia, B., Myszkowski, K., and Seidel, H.-P. 2016c. Motion Parallax in Stereo 3D: Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Kellnhofer2016SGA, TITLE = {Motion Parallax in Stereo {3D}: {M}odel and Applications}, AUTHOR = {Kellnhofer, Petr and Didyk, Piotr and Ritschel, Tobias and Masia, Belen and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980230}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {176}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Didyk, Piotr %A Ritschel, Tobias %A Masia, Belen %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Parallax in Stereo 3D: Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B6-D %R 10.1145/2980179.2980230 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 176 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Lavoué, G., Liu, H., Myszkowski, K., and Lin, W. 2016. Quality Assessment and Perception in Computer Graphics. IEEE Computer Graphics and Applications 36, 4.
Export
BibTeX
@article{Lavoue2016, TITLE = {Quality Assessment and Perception in Computer Graphics}, AUTHOR = {Lavou{\'e}, Guillaume and Liu, Hantao and Myszkowski, Karol and Lin, Weisi}, LANGUAGE = {eng}, ISSN = {0272-1716}, DOI = {10.1109/MCG.2016.72}, PUBLISHER = {IEEE Computer Society}, ADDRESS = {Los Alamitos, CA}, YEAR = {2016}, DATE = {2016}, JOURNAL = {IEEE Computer Graphics and Applications}, VOLUME = {36}, NUMBER = {4}, PAGES = {21--22}, }
Endnote
%0 Journal Article %A Lavoué, Guillaume %A Liu, Hantao %A Myszkowski, Karol %A Lin, Weisi %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Quality Assessment and Perception in Computer Graphics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-8411-2 %R 10.1109/MCG.2016.72 %7 2016-07-29 %D 2016 %J IEEE Computer Graphics and Applications %V 36 %N 4 %& 21 %P 21 - 22 %I IEEE Computer Society %C Los Alamitos, CA %@ false
Leimkühler, T., Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2016. Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion. Graphics Interface 2016, 42nd Graphics Interface Conference, Canadian Information Processing Society.
(Best Student Paper Award)
Export
BibTeX
@inproceedings{LeimkuehlerGI2016, TITLE = {Perceptual real-time {2D}-to-{3D} conversion using cue fusion}, AUTHOR = {Leimk{\"u}hler, Thomas and Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-9947868-1-4}, DOI = {10.20380/GI2016.02}, PUBLISHER = {Canadian Information Processing Society}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {Graphics Interface 2016, 42nd Graphics Interface Conference}, EDITOR = {Popa, Tiberiu and Moffatt, Karyn}, PAGES = {5--12}, ADDRESS = {Victoria, Canada}, }
Endnote
%0 Conference Proceedings %A Leimkühler, Thomas %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Real-Time 2D-to-3D Conversion Using Cue Fusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-823D-1 %R 10.20380/GI2016.02 %D 2016 %B 42nd Graphics Interface Conference %Z date of event: 2016-06-01 - 2016-06-03 %C Victoria, Canada %B Graphics Interface 2016 %E Popa, Tiberiu; Moffatt, Karyn %P 5 - 12 %I Canadian Information Processing Society %@ 978-0-9947868-1-4
Mantiuk, R.K. and Myszkowski, K. 2016. Perception-Inspired High Dynamic Range Video Coding and Compression. In: CHIPS 2020 VOL. 2. Springer, New York, NY.
Export
BibTeX
@incollection{Mantiuk_Chips2020, TITLE = {Perception-Inspired High Dynamic Range Video Coding and Compression}, AUTHOR = {Mantiuk, Rafa{\l} K. and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {978-3-319-22092-5}, DOI = {10.1007/978-3-319-22093-2_14}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {CHIPS 2020 VOL. 2}, EDITOR = {Hoefflinger, Bernd}, PAGES = {211--220}, SERIES = {The Frontiers Collection}, }
Endnote
%0 Book Section %A Mantiuk, Rafał K. %A Myszkowski, Karol %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Inspired High Dynamic Range Video Coding and Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002C-2DE8-3 %R 10.1007/978-3-319-22093-2_14 %D 2016 %B CHIPS 2020 VOL. 2 %E Hoefflinger, Bernd %P 211 - 220 %I Springer %C New York, NY %@ 978-3-319-22092-5 %S The Frontiers Collection
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016a. Intuitive Editing of Material Appearance. ACM SIGGRAPH 2016 Posters.
Export
BibTeX
@inproceedings{SerranoSIGGRAPH2016, TITLE = {Intuitive Editing of Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISBN = {978-1-4503-4371-8}, DOI = {10.1145/2945078.2945141}, PUBLISHER = {ACM}, YEAR = {2016}, DATE = {2016}, BOOKTITLE = {ACM SIGGRAPH 2016 Posters}, PAGES = {1--2}, EID = {63}, ADDRESS = {Anaheim, CA, USA}, }
Endnote
%0 Generic %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Intuitive Editing of Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-0170-C %R 10.1145/2945078.2945141 %D 2016 %Z name of event: the 43rd International Conference and Exhibition on Computer Graphics & Interactive Techniques %Z date of event: 2016-07-24 - 2016-07-28 %Z place of event: Anaheim, CA, USA %B ACM SIGGRAPH 2016 Posters %P 1 - 2 %Z sequence number: 63 %@ 978-1-4503-4371-8
Serrano, A., Gutierrez, D., Myszkowski, K., Seidel, H.-P., and Masia, B. 2016b. An Intuitive Control Space for Material Appearance. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2016) 35, 6.
Export
BibTeX
@article{Serrano_MaterialAppearance_2016, TITLE = {An Intuitive Control Space for Material Appearance}, AUTHOR = {Serrano, Ana and Gutierrez, Diego and Myszkowski, Karol and Seidel, Hans-Peter and Masia, Belen}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2980179.2980242}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {35}, NUMBER = {6}, EID = {186}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2016}, }
Endnote
%0 Journal Article %A Serrano, Ana %A Gutierrez, Diego %A Myszkowski, Karol %A Seidel, Hans-Peter %A Masia, Belen %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Intuitive Control Space for Material Appearance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-82B8-9 %R 10.1145/2980179.2980242 %7 2016 %D 2016 %J ACM Transactions on Graphics %O TOG %V 35 %N 6 %Z sequence number: 186 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2016 %O ACM SIGGRAPH Asia 2016
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2016. Emulating Displays with Continuously Varying Frame Rates. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2016) 35, 4.
Export
BibTeX
@article{TemplinSIGGRAPH2016, TITLE = {Emulating Displays with Continuously Varying Frame Rates}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2897824.2925879}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2016}, DATE = {2016}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {35}, NUMBER = {4}, EID = {67}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2016}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Emulating Displays with Continuously Varying Frame Rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-002B-018D-E %R 10.1145/2897824.2925879 %7 2016 %D 2016 %J ACM Transactions on Graphics %V 35 %N 4 %Z sequence number: 67 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2016 %O ACM SIGGRAPH 2016 Anaheim, California
2015
Arpa, S., Ritschel, T., Myszkowski, K., Çapin, T., and Seidel, H.-P. 2015. Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image. Computer Graphics Forum 34, 1.
Export
BibTeX
@article{arpa2014purkinje, TITLE = {Purkinje Images: {Conveying} Different Content for Different Luminance Adaptations in a Single Image}, AUTHOR = {Arpa, Sami and Ritschel, Tobias and Myszkowski, Karol and {\c C}apin, Tolga and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12463}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum}, VOLUME = {34}, NUMBER = {1}, PAGES = {116--126}, }
Endnote
%0 Journal Article %A Arpa, Sami %A Ritschel, Tobias %A Myszkowski, Karol %A Çapin, Tolga %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Purkinje Images: Conveying Different Content for Different Luminance Adaptations in a Single Image : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5D0B-6 %R 10.1111/cgf.12463 %7 2014-10-18 %D 2015 %J Computer Graphics Forum %V 34 %N 1 %& 116 %P 116 - 126 %I Wiley-Blackwell %C Oxford
Gryaditskaya, Y., Pouli, T., Reinhard, E., Myszkowski, K., and Seidel, H.-P. 2015. Motion Aware Exposure Bracketing for HDR Video. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015) 34, 4.
Export
BibTeX
@article{Gryaditskaya2015, TITLE = {Motion Aware Exposure Bracketing for {HDR} Video}, AUTHOR = {Gryaditskaya, Yulia and Pouli, Tania and Reinhard, Erik and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12684}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {34}, NUMBER = {4}, PAGES = {119--130}, BOOKTITLE = {Eurographics Symposium on Rendering 2015}, EDITOR = {Lehtinen, Jaakko and Nowrouzezahrai, Derek}, }
Endnote
%0 Journal Article %A Gryaditskaya, Yulia %A Pouli, Tania %A Reinhard, Erik %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Motion Aware Exposure Bracketing for HDR Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-15D2-B %R 10.1111/cgf.12684 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 119 %P 119 - 130 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015a. A Transformation-aware Perceptual Image Metric. Human Vision and Electronic Imaging XX (HVEI 2015), SPIE/IS&T.
(Best Student Paper Award)
Abstract
Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.
Export
BibTeX
@inproceedings{Kellnhofer2015, TITLE = {A Transformation-aware Perceptual Image Metric}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {9781628414844}, DOI = {10.1117/12.2076754}, PUBLISHER = {SPIE/IS\&T}, YEAR = {2015}, DATE = {2015}, ABSTRACT = {Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations.}, BOOKTITLE = {Human Vision and Electronic Imaging XX (HVEI 2015)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib}, EID = {939408}, SERIES = {Proceedings of SPIE}, VOLUME = {9394}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Transformation-aware Perceptual Image Metric : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-544A-4 %R 10.1117/12.2076754 %D 2015 %B Human Vision and Electronic Imaging XX %Z date of event: 2015-02-08 - 2015-02-12 %C San Francisco, CA, USA %X Predicting human visual perception of image differences has several applications such as compression, rendering, editing and retargeting. Current approaches however, ignore the fact that the human visual system compensates for geometric transformations, e.g. we see that an image and a rotated copy are identical. Instead, they will report a large, false-positive difference. At the same time, if the transformations become too strong or too spatially incoherent, comparing two images indeed gets increasingly difficult. Between these two extremes, we propose a system to quantify the effect of transformations, not only on the perception of image differences, but also on saliency and motion parallax. To this end, we first fit local homographies to a given optical flow field and then convert this field into a field of elementary transformations such as translation, rotation, scaling, and perspective. We conduct a perceptual experiment quantifying the increase of difficulty when compensating for elementary transformations. Transformation entropy is proposed as a novel measure of complexity in a flow field. This representation is then used for applications, such as comparison of non-aligned images, where transformations cause threshold elevation, and detection of salient transformations. %B Human Vision and Electronic Imaging XX %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %Z sequence number: 939408 %I SPIE/IS&T %@ 9781628414844 %B Proceedings of SPIE %N 9394
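The decomposition step in this abstract, converting locally fitted homographies into elementary transformations, can be sketched compactly. The following hypothetical NumPy example splits a 3x3 homography into translation, rotation, scale, and perspective terms under a no-shear assumption; the conventions are illustrative, not the paper's exact parameterization.

```python
# Minimal sketch: split a homography into elementary transformations.
import numpy as np

def decompose_homography(H):
    """Return (translation, rotation angle, scales, perspective) of a 3x3 H."""
    H = H / H[2, 2]                       # normalize so H[2, 2] == 1
    translation = H[:2, 2]
    perspective = H[2, :2]                # projective (keystoning) terms
    A = H[:2, :2]                         # upper-left affine part
    sx = np.linalg.norm(A[:, 0])          # scale along the first axis
    theta = np.arctan2(A[1, 0], A[0, 0])  # rotation angle (no-shear assumption)
    sy = np.linalg.det(A) / sx            # second scale via the determinant
    return translation, theta, (sx, sy), perspective

# Example: rotation by 0.2 rad, x-scale 1.1, translation (3, -1), slight perspective.
H = np.array([[1.1 * np.cos(0.2), -np.sin(0.2),  3.0],
              [1.1 * np.sin(0.2),  np.cos(0.2), -1.0],
              [1e-4,               0.0,          1.0]])
print(decompose_homography(H))
```

A field of such parameters, one per local flow neighborhood, is the representation over which quantities like the proposed transformation entropy are computed.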
Kellnhofer, P., Leimkühler, T., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2015b. What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? Proceedings SAP 2015, ACM.
(Best Presentation Award)
Export
BibTeX
@inproceedings{Kellnhofer2015SAP, TITLE = {What Makes {2D}-to-{3D} Stereo Conversion Perceptually Plausible?}, AUTHOR = {Kellnhofer, Petr and Leimk{\"u}hler, Thomas and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, ISBN = {978-1-4503-3812-7}, DOI = {10.1145/2804408.2804409}, PUBLISHER = {ACM}, YEAR = {2015}, DATE = {2015}, BOOKTITLE = {Proceedings SAP 2015}, PAGES = {59--66}, ADDRESS = {T{\"u}bingen, Germany}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Leimk&#252;hler, Thomas %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T What Makes 2D-to-3D Stereo Conversion Perceptually Plausible? : %U http://hdl.handle.net/11858/00-001M-0000-0029-2460-7 %R 10.1145/2804408.2804409 %D 2015 %B ACM SIGGRAPH Symposium on Applied Perception %Z date of event: 2015-09-13 - 2015-09-14 %C T&#252;bingen, Germany %B Proceedings SAP 2015 %P 59 - 66 %I ACM %@ 978-1-4503-3812-7 %U http://resources.mpi-inf.mpg.de/StereoCueFusion/WhatMakes3D/
Kellnhofer, P., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2015c. Modeling Luminance Perception at Absolute Threshold. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2015)34, 4.
Export
BibTeX
@article{Kellnhofer2015a, TITLE = {Modeling Luminance Perception at Absolute Threshold}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12687}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2015}, DATE = {2015}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {34}, NUMBER = {4}, PAGES = {155--164}, BOOKTITLE = {Eurographics Symposium on Rendering 2015}, EDITOR = {Lehtinen, Jaakko and Nowrouzezahrai, Derek}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Modeling Luminance Perception at Absolute Threshold : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0028-8E8D-4 %R 10.1111/cgf.12687 %7 2015 %D 2015 %J Computer Graphics Forum %V 34 %N 4 %& 155 %P 155 - 164 %I Wiley-Blackwell %C Oxford %@ false %B Eurographics Symposium on Rendering 2015 %O Eurographics Symposium on Rendering 2015 EGSR 2015 Darmstadt, Germany, June 24th - 26th, 2015
Vangorp, P., Myszkowski, K., Graf, E., and Mantiuk, R. 2015. An Empirical Model for Local Luminance Adaptation in the Fovea (Oral Presentation). Perception (Proc. ECVP 2015)44, S1.
Export
BibTeX
@article{VangeropECVP2015, TITLE = {An Empirical Model for Local Luminance Adaptation in the Fovea (Oral Presentation)}, AUTHOR = {Vangorp, Peter and Myszkowski, Karol and Graf, Erich and Mantiuk, Rafa{\l}}, LANGUAGE = {eng}, ISSN = {0301-0066}, DOI = {10.1177/0301006615598674}, PUBLISHER = {SAGE}, ADDRESS = {London}, YEAR = {2015}, DATE = {2015-08}, JOURNAL = {Perception (Proc. ECVP)}, VOLUME = {44}, NUMBER = {S1}, PAGES = {98--98}, EID = {1T3C001}, BOOKTITLE = {38th European Conference on Visual Perception (ECVP 2015)}, }
Endnote
%0 Journal Article %A Vangorp, Peter %A Myszkowski, Karol %A Graf, Erich %A Mantiuk, Rafa&#322; %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T An Empirical Model for Local Luminance Adaptation in the Fovea (Oral Presentation) : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0029-245C-4 %R 10.1177/0301006615598674 %7 2015 %D 2015 %J Perception %V 44 %N S1 %& 98 %P 98 - 98 %Z sequence number: 1T3C001 %I SAGE %C London %@ false %B 38th European Conference on Visual Perception %O ECVP 2015 Liverpool
2014
Dabala, L., Kellnhofer, P., Ritschel, T., et al. 2014. Manipulating Refractive and Reflective Binocular Disparity. Computer Graphics Forum (Proc. EUROGRAPHICS 2014)33, 2.
Abstract
Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes.
Export
BibTeX
@article{Kellnhofer2014b, TITLE = {Manipulating Refractive and Reflective Binocular Disparity}, AUTHOR = {Dabala, Lukasz and Kellnhofer, Petr and Ritschel, Tobias and Didyk, Piotr and Templin, Krzysztof and Rokita, Przemyslaw and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12290}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {53--62}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Dabala, Lukasz %A Kellnhofer, Petr %A Ritschel, Tobias %A Didyk, Piotr %A Templin, Krzysztof %A Rokita, Przemyslaw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Manipulating Refractive and Reflective Binocular Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0019-EEF9-6 %R 10.1111/cgf.12290 %7 2014-06-01 %D 2014 %X Presenting stereoscopic content on 3D displays is a challenging task, usually requiring manual adjustments. A number of techniques have been developed to aid this process, but they account for binocular disparity of surfaces that are diffuse and opaque only. However, combinations of transparent as well as specular materials are common in the real and virtual worlds, and pose a significant problem. For example, excessive disparities can be created which cannot be fused by the observer. Also, multiple stereo interpretations become possible, e. g., for glass that both reflects and refracts, which may confuse the observer and result in poor 3D experience. In this work, we propose an efficient method for analyzing and controlling disparities in computer-generated images of such scenes where surface positions and a layer decomposition are available. Instead of assuming a single per-pixel disparity value, we estimate all possibly perceived disparities at each image location. Based on this representation, we define an optimization to find the best per-pixel camera parameters, assuring that all disparities can be easily fused by a human. A preliminary perceptual study indicates that our approach combines comfortable viewing with realistic depiction of typical specular scenes. %J Computer Graphics Forum %V 33 %N 2 %& 53 %P 53 - 62 %I Wiley-Blackwell %C Oxford, UK %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2014a. Improving Perception of Binocular Stereo Motion on 3D Display Devices. Stereoscopic Displays and Applications XXV, SPIE.
Abstract
This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation on how to improve rendering of synthetic stereo animations.
Export
BibTeX
@inproceedings{Kellnhofer2014a, TITLE = {Improving Perception of Binocular Stereo Motion on {3D} Display Devices}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, ISBN = {9780819499288}, DOI = {10.1117/12.2032389}, PUBLISHER = {SPIE}, YEAR = {2014}, DATE = {2014}, ABSTRACT = {This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation on how to improve rendering of synthetic stereo animations.}, BOOKTITLE = {Stereoscopic Displays and Applications XXV}, EDITOR = {Woods, Andrew J. and Holliman, Nicolas S. and Favalora, Gregg E.}, PAGES = {1--11}, EID = {901116}, SERIES = {Proceedings of SPIE-IS\&T Electronic Imaging}, VOLUME = {9011}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Improving Perception of Binocular Stereo Motion on 3D Display Devices : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-318D-7 %R 10.1117/12.2032389 %D 2014 %B Stereoscopic Displays and Applications XXV %Z date of event: 2014-02-03 - 2014-02-05 %C San Francisco, CA, USA %X This paper studies the presentation of moving stereo images on different display devices. We address three representative issues. First, we propose temporal compensation for the Pulfrich effect found when using anaglyph glasses. Second, we describe how content-adaptive capture protocols can reduce false motion-in-depth sensation for time-multiplexing based displays. Third, we conclude with a recommendation on how to improve rendering of synthetic stereo animations. %B Stereoscopic Displays and Applications XXV %E Woods, Andrew J.; Holliman, Nicolas S.; Favalora, Gregg E. %P 1 - 11 %Z sequence number: 901116 %I SPIE %@ 9780819499288 %B Proceedings of SPIE-IS&T Electronic Imaging %N 9011 %@ false
Kellnhofer, P., Ritschel, T., Vangorp, P., Myszkowski, K., and Seidel, H.-P. 2014b. Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision. ACM Transactions on Applied Perception11, 3.
Export
BibTeX
@article{kellnhofer:2014c:DarkStereo, TITLE = {Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Vangorp, Peter and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1544-3558}, DOI = {10.1145/2644813}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Applied Perception}, VOLUME = {11}, NUMBER = {3}, EID = {15}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Vangorp, Peter %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Stereo Day-for-Night: Retargeting Disparity for Scotopic Vision : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE0E-E %R 10.1145/2644813 %7 2014 %D 2014 %J ACM Transactions on Applied Perception %V 11 %N 3 %Z sequence number: 15 %I ACM %C New York, NY %@ false
Pajak, D., Herzog, R., Mantiuk, R., et al. 2014. Perceptual Depth Compression for Stereo Applications. Computer Graphics Forum (Proc. EUROGRAPHICS 2014)33, 2.
Export
BibTeX
@article{PajakEG2014, TITLE = {Perceptual Depth Compression for Stereo Applications}, AUTHOR = {Pajak, Dawid and Herzog, Robert and Mantiuk, Rados{\l}aw and Didyk, Piotr and Eisemann, Elmar and Myszkowski, Karol and Pulli, Kari}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12293}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {33}, NUMBER = {2}, PAGES = {195--204}, BOOKTITLE = {EUROGRAPHICS 2014}, EDITOR = {L{\'e}vy, Bruno and Kautz, Jan}, }
Endnote
%0 Journal Article %A Pajak, Dawid %A Herzog, Robert %A Mantiuk, Rados&#322;aw %A Didyk, Piotr %A Eisemann, Elmar %A Myszkowski, Karol %A Pulli, Kari %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Perceptual Depth Compression for Stereo Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-3C0C-0 %R 10.1111/cgf.12293 %7 2014-06-01 %D 2014 %J Computer Graphics Forum %V 33 %N 2 %& 195 %P 195 - 204 %I Wiley-Blackwell %C Oxford, UK %B EUROGRAPHICS 2014 %O The European Association for Computer Graphics 35th Annual Conference ; Strasbourg, France, April 7th &#8211; 11th, 2014 EUROGRAPHICS 2014 EG 2014
Templin, K., Didyk, P., Myszkowski, K., Hefeeda, M.M., Seidel, H.-P., and Matusik, W. 2014a. Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2014)33, 4.
Export
BibTeX
@article{Templin:2014:MOE:2601097.2601148, TITLE = {Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Hefeeda, Mohamed M. and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2601097.2601148}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2014}, DATE = {2014}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {33}, NUMBER = {4}, PAGES = {1--8}, EID = {145}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2014}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Hefeeda, Mohamed M. %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T Modeling and Optimizing Eye Vergence Response to Stereoscopic Cuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-EE16-9 %R 10.1145/2601097.2601148 %7 2014 %D 2014 %K S3D, binocular, eye&#8208;tracking %J ACM Transactions on Graphics %V 33 %N 4 %& 1 %P 1 - 8 %Z sequence number: 145 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2014 %O ACM SIGGRAPH 2014 Vancouver, BC, Canada
Templin, K., Didyk, P., Myszkowski, K., and Seidel, H.-P. 2014b. Perceptually-motivated Stereoscopic Film Grain. Computer Graphics Forum (Proc. Pacific Graphics 2014)33, 7.
Export
BibTeX
@article{Templin2014b, TITLE = {Perceptually-motivated Stereoscopic Film Grain}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12503}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2014}, DATE = {2014}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {33}, NUMBER = {7}, PAGES = {349--358}, BOOKTITLE = {22nd Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2014)}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Stereoscopic Film Grain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF2-B %R 10.1111/cgf.12503 %7 2014-10-28 %D 2014 %J Computer Graphics Forum %V 33 %N 7 %& 349 %P 349 - 358 %I Wiley-Blackwell %C Oxford %B 22nd Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2014 PG 2014 8 to 10 Oct 2014, Seoul, South Korea
Vangorp, P., Mantiuk, R., Bazyluk, B., et al. 2014. Depth from HDR: Depth Induction or Increased Realism? SAP 2014, ACM Symposium on Applied Perception, ACM.
Export
BibTeX
@inproceedings{Vangorp2014, TITLE = {Depth from {HDR}: {Depth} Induction or Increased Realism?}, AUTHOR = {Vangorp, Peter and Mantiuk, Rafal and Bazyluk, Bartosz and Myszkowski, Karol and Mantiuk, Rados{\l}aw and Watt, Simon J. and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-3009-1}, DOI = {10.1145/2628257.2628258}, PUBLISHER = {ACM}, YEAR = {2014}, DATE = {2014}, BOOKTITLE = {SAP 2014, ACM Symposium on Applied Perception}, EDITOR = {Bailey, Reynold and Kuhl, Scott}, PAGES = {71--78}, ADDRESS = {Vancouver, Canada}, }
Endnote
%0 Conference Proceedings %A Vangorp, Peter %A Mantiuk, Rafal %A Bazyluk, Bartosz %A Myszkowski, Karol %A Mantiuk, Rados&#322;aw %A Watt, Simon J. %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Depth from HDR: Depth Induction or Increased Realism? : %G eng %U http://hdl.handle.net/11858/00-001M-0000-001A-34DB-5 %R 10.1145/2628257.2628258 %D 2014 %B ACM Symposium on Applied Perception %Z date of event: 2014-08-08 - 2014-08-09 %C Vancouver, Canada %K binocular disparity, contrast, luminance, stereo 3D %B SAP 2014 %E Bailey, Reynold; Kuhl, Scott %P 71 - 78 %I ACM %@ 978-1-4503-3009-1
2013
Čadík, M., Herzog, R., Mantiuk, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2013. Learning to Predict Localized Distortions in Rendered Images. Computer Graphics Forum (Proc. Pacific Graphics 2013)32, 7.
Export
BibTeX
@article{CadikPG2013, TITLE = {Learning to Predict Localized Distortions in Rendered Images}, AUTHOR = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafa{\l} and Mantiuk, Rados{\l}aw and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/cgf.12248}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford}, YEAR = {2013}, DATE = {2013}, JOURNAL = {Computer Graphics Forum (Proc. Pacific Graphics)}, VOLUME = {32}, NUMBER = {7}, PAGES = {401--410}, BOOKTITLE = {21st Pacific Conference on Computer Graphics and Applications (Pacific Graphics 2013)}, }
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafa&#322; %A Mantiuk, Rados&#322;aw %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Learning to Predict Localized Distortions in Rendered Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0024-5DF9-E %R 10.1111/cgf.12248 %7 2014-11-25 %D 2013 %J Computer Graphics Forum %V 32 %N 7 %& 401 %P 401 - 410 %I Wiley-Blackwell %C Oxford %B 21st Pacific Conference on Computer Graphics and Applications %O Pacific Graphics 2013 PG 2013 October 7-9, 2013, Singapore
Kellnhofer, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2013. Optimizing Disparity for Motion in Depth. Computer Graphics Forum (Proc. Eurographics Symposium on Rendering 2013)32, 4.
Abstract
Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content, however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.
Export
BibTeX
@article{Kellnhofer2013, TITLE = {Optimizing Disparity for Motion in Depth}, AUTHOR = {Kellnhofer, Petr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/cgf.12160}, LOCALID = {Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2013}, DATE = {2013}, ABSTRACT = {Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content, however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion.}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics Symposium on Rendering)}, VOLUME = {32}, NUMBER = {4}, PAGES = {143--152}, BOOKTITLE = {Eurographics Symposium on Rendering 2013}, EDITOR = {Holzschuch, N. and Rusinkiewicz, S.}, }
Endnote
%0 Journal Article %A Kellnhofer, Petr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Optimizing Disparity for Motion in Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-3D13-B %R 10.1111/cgf.12160 %F OTHER: Local-ID: AAA9E8B7CDD4AD1FC1257BFD004E5D30-Kellnhofer2013 %7 2013 %D 2013 %X Beyond the careful design of stereo acquisition equipment and rendering algorithms, disparity post-processing has recently received much attention, where one of the key tasks is to compress the originally large disparity range to avoid viewing discomfort. The perception of dynamic stereo content, however, relies on reproducing the full disparity-time volume that a scene point undergoes in motion. This volume can be strongly distorted in manipulation, which is only concerned with changing disparity at one instant in time, even if the temporal coherence of that change is maintained. We propose an optimization to preserve stereo motion of content that was subject to an arbitrary disparity manipulation, based on a perceptual model of temporal disparity changes. Furthermore, we introduce a novel 3D warping technique to create stereo image pairs that conform to this optimized disparity map. The paper concludes with perceptual studies of motion reproduction quality and task performance in a simple game, showing how our optimization can achieve both viewing comfort and faithful stereo motion. %J Computer Graphics Forum %V 32 %N 4 %& 143 %P 143 - 152 %I Wiley-Blackwell %C Oxford, UK %@ false %B Eurographics Symposium on Rendering 2013 %O EGSR 2013 Eurographics Symposium on Rendering 2013 Zaragoza, 19 - 21 June, 2013
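Illustrative aside (not part of the publication record): the abstract's point that manipulation should preserve the disparity-time volume can be made concrete with a toy NumPy sketch. The simple velocity blend below is an assumption for illustration only; the paper uses a perceptual model of temporal disparity changes rather than this heuristic.

import numpy as np

def preserve_motion_in_depth(d, m, alpha=0.7):
    # Toy trade-off between a manipulated disparity track m(t) and the
    # original motion in depth of d(t): keep m's mean level (the range
    # compression) but pull its frame-to-frame changes toward the
    # original's. alpha = 1 restores original velocities; alpha = 0
    # leaves m untouched.
    vel = alpha * np.diff(d) + (1 - alpha) * np.diff(m)
    out = np.concatenate([[m[0]], m[0] + np.cumsum(vel)])
    return out + (m.mean() - out.mean())

t = np.linspace(0.0, 1.0, 100)
d = 30.0 * np.sin(2 * np.pi * t)   # original disparity track, in pixels
m = 0.3 * d                        # naive compression flattens the motion
opt = preserve_motion_in_depth(d, m)
print(np.abs(np.diff(m) - np.diff(d)).mean(),    # motion error before
      np.abs(np.diff(opt) - np.diff(d)).mean())  # motion error after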
2012
Banterle, F., Artusi, A., Aydin, T.O., et al. 2012. Mapping Images to Target Devices: Spatial, Temporal, Stereo, Tone, and Color. EG 2012 - Tutorials (EUROGRAPHICS 2012), Eurographics Association.
Export
BibTeX
@inproceedings{Didyk2012Course, TITLE = {Mapping Images to Target Devices: {Spatial}, Temporal, Stereo, Tone, and Color}, AUTHOR = {Banterle, Francesco and Artusi, Alessandro and Aydin, Tunc O. and Didyk, Piotr and Eisemann, Elmar and Gutierrez, Diego and Mantiuk, Rafal and Myszkowski, Karol and Ritschel, Tobias}, LANGUAGE = {eng}, ISSN = {1017-4656}, DOI = {10.2312/conf/EG2012/tutorials/t1}, PUBLISHER = {Eurographics Association}, YEAR = {2012}, BOOKTITLE = {EG 2012 -- Tutorials (EUROGRAPHICS 2012)}, EDITOR = {Pajarola, Renato and Spagnuolo, Michela}, EID = {T1}, ADDRESS = {Cagliari, Sardinia, Italy}, }
Endnote
%0 Conference Proceedings %A Banterle, Francesco %A Artusi, Alessandro %A Aydin, Tunc O. %A Didyk, Piotr %A Eisemann, Elmar %A Gutierrez, Diego %A Mantiuk, Rafal %A Myszkowski, Karol %A Ritschel, Tobias %+ External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Mapping Images to Target Devices: Spatial, Temporal, Stereo, Tone, and Color : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F3BC-E %R 10.2312/conf/EG2012/tutorials/t1 %D 2012 %B The European Association for Computer Graphics 33rd Annual Conference %Z date of event: 2012-05-06 - 2012-05-09 %C Cagliari, Sardinia, Italy %B EG 2012 - Tutorials %E Pajarola, Renato; Spagnuolo, Michela %Z sequence number: T1 %I Eurographics Association %@ false
Čadík, M., Herzog, R., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2012. New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012)31, 6.
Export
BibTeX
@article{cadik12iqm_evaluation, TITLE = {New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts}, AUTHOR = {{\v C}ad{\'i}k, Martin and Herzog, Robert and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2366145.2366166}, LOCALID = {Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2012}, DATE = {2012}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {31}, NUMBER = {6}, PAGES = {1--10}, EID = {147}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012}, }
Endnote
%0 Journal Article %A &#268;ad&#237;k, Martin %A Herzog, Robert %A Mantiuk, Rafa&#322; %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T New Measurements Reveal Weaknesses of Image Quality Metrics in Evaluating Graphics Artifacts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-166E-6 %R 10.1145/2366145.2366166 %F OTHER: Local-ID: 1D6D7862B7800D8DC1257AD7003415AE-cadik12iqm_evaluation %7 2012 %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 1 %P 1 - 10 %Z sequence number: 147 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O ACM SIGGRAPH Asia 2012 Singapore, 28 November - 1 December 2012
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2012a. Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth. Human Vision and Electronic Imaging XVII (HVEI 2012), SPIE/IS&T.
Export
BibTeX
@inproceedings{Didyk2012Cornsweet, TITLE = {Apparent Stereo: The {Cornsweet} Illusion Can Enhance Perceived Depth}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, ISBN = {9780819489388}, DOI = {10.1117/12.907612}, LOCALID = {Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet}, PUBLISHER = {SPIE/IS\&T}, YEAR = {2012}, DATE = {2012}, BOOKTITLE = {Human Vision and Electronic Imaging XVII (HVEI 2012)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and de Ridder, Huib}, PAGES = {1--12}, SERIES = {Proceedings of SPIE}, VOLUME = {8291}, ADDRESS = {Burlingame, CA}, }
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Stereo: The Cornsweet Illusion Can Enhance Perceived Depth : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13C8-5 %R 10.1117/12.907612 %F OTHER: Local-ID: B0D8F2F7DF789CF4C1257A710043B8CF-Didyk2012Cornsweet %D 2012 %B Human Vision and Electronic Imaging XVII %Z date of event: 2012-01-23 - 2012-01-26 %C Burlingame, CA %B Human Vision and Electronic Imaging XVII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; de Ridder, Huib %P 1 - 12 %I SPIE/IS&T %@ 9780819489388 %B Proceedings of SPIE %N 8291 %@ false
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., Seidel, H.-P., and Matusik, W. 2012b. A Luminance-contrast-aware Disparity Model and Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2012)31, 6.
Export
BibTeX
@article{Didyk2012SigAsia, TITLE = {A Luminance-contrast-aware Disparity Model and Applications}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter and Matusik, Wojciech}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2366145.2366203}, LOCALID = {Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2012}, DATE = {2012}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {31}, NUMBER = {6}, PAGES = {184:1--184:10}, EID = {184}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2012}, }
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %A Matusik, Wojciech %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T A Luminance-contrast-aware Disparity Model and Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-F3C4-9 %R 10.1145/2366145.2366203 %F OTHER: Local-ID: C754E5AADEF5EA2AC1257AFE0056029B-Didyk2012SigAsia %D 2012 %J ACM Transactions on Graphics %V 31 %N 6 %& 184:1 %P 184:1 - 184:10 %Z sequence number: 184 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2012 %O Singapore, 28 November - 1 December ACM SIGGRAPH Asia 2012
Didyk, P., Ritschel, T., Eisemann, E., and Myszkowski, K. 2012c. Exceeding Physical Limitations: Apparent Display Qualities. In: Perceptual Digital Imaging. CRC, Boca Raton, FL.
Export
BibTeX
@incollection{Didyk2012Chapter, TITLE = {Exceeding Physical Limitations: {Apparent} Display Qualities}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {9781439868560}, LOCALID = {Local-ID: 68CF453A32B7C773C1257A710045D6CB-Didyk2012Chapter}, PUBLISHER = {CRC}, ADDRESS = {Boca Raton, FL}, YEAR = {2012}, DATE = {2012}, BOOKTITLE = {Perceptual Digital Imaging}, EDITOR = {Lukac, Rastislav}, PAGES = {469--501}, }
Endnote
%0 Book Section %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Exceeding Physical Limitations: Apparent Display Qualities : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13CF-8 %F OTHER: Local-ID: 68CF453A32B7C773C1257A710045D6CB-Didyk2012Chapter %D 2012 %B Perceptual Digital Imaging %E Lukac, Rastislav %P 469 - 501 %I CRC %C Boca Raton, FL %@ 9781439868560
Herzog, R., Cadík, M., Aydin, T.O., Kim, K.I., Myszkowski, K., and Seidel, H.-P. 2012. NoRM: No-reference Image Quality Metric for Realistic Image Synthesis. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Abstract
Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved by exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.
Export
BibTeX
@article{NoRM_EG2012, TITLE = {{NoRM}: {No-reference} Image Quality Metric for Realistic Image Synthesis}, AUTHOR = {Herzog, Robert and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Kim, Kwang In and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2012.03055.x}, LOCALID = {Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2012}, DATE = {2012}, ABSTRACT = {Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved by exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {31}, NUMBER = {2}, PAGES = {545--554}, BOOKTITLE = {EUROGRAPHICS 2012}, EDITOR = {Cignoni, Paolo and Ertl, Thomas}, }
Endnote
%0 Journal Article %A Herzog, Robert %A &#268;ad&#237;k, Martin %A Aydin, Tunc Ozan %A Kim, Kwang In %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T NoRM: No-reference Image Quality Metric for Realistic Image Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1586-9 %R 10.1111/j.1467-8659.2012.03055.x %F OTHER: Local-ID: 673028A8C798FD45C1257A47004B2978-NoRM_EG2012 %7 2012-06-14 %D 2012 %X Synthetically generating images and video frames of complex 3D scenes using some photo-realistic rendering software is often prone to artifacts and requires expert knowledge to tune the parameters. The manual work required for detecting and preventing artifacts can be automated through objective quality evaluation of synthetic images. Most practical objective quality assessment methods of natural images rely on a ground-truth reference, which is often not available in rendering applications. While general purpose no-reference image quality assessment is a difficult problem, we show in a subjective study that the performance of a dedicated no-reference metric as presented in this paper can match the state-of-the-art metrics that do require a reference. This level of predictive power is achieved by exploiting information about the underlying synthetic scene (e.g., 3D surfaces, textures) instead of merely considering color, and training our learning framework with typical rendering artifacts. We show that our method successfully detects various non-trivial types of artifacts such as noise and clamping bias due to insufficient virtual point light sources, and shadow map discretization artifacts. We also briefly discuss an inpainting method for automatic correction of detected artifacts. %J Computer Graphics Forum %V 31 %N 2 %& 545 %P 545 - 554 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012 EG 2012
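Illustrative aside (not part of the publication record): the no-reference recipe above, per-region features computed from the synthetic image plus scene auxiliaries such as depth, fed to a trained regressor, can be caricatured with scikit-learn. The feature set, the SVR regressor, and the random stand-in data are placeholders of this sketch, not the paper's learning framework.

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

def patch_features(rgb, depth, s=8):
    # Per-patch luminance and depth statistics -- stand-in features; the
    # paper exploits a richer, scene-aware set (3D surfaces, textures).
    H, W, _ = rgb.shape
    lum = rgb @ np.array([0.2126, 0.7152, 0.0722])
    feats = []
    for y in range(0, H - s + 1, s):
        for x in range(0, W - s + 1, s):
            L = lum[y:y + s, x:x + s]
            D = depth[y:y + s, x:x + s]
            feats.append([L.mean(), L.std(), D.mean(), D.std()])
    return np.asarray(feats)

# Fabricated training data, for shape only: in practice the per-patch
# labels would come from reference metrics evaluated on a training set of
# renderings with known artifacts.
rng = np.random.default_rng(0)
rgb, depth = rng.random((64, 64, 3)), rng.random((64, 64))
X = patch_features(rgb, depth)
y = rng.random(len(X))

model = make_pipeline(StandardScaler(), SVR(kernel="rbf"))
model.fit(X, y)
print(model.predict(X[:4]))  # per-patch quality predictions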
Nguyen, C., Ritschel, T., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2012. 3D Material Style Transfer. Computer Graphics Forum (Proc. EUROGRAPHICS 2012)31, 2.
Abstract
This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.
Export
BibTeX
@article{Nguyen2012z, TITLE = {{3D} Material Style Transfer}, AUTHOR = {Nguyen, Chuong and Ritschel, Tobias and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2012.03022.x}, LOCALID = {Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012}, PUBLISHER = {Wiley-Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2012}, DATE = {2012}, ABSTRACT = {This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {31}, NUMBER = {2}, PAGES = {431--438}, BOOKTITLE = {EUROGRAPHICS 2012}, EDITOR = {Cignoni, Paolo and Ertl, Thomas}, }
Endnote
%0 Journal Article %A Nguyen, Chuong %A Ritschel, Tobias %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Material Style Transfer : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1537-C %F OTHER: Local-ID: 3C190E59F48516AFC1257B0100644708-Nguyen2012 %R 10.1111/j.1467-8659.2012.03022.x %7 2012-06-07 %D 2012 %X This work proposes a technique to transfer the material style or mood from a guide source such as an image or video onto a target 3D scene. It formulates the problem as a combinatorial optimization of assigning discrete materials extracted from the guide source to discrete objects in the target 3D scene. The assignment is optimized to fulfill multiple goals: overall image mood based on several image statistics; spatial material organization and grouping as well as geometric similarity between objects that were assigned to similar materials. To be able to use common uncalibrated images and videos with unknown geometry and lighting as guides, a material estimation derives perceptually plausible reflectance, specularity, glossiness, and texture. Finally, results produced by our method are compared to manual material assignments in a perceptual study. %J Computer Graphics Forum %V 31 %N 2 %& 431 %P 431 - 438 %I Wiley-Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2012 %O EUROGRAPHICS 2012 EG 2012 The European Association for Computer Graphics 33rd Annual Conference, Cagliari, Sardinia, Italy, May 13th &#8211; 18th, 2012
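Illustrative aside (not part of the publication record): the core combinatorial step of the abstract above, assigning guide materials to target objects, can be illustrated with a generic bipartite matching. The Hungarian solver and the squared-distance cost are stand-ins; the paper optimizes richer goals (overall image mood, spatial grouping, geometric similarity) rather than a single feature distance.

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy descriptors: one feature vector per material extracted from the
# guide and per object in the target scene (e.g. mean color, glossiness).
rng = np.random.default_rng(3)
materials = rng.random((6, 4))
objects = rng.random((6, 4))

# Cost of assigning material j to object i: squared feature distance.
cost = ((objects[:, None, :] - materials[None, :, :]) ** 2).sum(axis=-1)

rows, cols = linear_sum_assignment(cost)  # optimal one-to-one assignment
for i, j in zip(rows, cols):
    print(f"object {i} <- material {j} (cost {cost[i, j]:.3f})")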
Ritschel, T., Templin, K., Myszkowski, K., and Seidel, H.-P. 2012. Virtual Passepartouts. Non-Photorealistic Animation and Rendering (NPAR 2012), Eurographics Association.
Abstract
In traditional media, such as photography and painting, a cardboard sheet with a cutout (called passepartout) is frequently placed on top of an image. One of its functions is to increase the depth impression via the "looking-through-a-window" metaphor. This paper shows how an improved 3D effect can be achieved by using a virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.
Export
BibTeX
@inproceedings{RitschelTMS2012, TITLE = {Virtual Passepartouts}, AUTHOR = {Ritschel, Tobias and Templin, Krzysztof and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905673-90-6}, DOI = {10.2312/PE/NPAR/NPAR12/057-063}, LOCALID = {Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012}, PUBLISHER = {Eurographics Association}, YEAR = {2012}, DATE = {2012}, ABSTRACT = {In traditional media, such as photography and painting, a cardboard sheet with a cutout (called \emph{passepartout}) is frequently placed on top of an image. One of its functions is to increase the depth impression via the ``looking-through-a-window'' metaphor. This paper shows how an improved 3D~effect can be achieved by using a \emph{virtual passepartout}: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts.}, BOOKTITLE = {Non-Photorealistic Animation and Rendering (NPAR 2012)}, EDITOR = {Asente, Paul and Grimm, Cindy}, PAGES = {57--63}, ADDRESS = {Annecy, France}, }
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Templin, Krzysztof %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Virtual Passepartouts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-13D3-B %R 10.2312/PE/NPAR/NPAR12/057-063 %F OTHER: Local-ID: AF8C88CA4485E3B1C1257A4500606C5D-RitschelTMS2012 %D 2012 %B Non-Photorealistic Animation and Rendering 2012 %Z date of event: 2012-06-04 - 2012-06-06 %C Annecy, France %X In traditional media, such as photography and painting, a cardboard sheet with a cutout (called passepartout) is frequently placed on top of an image. One of its functions is to increase the depth impression via the "looking-through-a-window" metaphor. This paper shows how an improved 3D effect can be achieved by using a virtual passepartout: a 2D framing that selectively masks the 3D shape and leads to additional occlusion events between the virtual world and the frame. We introduce a pipeline to design virtual passepartouts interactively as a simple post-process on RGB images augmented with depth information. Additionally, an automated approach finds the optimal virtual passepartout for a given scene. Virtual passepartouts can be used to enhance depth depiction in images and videos with depth information, renderings, stereo images and the fabrication of physical passepartouts. %B Non-Photorealistic Animation and Rendering %E Asente, Paul; Grimm, Cindy %P 57 - 63 %I Eurographics Association %@ 978-3-905673-90-6
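Illustrative aside (not part of the publication record): the occlusion events the abstract describes fall out of a simple depth test between the scene and a flat frame. Below is a minimal NumPy sketch under the assumptions that smaller depth values are nearer and that the frame is a constant-color plane; the paper's interactive design pipeline and automatic optimization are not reproduced here.

import numpy as np

def apply_virtual_passepartout(rgb, depth, frame_mask, frame_rgb, frame_depth):
    # Composite a flat frame at depth frame_depth over an RGB-D image: the
    # frame hides scene content behind it, while scene content in front of
    # it stays visible, producing the extra occlusion events.
    out = rgb.copy()
    covered = frame_mask & (depth >= frame_depth)  # scene behind the frame
    out[covered] = frame_rgb
    return out

# Toy usage: a dark border frame floating in front of most of the scene.
H, W = 240, 320
rng = np.random.default_rng(1)
rgb = rng.random((H, W, 3))
depth = np.linspace(1.0, 5.0, W)[None, :].repeat(H, axis=0)  # nearer left
mask = np.zeros((H, W), dtype=bool)
mask[:20, :] = mask[-20:, :] = mask[:, :30] = mask[:, -30:] = True
framed = apply_virtual_passepartout(rgb, depth, mask,
                                    np.array([0.05, 0.05, 0.05]), 2.0)
print(framed.shape)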
Templin, K., Didyk, P., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2012. Highlight Microdisparity for Improved Gloss Depiction. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2012)31, 4.
Abstract
Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the "highlight disparity". Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.
Export
BibTeX
@article{Templin2012, TITLE = {Highlight Microdisparity for Improved Gloss Depiction}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2185520.2185588}, LOCALID = {Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2012}, DATE = {2012}, ABSTRACT = {Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {31}, NUMBER = {4}, PAGES = {1--5}, EID = {92}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2012}, }
Endnote
%0 Journal Article %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Highlight Microdisparity for Improved Gloss Depiction : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0015-1617-8 %F OTHER: Local-ID: BDB99D9DBF6B290EC1257A4500551595-Templin2012 %R 10.1145/2185520.2185588 %7 2012-07-01 %D 2012 %X Human stereo perception of glossy materials is substantially different from the perception of diffuse surfaces: A single point on a diffuse object appears the same for both eyes, whereas it appears different to both eyes on a specular object. As highlights are blurry reflections of light sources they have depth themselves, which is different from the depth of the reflecting surface. We call this difference in depth impression the ``highlight disparity''. Due to artistic motivation, for technical reasons, or because of incomplete data, highlights often have to be depicted on-surface, without any disparity. However, it has been shown that a lack of disparity decreases the perceived glossiness and authenticity of a material. To remedy this contradiction, our work introduces a technique for depiction of glossy materials, which improves over simple on-surface highlights, and avoids the problems of physical highlights. Our technique is computationally simple, can be easily integrated in an existing (GPU) shading system, and allows for local and interactive artistic control. %J ACM Transactions on Graphics %V 31 %N 4 %& 1 %P 1 - 5 %Z sequence number: 92 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2012 %O ACM SIGGRAPH 2012 Los Angeles, California, 5 - 9 August 2012
2011
Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2011. On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications. Human Vision and Electronic Imaging XVI (HVEI 2011), SPIE.
Export
BibTeX
@inproceedings{Cadik2011, TITLE = {On Evaluation of Video Quality Metrics: an {HDR} Dataset for Computer Graphics Applications}, AUTHOR = {{\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-8194-8402-4}, URL = {http://dx.doi.org/10.1117/12.878875}, DOI = {10.1117/12.878875}, PUBLISHER = {SPIE}, YEAR = {2011}, DATE = {2011}, BOOKTITLE = {Human Vision and Electronic Imaging XVI (HVEI 2011)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.}, PAGES = {1--9}, EID = {78650R}, SERIES = {Proceedings of SPIE}, VOLUME = {7865}, ADDRESS = {San Francisco, CA, USA}, }
Endnote
%0 Conference Proceedings %A Čadík, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T On Evaluation of Video Quality Metrics: an HDR Dataset for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13DF-B %F EDOC: 618862 %R 10.1117/12.878875 %U http://dx.doi.org/10.1117/12.878875 %D 2011 %B Human Vision and Electronic Imaging XVI %Z date of event: 2011-01-24 - 2011-01-27 %C San Francisco, CA, USA %B Human Vision and Electronic Imaging XVI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 1 - 9 %Z sequence number: 78650R %I SPIE %@ 978-0-8194-8402-4 %B Proceedings of SPIE %N 7865
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. A Perceptual Model for Disparity. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2011)30, 4.
Abstract
Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and in simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity and to control the level of undesirable distortions or of deliberately introduced enhancements. A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression and re-targeting. We also present novel applications in the form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.
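To make the band-based structure of such a metric tangible, here is a schematic stand-in in Python (numpy and scipy assumed): it decomposes a pixel-disparity map into spatial-frequency bands and pools weighted band differences between two stereo encodings into a single distortion number. The band weights below are placeholders; the actual model derives its sensitivities from the paper's psychophysical experiments.

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def disparity_bands(disp, n_bands=4):
        # Laplacian-pyramid-style decomposition of a pixel-disparity map
        # into spatial-frequency bands plus a low-pass residual.
        bands, current = [], disp.astype(float)
        for i in range(n_bands):
            low = gaussian_filter(current, sigma=2.0 ** (i + 1))
            bands.append(current - low)
            current = low
        bands.append(current)
        return bands

    def disparity_difference(disp_a, disp_b,
                             weights=(1.0, 0.8, 0.5, 0.3, 0.1)):
        # Pool per-band differences with (placeholder) sensitivity weights.
        return sum(w * np.mean(np.abs(a - b)) for a, b, w in
                   zip(disparity_bands(disp_a), disparity_bands(disp_b), weights))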
Export
BibTeX
@article{DidykREMS2011, TITLE = {A Perceptual Model for Disparity}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/2010324.1964991}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {30}, NUMBER = {4}, PAGES = {1--10}, EID = {96}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2011}, }
Endnote
%0 Journal Article %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Model for Disparity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-1388-F %F EDOC: 618890 %R 10.1145/2010324.1964991 %7 2011 %D 2011 %* Review method: peer-reviewed %X Binocular disparity is an important cue for the human visual system to recognize spatial layout, both in reality and simulated virtual worlds. This paper introduces a perceptual model of disparity for computer graphics that is used to define a metric to compare a stereo image to an alternative stereo image and to estimate the magnitude of the perceived disparity change. Our model can be used to assess the effect of disparity to control the level of undesirable distortions or enhancements (introduced on purpose). A number of psycho-visual experiments are conducted to quantify the mutual effect of disparity magnitude and frequency to derive the model. Besides difference prediction, other applications include compression, and re-targeting. We also present novel applications in form of hybrid stereo images and backward-compatible stereo. The latter minimizes disparity in order to convey a stereo impression if special equipment is used but produces images that appear almost ordinary to the naked eye. The validity of our model and difference metric is again confirmed in a study. %J ACM Transactions on Graphics %V 30 %N 4 %& 1 %P 1 - 10 %Z sequence number: 96 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2011 %O ACM SIGGRAPH 2011 Vancouver, BC, Canada
Pajak, D., Herzog, R., Myszkowski, K., Eisemann, E., and Seidel, H.-P. 2011. Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming. Computer Graphics Forum (Proc. EUROGRAPHICS 2011)30, 2.
Abstract
In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming are becoming increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on video augmented with additional information, e.g., depth, which is key to increasing robustness with respect to data loss and image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients; second, the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations to client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.
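One reason depth and motion flow pay off on the client side is that a late or lost frame can be approximated by warping the previously decoded frame along the motion field. A minimal backward-warping sketch (our illustration, assuming numpy and scipy; not the paper's codec):

    import numpy as np
    from scipy.ndimage import map_coordinates

    def conceal_lost_frame(prev_frame, flow):
        # prev_frame: (H, W) intensity; flow: (H, W, 2) per-pixel motion in
        # pixels from the previous frame to the current one. Backward warp:
        # each current pixel looks up where it came from in prev_frame.
        h, w = prev_frame.shape
        ys, xs = np.mgrid[0:h, 0:w].astype(float)
        return map_coordinates(prev_frame,
                               [ys - flow[..., 0], xs - flow[..., 1]],
                               order=1, mode='nearest')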
Export
BibTeX
@article{HerzogEG2011, TITLE = {Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming}, AUTHOR = {Pajak, Dawid and Herzog, Robert and Myszkowski, Karol and Eisemann, Elmar and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1111/j.1467-8659.2011.01871.x}, PUBLISHER = {Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {30}, NUMBER = {2}, PAGES = {415--424}, BOOKTITLE = {EUROGRAPHICS 2011 (EUROGRAPHICS 2011)}, EDITOR = {Chen, Min and Deussen, Oliver}, }
Endnote
%0 Journal Article %A Pajak, Dawid %A Herzog, Robert %A Myszkowski, Karol %A Eisemann, Elmar %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Scalable Remote Rendering with Depth and Motion-flow Augmented Streaming : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-13F2-E %F EDOC: 618866 %R 10.1111/j.1467-8659.2011.01871.x %7 2011 %D 2011 %* Review method: peer-reviewed %X In this work, we focus on efficient compression and streaming of frames rendered from a dynamic 3D model. Remote rendering and on-the-fly streaming become increasingly attractive for interactive applications. Data is kept confidential and only images are sent to the client. Even if the client's hardware resources are modest, the user can interact with state-of-the-art rendering applications executed on the server. Our solution focuses on augmented video information, e.g., by depth, which is key to increase robustness with respect to data loss, image reconstruction, and is an important feature for stereo vision and other client-side applications. Two major challenges arise in such a setup. First, the server workload has to be controlled to support many clients, second the data transfer needs to be efficient. Consequently, our contributions are twofold. First, we reduce the server-based computations by making use of sparse sampling and temporal consistency to avoid expensive pixel evaluations. Second, our data-transfer solution takes limited bandwidths into account, is robust to information loss, and compression and decompression are efficient enough to support real-time interaction. Our key insight is to tailor our method explicitly for rendered 3D content and shift some computations on client GPUs, to better balance the server/client workload. Our framework is progressive, scalable, and allows us to stream augmented high-resolution (e.g., HD-ready) frames with small bandwidth on standard hardware. %J Computer Graphics Forum %V 30 %N 2 %& 415 %P 415 - 424 %I Blackwell %C Oxford, UK %B EUROGRAPHICS 2011 %O EUROGRAPHICS 2011 The European Association for Computer Graphics 32nd Annual Conference ; Llandudno in Wales, UK, April 11th - 15th, 2011 EG 2011
Templin, K., Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2011. Apparent Resolution Enhancement for Animations. Proceedings SCCG 2011 (SCCG 2011), ACM.
Abstract
Presenting the variety of high-resolution images captured by high-quality devices or generated on the computer is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking human perception into account. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. ``panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.
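The eye-integration argument can be checked with a toy forward model: while the eye smoothly tracks the motion, it temporally averages several low-resolution subframes shown at different offsets, so the integrated image can carry more detail than any single subframe. The 1D sketch below (our construction, numpy assumed) implements only this forward model; the method itself solves the harder inverse problem of choosing the subframes.

    import numpy as np

    def perceived_signal(subframes, offsets_px):
        # Retinal integration under smooth pursuit: each low-res subframe is
        # shifted by the tracked motion before temporal averaging.
        acc = np.zeros_like(subframes[0], dtype=float)
        for frame, ofs in zip(subframes, offsets_px):
            acc += np.roll(frame, -ofs)
        return acc / len(subframes)

    # Three subframes shown while the eye pans by one pixel per subframe;
    # the integrated result localizes the impulse at index 1:
    f = [np.array([0., 1, 0, 0]), np.array([0., 0, 1, 0]), np.array([0., 0, 0, 1])]
    print(perceived_signal(f, offsets_px=[0, 1, 2]))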
Export
BibTeX
@inproceedings{Templin2011, TITLE = {Apparent Resolution Enhancement for Animations}, AUTHOR = {Templin, Krzysztof and Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-1978-2}, DOI = {10.1145/2461217.2461230}, PUBLISHER = {ACM}, YEAR = {2011}, DATE = {2011}, ABSTRACT = {Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. ``panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods.}, BOOKTITLE = {Proceedings SCCG 2011 (SCCG 2011)}, EDITOR = {Nishita, Tomoyuki and {\v D}urikovi{\v c}, Roman}, PAGES = {85--92}, ADDRESS = {Vini{\v c}n{\'e}, Slovakia}, }
Endnote
%0 Conference Proceedings %A Templin, Krzysztof %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Resolution Enhancement for Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0010-138B-9 %F EDOC: 618886 %R 10.1145/2461217.2461230 %D 2011 %B 27th Spring Conference on Computer Graphics %Z date of event: 2011-04-28 - 2011-04-30 %C Viničné, Slovakia %X Presenting the variety of high resolution images captured by high-quality devices, or generated on the computer, is challenging due to the limited resolution of current display devices. Our recent work addressed this problem by taking into account human perception. By applying a specific motion to a high-resolution image shown on a low-resolution display device, human eye tracking and integration could be exploited to achieve apparent resolution enhancement. To this end, the high-resolution image is decomposed into a sequence of temporally varying low-resolution images that are displayed at high refresh rates. However, this approach is limited to a specific class of simple or constant movements, i.e. ``panning''. In this work, we generalize this idea to arbitrary motions, as well as to videos with arbitrary motion flow. The resulting image sequences are compared to a range of other down-sampling methods. %B Proceedings SCCG 2011 %E Nishita, Tomoyuki; Ďurikovič, Roman %P 85 - 92 %I ACM %@ 978-1-4503-1978-2
2010
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010a. Video Quality Assessment for Computer Graphics Applications. ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia 2010)29, 6.
Export
BibTeX
@article{TuncSGAsia2010, TITLE = {Video Quality Assessment for Computer Graphics Applications}, AUTHOR = {Aydin, Tunc Ozan and {\v C}ad{\'i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-0439-9}, DOI = {10.1145/1866158.1866187}, LOCALID = {Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH Asia)}, VOLUME = {29}, NUMBER = {6}, PAGES = {1--12}, EID = {161}, BOOKTITLE = {Proceedings of ACM SIGGRAPH Asia 2010}, EDITOR = {Drettakis, George}, }
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A Čadík, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Video Quality Assessment for Computer Graphics Applications : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1797-0 %F EDOC: 537307 %R 10.1145/1866158.1866187 %F OTHER: Local-ID: C125675300671F7B-0ED72325CD8F187FC12577CF005BA5C5-TuncSGAsia2010 %D 2010 %J ACM Transactions on Graphics %V 29 %N 6 %& 1 %P 1 - 12 %Z sequence number: 161 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH Asia 2010 %O ACM SIGGRAPH Asia 2010 Seoul, South Korea %@ 978-1-4503-0439-9
Aydin, T.O., Čadík, M., Myszkowski, K., and Seidel, H.-P. 2010b. Visually Significant Edges. ACM Transactions on Applied Perception7, 4.
Abstract
Numerous image processing and computer graphics methods make use of either an explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient-magnitude-based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second-generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to the gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements in applications utilizing edge strength, at the cost of a modest computational burden.
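The gap between raw gradient magnitude and an HVS response can be illustrated with a Weber-style stand-in: normalize the gradient by the local adapting luminance and pass it through a saturating transducer, so equal physical contrasts in dark and bright image regions map to comparable response values. This is our simplified illustration (numpy and scipy assumed), not the paper's wavelet-based model:

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def visual_edge_response(lum, sigma_adapt=8.0, p=0.7, eps=1e-4):
        # lum: (H, W) linear luminance map.
        gy, gx = np.gradient(lum.astype(float))
        grad = np.hypot(gx, gy)
        # Weber-like normalization by the local adapting luminance.
        adapt = gaussian_filter(lum.astype(float), sigma_adapt) + eps
        contrast = grad / adapt
        # Saturating transducer: compresses large suprathreshold contrasts.
        return contrast ** p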
Export
BibTeX
@article{TuncTAP2010, TITLE = {Visually Significant Edges}, AUTHOR = {Aydin, Tunc Ozan and {\v C}ad{\'i}k, Martin and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1544-3558}, DOI = {10.1145/1823738.1823745}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden.}, JOURNAL = {ACM Transactions on Applied Perception}, VOLUME = {7}, NUMBER = {4}, PAGES = {1--14}, EID = {27}, }
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A Čadík, Martin %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visually Significant Edges : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179A-A %F EDOC: 537306 %R 10.1145/1823738.1823745 %7 2010 %D 2010 %* Review method: peer-reviewed %X Numerous image processing and computer graphics methods make use of either explicitly computed strength of image edges, or an implicit edge strength definition that is integrated into their algorithms. In both cases, the end result is highly affected by the computation of edge strength. We address several shortcomings of the widely used gradient magnitude based edge strength model through the computation of a hypothetical human visual system (HVS) response at edge locations. Contrary to gradient magnitude, the resulting ``visual significance'' values account for various HVS mechanisms such as luminance adaptation and visual masking, and are scaled in perceptually linear units that are uniform across images. The visual significance computation is implemented in a fast multi-scale second generation wavelet framework, which we use to demonstrate the differences in image retargeting, HDR image stitching and tone mapping applications with respect to gradient magnitude model. Our results suggest that simple perceptual models provide qualitative improvements on applications utilizing edge strength at the cost of a modest computational burden. %J ACM Transactions on Applied Perception %V 7 %N 4 %& 1 %P 1 - 14 %Z sequence number: 27 %I ACM %C New York, NY %@ false
Didyk, P., Ritschel, T., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010a. Adaptive Image-space Stereo View Synthesis. Vision, Modeling & Visualization (VMV 2010), Eurographics Association.
Export
BibTeX
@inproceedings{Didyk2010b, TITLE = {Adaptive Image-space Stereo View Synthesis}, AUTHOR = {Didyk, Piotr and Ritschel, Tobias and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-3-905673-79-1}, DOI = {10.2312/PE/VMV/VMV10/299-306}, PUBLISHER = {Eurographics Association}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {Vision, Modeling \& Visualization (VMV 2010)}, EDITOR = {Koch, Reinhard and Kolb, Andreas and Rezk-Salama, Christof}, PAGES = {299--306}, ADDRESS = {Siegen, Germany}, }
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Ritschel, Tobias %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Image-space Stereo View Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-172C-4 %F EDOC: 537308 %R 10.2312/PE/VMV/VMV10/299-306 %D 2010 %B 15th International Workshop on Vision, Modeling, and Visualization %Z date of event: 2010-11-15 - 2010-11-17 %C Siegen, Germany %B Vision, Modeling & Visualization %E Koch, Reinhard; Kolb, Andreas; Rezk-Salama, Christof %P 299 - 306 %I Eurographics Association %@ 978-3-905673-79-1
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010b. Apparent Display Resolution Enhancement for Moving Images. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2010)29, 4.
Export
BibTeX
@article{Didyk2010a, TITLE = {Apparent Display Resolution Enhancement for Moving Images}, AUTHOR = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, ISBN = {978-1-4503-0210-4}, DOI = {10.1145/1833349.1778850}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {29}, NUMBER = {4}, PAGES = {1--8}, EID = {113}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2010}, EDITOR = {Hoppe, Hugues}, }
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Display Resolution Enhancement for Moving Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1734-0 %F EDOC: 537269 %R 10.1145/1833349.1778850 %7 2010 %D 2010 %J ACM Transactions on Graphics %O TOG %V 29 %N 4 %& 1 %P 1 - 8 %Z sequence number: 113 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2010 %O ACM SIGGRAPH 2010 Los Angeles, CA ; [25-29 July 2010] %@ 978-1-4503-0210-4
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2010c. Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays. Computer Graphics Forum (Proc. EUROGRAPHICS 2010)29, 2.
Abstract
High-refresh-rate displays (e.g., 120 Hz) have recently become available on the consumer market and are quickly gaining popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.e., 120 Hz). Some devices, such as LCD TVs, solve this problem by converting low-refresh-rate content (i.e., 50 Hz PAL) into a higher temporal resolution (i.e., 200 Hz) based on two-dimensional optical flow. In our approach, we show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently, resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.
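For rendered content the intermediate frames come almost for free, because exact per-pixel motion is available from the renderer instead of being estimated by optical flow. A minimal image-space sketch (ours, assuming numpy and scipy; the paper adds perceptual corrections on top) synthesizes an in-between frame by warping the current frame a fraction of the way along the motion field:

    import numpy as np
    from scipy.ndimage import map_coordinates

    def inbetween_frame(frame, motion, t=0.5):
        # frame: (H, W); motion: (H, W, 2) renderer-reported per-pixel motion
        # (in pixels) from this frame to the next. Backward warp: the
        # in-between pixel looks back along a fraction t of the motion.
        h, w = frame.shape
        ys, xs = np.mgrid[0:h, 0:w].astype(float)
        return map_coordinates(frame,
                               [ys - t * motion[..., 0], xs - t * motion[..., 1]],
                               order=1, mode='nearest')

    # e.g., four 120 Hz frames from one 30 Hz render:
    # frames = [inbetween_frame(img, mv, t) for t in (0.0, 0.25, 0.5, 0.75)]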
Export
BibTeX
@article{Didyk2010, TITLE = {Perceptually-motivated Real-time Temporal Upsampling of {3D} Content for High-refresh-rate Displays}, AUTHOR = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01641.x}, PUBLISHER = {Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {High-refresh-rate displays (e.\,g.,~120\,Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.\,e.~120\,Hz). Some devices, such as LCD~TVs, solve this problem by converting low-refresh-rate content (i.\,e.~50\,Hz~PAL) into a higher temporal resolution (i.\,e.~200\,Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays.}, JOURNAL = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, VOLUME = {29}, NUMBER = {2}, PAGES = {713--722}, BOOKTITLE = {EUROGRAPHICS 2010}, EDITOR = {Akenine-M{\"o}ller, Tomas and Zwicker, Matthias}, }
Endnote
%0 Journal Article %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually-motivated Real-time Temporal Upsampling of 3D Content for High-refresh-rate Displays : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1778-7 %F EDOC: 537284 %R 10.1111/j.1467-8659.2009.01641.x %7 2010 %D 2010 %X High-refresh-rate displays (e.g., 120 Hz) have recently become available on the consumer market and quickly gain on popularity. One of their aims is to reduce the perceived blur created by moving objects that are tracked by the human eye. However, an improvement is only achieved if the video stream is produced at the same high refresh rate (i.e., 120 Hz). Some devices, such as LCD TVs, solve this problem by converting low-refresh-rate content (i.e., 50 Hz PAL) into a higher temporal resolution (i.e., 200 Hz) based on two-dimensional optical flow. In our approach, we will show how rendered three-dimensional images produced by recent graphics hardware can be up-sampled more efficiently resulting in higher quality at the same time. Our algorithm relies on several perceptual findings and preserves the naturalness of the original sequence. A psychophysical study validates our approach and illustrates that temporally up-sampled video streams are preferred over the standard low-rate input by the majority of users. We show that our solution improves task performance on high-refresh-rate displays. %J Computer Graphics Forum %V 29 %N 2 %& 713 %P 713 - 722 %I Blackwell %C Oxford, UK %@ false %B EUROGRAPHICS 2010 %O EUROGRAPHICS 2010 The European Association for Computer Graphics 31st Annual Conference ; Norrköping, Sweden, May 3rd - 7th, 2010 EG 2010
Havran, V., Filip, J., and Myszkowski, K. 2010. Bidirectional Texture Function Compression based on the Multilevel Vector Quantization. Computer Graphics Forum29, 1.
Export
BibTeX
@article{Havran2010CGF, TITLE = {Bidirectional Texture Function Compression based on the Multilevel Vector Quantization}, AUTHOR = {Havran, Vlastimil and Filip, Jiri and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2009.01585.x}, PUBLISHER = {Blackwell}, ADDRESS = {Oxford, UK}, YEAR = {2010}, DATE = {2010}, JOURNAL = {Computer Graphics Forum}, VOLUME = {29}, NUMBER = {1}, PAGES = {175--190}, }
Endnote
%0 Journal Article %A Havran, Vlastimil %A Filip, Jiri %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T Bidirectional Texture Function Compression based on the Multilevel Vector Quantization : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-173F-9 %F EDOC: 537312 %R 10.1111/j.1467-8659.2009.01585.x %7 2010 %D 2010 %* Review method: peer-reviewed %J Computer Graphics Forum %V 29 %N 1 %& 175 %P 175 - 190 %I Blackwell %C Oxford, UK %@ false
Herzog, R., Eisemann, E., Myszkowski, K., and Seidel, H.-P. 2010. Spatio-Temporal Upsampling on the GPU. Proceedings I3D 2010, ACM.
Abstract
Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.
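The geometry-aware upsampling step can be pictured as joint bilateral weighting: when a sparse low-resolution shading sample contributes to a high-resolution pixel, its weight falls off both with screen-space distance and with dissimilarity of the pixel's geometry (depth and normal), so shading never bleeds across silhouettes. A small sketch under our own parameterization (numpy assumed; not the paper's GPU implementation):

    import numpy as np

    def bilateral_weight(dx, dy, depth_hr, depth_lr, normal_hr, normal_lr,
                         sigma_space=2.0, sigma_depth=0.05):
        # Spatial term: Gaussian falloff with screen-space distance.
        w_s = np.exp(-(dx * dx + dy * dy) / (2.0 * sigma_space ** 2))
        # Depth term: penalize samples that lie on a different surface.
        dz = depth_hr - depth_lr
        w_d = np.exp(-(dz * dz) / (2.0 * sigma_depth ** 2))
        # Normal term: cosine similarity, clamped to [0, 1].
        w_n = max(0.0, float(np.dot(normal_hr, normal_lr)))
        return w_s * w_d * w_n

    # Upsampled pixel = sum(w_i * shading_i) / sum(w_i) over nearby samples,
    # with temporally reprojected samples entering the same weighted sum.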
Export
BibTeX
@inproceedings{HerzogI3D2010, TITLE = {Spatio-Temporal Upsampling on the {GPU}}, AUTHOR = {Herzog, Robert and Eisemann, Elmar and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-60558-939-8}, DOI = {10.1145/1730804.1730819}, PUBLISHER = {ACM}, YEAR = {2010}, DATE = {2010}, ABSTRACT = {Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result.}, BOOKTITLE = {Proceedings I3D 2010}, EDITOR = {Varshney, Amitabh and Wyman, Chris and Aliaga, Daniel and Oliveira, Manuel M.}, PAGES = {91--98}, ADDRESS = {Washington DC, USA}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Eisemann, Elmar %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-Temporal Upsampling on the GPU : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-178C-C %F EDOC: 537285 %R 10.1145/1730804.1730819 %D 2010 %B ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games %Z date of event: 2010-02-19 - 2010-02-21 %C Washington DC, USA %X Pixel processing is becoming increasingly expensive for real-time applications due to the complexity of today's shaders and high-resolution framebuffers. However, most shading results are spatially or temporally coherent, which allows for sparse sampling and reuse of neighboring pixel values. This paper proposes a simple framework for spatio-temporal upsampling on modern GPUs. In contrast to previous work, which focuses either on temporal or spatial processing on the GPU, we exploit coherence in both. Our algorithm combines adaptive motion-compensated filtering over time and geometry-aware upsampling in image space. It is robust with respect to high-frequency temporal changes, and achieves substantial performance improvements by limiting the number of recomputed samples per frame. At the same time, we increase the quality of spatial upsampling by recovering missing information from previous frames. This temporal strategy also allows us to ensure that the image converges to a higher quality result. %B Proceedings I3D 2010 %E Varshney, Amitabh; Wyman, Chris; Aliaga, Daniel; Oliveira, Manuel M. %P 91 - 98 %I ACM %@ 978-1-60558-939-8
Pajak, D., Čadík, M., Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2010a. Visual Maladaptation in Contrast Domain. Human Vision and Electronic Imaging XV (HVEI 2010), SPIE.
Export
BibTeX
@inproceedings{Pajak2010, TITLE = {Visual Maladaptation in Contrast Domain}, AUTHOR = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {9780819479204}, DOI = {10.1117/12.844934}, PUBLISHER = {SPIE}, YEAR = {2010}, DATE = {2010}, BOOKTITLE = {Human Vision and Electronic Imaging XV (HVEI 2010)}, EDITOR = {Rogowitz, Bernice and Pappas, Thrasyvoulos N.}, PAGES = {1--12}, EID = {752710}, SERIES = {Proceedings of SPIE}, VOLUME = {7527}, ADDRESS = {San Jose, CA, USA}, }
Endnote
%0 Conference Proceedings %A Pajak, Dawid %A Čadík, Martin %A Aydin, Tunc Ozan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visual Maladaptation in Contrast Domain : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-179C-6 %F EDOC: 537311 %R 10.1117/12.844934 %D 2010 %B Human Vision and Electronic Imaging XV %Z date of event: 2010-01-18 - 2010-01-21 %C San Jose, CA, USA %B Human Vision and Electronic Imaging XV %E Rogowitz, Bernice; Pappas, Thrasyvoulos N. %P 1 - 12 %Z sequence number: 752710 %I SPIE %@ 9780819479204 %B Proceedings of SPIE %N 7527
Pajak, D., Čadík, M., Aydin, T.O., Okabe, M., Myszkowski, K., and Seidel, H.-P. 2010b. Contrast Prescription for Multiscale Image Editing. The Visual Computer26, 6.
Export
BibTeX
@article{Cadik2010, TITLE = {Contrast Prescription for Multiscale Image Editing}, AUTHOR = {Pajak, Dawid and {\v C}ad{\'i}k, Martin and Aydin, Tunc Ozan and Okabe, Makoto and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0178-2789}, DOI = {10.1007/s00371-010-0485-3}, PUBLISHER = {Springer}, ADDRESS = {New York, NY}, YEAR = {2010}, DATE = {2010}, JOURNAL = {The Visual Computer}, VOLUME = {26}, NUMBER = {6}, PAGES = {739--748}, }
Endnote
%0 Journal Article %A Pajak, Dawid %A Čadík, Martin %A Aydin, Tunc Ozan %A Okabe, Makoto %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Contrast Prescription for Multiscale Image Editing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1748-4 %F EDOC: 537310 %R 10.1007/s00371-010-0485-3 %7 2010 %D 2010 %* Review method: peer-reviewed %J The Visual Computer %V 26 %N 6 %& 739 %P 739 - 748 %I Springer %C New York, NY %@ false
Reinhard, E., Ward, G., Pattanaik, S., Debevec, P., Heidrich, W., and Myszkowski, K., eds. 2010. High Dynamic Range Imaging: Acquisition, Display, and Image-based Lighting. Elsevier (Morgan Kaufmann), Burlington, MA.
Export
BibTeX
@book{HDRtextBook2010, TITLE = {High Dynamic Range Imaging: Acquisition, Display, and Image-based Lighting}, EDITOR = {Reinhard, Erik and Ward, Greg and Pattanaik, Sumanta and Debevec, Paul and Heidrich, Wolfgang and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {978-0-12-374914-7}, PUBLISHER = {Elsevier (Morgan Kaufmann)}, ADDRESS = {Burlington, MA}, EDITION = {2. ed.}, YEAR = {2010}, DATE = {2010}, PAGES = {XVIII, 650 p.}, }
Endnote
%0 Edited Book %A Reinhard, Erik %A Ward, Greg %A Pattanaik, Sumanta %A Debevec, Paul %A Heidrich, Wolfgang %A Myszkowski, Karol %+ External Organizations External Organizations External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging: Acquisition, Display, and Image-based Lighting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1757-2 %F EDOC: 537309 %@ 978-0-12-374914-7 %I Elsevier (Morgan Kaufmann) %C Burlington, MA %7 2. ed. %D 2010 %P XVIII, 650 p.
2009
Aydin, T.O., Myszkowski, K., and Seidel, H.-P. 2009. Predicting Display Visibility Under Dynamically Changing Lighting Conditions. Computer Graphics Forum (Proc. EUROGRAPHICS 2009)28, 2.
Abstract
Display devices, more than ever, are finding their way into electronic consumer goods as a result of recent trends towards more functionality and user interaction. Combined with new developments in display technology towards a higher reproducible luminance range, the mobility and the variation in capability of display devices are constantly increasing. Consequently, in real-life usage it is now very likely that the display emission will be distorted by spatially and temporally varying reflections, and that the observer's visual system will not be adapted to the particular display she is viewing at that moment. The actual perception of the display content cannot be fully understood by only considering steady-state illumination and adaptation conditions. We propose an objective method for display visibility analysis that formulates the problem as a full-reference image quality assessment problem, where the display emission under ``ideal'' conditions is used as the reference for real-life conditions. Our work includes a human visual system model that accounts for maladaptation and temporal recovery of sensitivity. As an example application, we integrate our method into a global illumination simulator and analyze the visibility of a car-interior display under realistic lighting conditions.
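The static core of such a predictor can be reduced to a threshold-versus-intensity (TVI) function: a luminance difference between the ideal and the distorted display emission is flagged as visible when it exceeds the detection threshold at the current adaptation level. The sketch below (numpy assumed) uses a crude Weber-law TVI and deliberately omits the maladaptation and temporal-recovery modelling that the paper adds; all constants are placeholders.

    import numpy as np

    def tvi(adapt_lum):
        # Crude detection threshold in cd/m^2: roughly constant at very low
        # adaptation levels, Weber-like (~1% of adaptation) in photopic range.
        return np.maximum(0.005, 0.01 * np.asarray(adapt_lum, dtype=float))

    def visible_distortion(emission_ideal, emission_real, adapt_lum):
        # True wherever the distortion (e.g., an added reflection) exceeds
        # the detection threshold for the assumed adaptation luminance.
        return np.abs(np.asarray(emission_ideal, dtype=float) -
                      np.asarray(emission_real, dtype=float)) > tvi(adapt_lum)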
Export
BibTeX
@article{Tunc2009EG, TITLE = {Predicting Display Visibility Under Dynamically Changing Lighting Conditions}, AUTHOR = {Aydin, Tunc O. and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-33AE0A5CE1E47467C125755C00347B6E-Tunc2009EG}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Display devices, more than ever, are finding their ways into electronic consumer goods as a result of recent trends in providing more functionality and user interaction. Combined with the new developments in display technology towards higher reproducible luminance range, the mobility and variation in capability of display devices are constantly increasing. Consequently, in real life usage it is now very likely that the display emission to be distorted by spatially and temporally varying reflections, and the observer's visual system to be not adapted to the particular display that she is viewing at that moment. The actual perception of the display content cannot be fully understood by only considering steady-state illumination and adaptation conditions. We propose an objective method for display visibility analysis formulating the problem as a full-reference image quality assessment problem, where the display emission under ``ideal'' conditions is used as the reference for real-life conditions. Our work includes a human visual system model that accounts for maladaptation and temporal recovery of sensitivity. As an example application we integrate our method to a global illumination simulator and analyze the visibility of a car interior display under realistic lighting conditions.}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics)}, VOLUME = {28}, NUMBER = {2}, PAGES = {173--182}, }
Endnote
%0 Journal Article %A Aydin, Tunc O. %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predicting Display Visibility Under Dynamically Changing Lighting Conditions : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19CB-C %F EDOC: 520442 %F OTHER: Local-ID: C125675300671F7B-33AE0A5CE1E47467C125755C00347B6E-Tunc2009EG %D 2009 %* Review method: peer-reviewed %X Display devices, more than ever, are finding their ways into electronic consumer goods as a result of recent trends in providing more functionality and user interaction. Combined with the new developments in display technology towards higher reproducible luminance range, the mobility and variation in capability of display devices are constantly increasing. Consequently, in real life usage it is now very likely that the display emission to be distorted by spatially and temporally varying reflections, and the observer's visual system to be not adapted to the particular display that she is viewing at that moment. The actual perception of the display content cannot be fully understood by only considering steady-state illumination and adaptation conditions. We propose an objective method for display visibility analysis formulating the problem as a full-reference image quality assessment problem, where the display emission under ``ideal'' conditions is used as the reference for real-life conditions. Our work includes a human visual system model that accounts for maladaptation and temporal recovery of sensitivity. As an example application we integrate our method to a global illumination simulator and analyze the visibility of a car interior display under realistic lighting conditions. %J Computer Graphics Forum (Proc. Eurographics) %V 28 %N 2 %& 173 %P 173 - 182
Banterle, F., Debattista, K., Artusi, A., et al. 2009. High Dynamic Range Imaging and LDR Expansion for Generating HDR Content. EUROGRAPHICS State-of-the-Art Report, Eurographics.
Abstract
In the last few years, research in the field of High Dynamic Range (HDR) Imaging has focused on providing tools for expanding LDR content for the generation of HDR images, due to the growing popularity of HDR in applications such as photography and rendering via Image-Based Lighting, and the imminent arrival of HDR displays on the consumer market. LDR content expansion is required due to the lack of fast and reliable consumer-level HDR capture for still images and videos. Furthermore, LDR content expansion will make the re-use of legacy LDR stills, videos, and LDR applications, created over the last century and more, widely available. The use of certain LDR expansion methods, namely those based on the inversion of tone mapping operators, has made it possible to create novel compression algorithms that tackle the problem of the size of HDR content storage, which remains one of the major obstacles to be overcome for the adoption of HDR. These methods are used in conjunction with traditional LDR compression methods and can evolve accordingly. The goal of this report is to provide a comprehensive overview of HDR Imaging and an in-depth review of these emerging topics. Moreover, we propose how to classify and validate them. We discuss limitations of these methods and identify remaining challenges for the future.
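A concrete instance of the "inversion of tone mapping operators" mentioned above: the global Reinhard curve L_d = L / (1 + L) has the closed-form inverse L = L_d / (1 - L_d), so an LDR image whose luminance is normalized to [0, 1] can be expanded by mapping it through that inverse. A minimal sketch of this one operator among the many the report surveys (numpy assumed):

    import numpy as np

    def expand_reinhard(ldr, eps=1e-3):
        # ldr: normalized luminance in [0, 1], gamma already removed.
        ldr = np.clip(np.asarray(ldr, dtype=float), 0.0, 1.0 - eps)
        # Inverse of the global Reinhard curve L_d = L / (1 + L).
        return ldr / (1.0 - ldr)

    print(expand_reinhard([0.1, 0.5, 0.9, 0.99]))  # bright pixels expand most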
Export
BibTeX
@inproceedings{Banterle2009, TITLE = {High Dynamic Range Imaging and {LDR} Expansion for Generating {HDR} Content}, AUTHOR = {Banterle, Francesco and Debattista, Kurt and Artusi, Alessandro and Pattanaik, Sumanta and Myszkowski, Karol and Ledda, Patrick and Bloj, Marina and Chalmers, Alan}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-C54556685BC86D61C125755C005A9EBC-Banterle2009}, PUBLISHER = {Eurographics}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {In the last few years researches in the field of High Dynamic Range (HDR) Imaging have focused on providing tools for expanding LDR content for the generation of HDR images due to the growing popularity of HDR in applications, such as photography and rendering via Image-Based Lighting, and the imminent arrival of HDR displays to the consumer market. LDR content expansion is required due to the lack of fast and reliable consumer level HDR capture for still images and videos. Furthermore, LDR content expansion, will allow the re-use of legacy LDR stills, videos and LDR applications created, over the last century and more, to be widely available. The use of certain LDR expansion methods, those that are based on the inversion of tone mapping operators, has made it possible to create novel compression algorithms that tackle the problem of the size of HDR content storage, which remains one of the major obstacles to be overcome for the adoption of HDR. These methods are used in conjunction with traditional LDR compression methods and can evolve accordingly. The goal of this report is to provide a comprehensive overview on HDR Imaging, and an in depth review on these emerging topics. Moreover, we are proposing how to classify and to validate them. We will discuss limitations of these methods, and identify remaining challenges for the future.}, BOOKTITLE = {EUROGRAPHICS State-of-the-Art Report}, EDITOR = {Pauly, Marc and Greiner, G{\"u}nther}, PAGES = {17--44}, }
Endnote
%0 Conference Proceedings %A Banterle, Francesco %A Debattista, Kurt %A Artusi, Alessandro %A Pattanaik, Sumanta %A Myszkowski, Karol %A Ledda, Patrick %A Bloj, Marina %A Chalmers, Alan %+ Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Imaging and LDR Expansion for Generating HDR Content : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19B1-4 %F EDOC: 520496 %F OTHER: Local-ID: C125675300671F7B-C54556685BC86D61C125755C005A9EBC-Banterle2009 %I Eurographics %D 2009 %B Eurographics 2009 %Z date of event: 2009-03-30 - 2009-04-03 %C Munich %X In the last few years researches in the field of High Dynamic Range (HDR) Imaging have focused on providing tools for expanding LDR content for the generation of HDR images due to the growing popularity of HDR in applications, such as photography and rendering via Image-Based Lighting, and the imminent arrival of HDR displays to the consumer market. LDR content expansion is required due to the lack of fast and reliable consumer level HDR capture for still images and videos. Furthermore, LDR content expansion, will allow the re-use of legacy LDR stills, videos and LDR applications created, over the last century and more, to be widely available. The use of certain LDR expansion methods, those that are based on the inversion of tone mapping operators, has made it possible to create novel compression algorithms that tackle the problem of the size of HDR content storage, which remains one of the major obstacles to be overcome for the adoption of HDR. These methods are used in conjunction with traditional LDR compression methods and can evolve accordingly. The goal of this report is to provide a comprehensive overview on HDR Imaging, and an in depth review on these emerging topics. Moreover, we are proposing how to classify and to validate them. We will discuss limitations of these methods, and identify remaining challenges for the future. %B EUROGRAPHICS State-of-the-Art Report %E Pauly, Marc; Greiner, Günther %P 17 - 44 %I Eurographics
Didyk, P., Eisemann, E., Ritschel, T., Myszkowski, K., and Seidel, H.-P. 2009. A Question of Time: Importance and Possibilities of High Refresh-rates. Visual Computing Research Conference, Intel Visual Computing Institute.
Abstract
This work discusses shortcomings of traditional rendering techniques on today's widespread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of how the human visual system perceives these displays. In this work, we introduce a perception-aware rendering technique that is of very low cost but significantly improves performance as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task performance, which makes them a crucial component for future interactive applications.
Export
BibTeX
@inproceedings{Didyk2009, TITLE = {A Question of Time: Importance and Possibilities of High Refresh-rates}, AUTHOR = {Didyk, Piotr and Eisemann, Elmar and Ritschel, Tobias and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-6F99D73D0B04CA52C12576B9005417E0-Didyk2009}, PUBLISHER = {Intel Visual Computing Institute}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {This work will discuss shortcomings of traditional rendering techniques on today's wide-spread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of the human visual system perceiving such displays.\\ In this work, we introduce a perception-aware rendering technique that is of very low cost, but significantly improves performance, as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task-performance which makes it a crucial component for future interactive applications.}, BOOKTITLE = {Visual Computing Research Conference}, PAGES = {1--3}, }
Endnote
%0 Conference Proceedings %A Didyk, Piotr %A Eisemann, Elmar %A Ritschel, Tobias %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Question of Time: Importance and Possibilities of High Refresh-rates : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-197D-B %F EDOC: 520455 %F OTHER: Local-ID: C125675300671F7B-6F99D73D0B04CA52C12576B9005417E0-Didyk2009 %I Intel Visual Computing Institute %D 2009 %B Visual Computing Research Conference %Z date of event: 2009-12-08 - 2009-12-10 %C 8-10 December 2009 %X This work will discuss shortcomings of traditional rendering techniques on today's wide-spread LCD screens. The main observation is that 3D renderings often appear blurred when observed on such a display. Although this might seem to be a shortcoming of the hardware, such blur is actually a consequence of the human visual system perceiving such displays. In this work, we introduce a perception-aware rendering technique that is of very low cost, but significantly improves performance, as well as quality. Especially in conjunction with more recent devices, initially conceived for 3D shutter glasses, our approach achieves significant gains. Besides quality, we show that such approaches even improve task-performance which makes it a crucial component for future interactive applications. %B Visual Computing Research Conference %P 1 - 3 %I Intel Visual Computing Institute
Herzog, R., Myszkowski, K., and Seidel, H.-P. 2009. Anisotropic Radiance-Cache Splatting for Efficiently Computing High-Quality Global Illumination with Lightcuts. Computer Graphics Forum (Proc. EUROGRAPHICS), Wiley-Blackwell.
Abstract
Even with today's computational power, computing global illumination in complex scenes remains a demanding task. In this work we propose a novel irradiance caching scheme that combines the advantages of two state-of-the-art algorithms for high-quality global illumination rendering: lightcuts, an adaptive and hierarchical instant-radiosity-based algorithm, and the widely used (ir)radiance caching algorithm for sparse sampling and interpolation of (ir)radiance in object space. Our adaptive radiance caching algorithm is based on anisotropic cache splatting, which adapts the cache footprints not only to the magnitude of the illumination gradient computed with lightcuts but also to its orientation, allowing larger interpolation errors along the direction of coherent illumination while reducing the error along the illumination gradient. Since lightcuts computes the direct and indirect lighting seamlessly, we use a two-layer radiance cache to store and control the interpolation of direct and indirect lighting individually, with different error criteria. In multiple iterations, our method detects cache interpolation errors above the visibility threshold of a pixel and reduces the anisotropic cache footprints accordingly. We achieve significantly better image quality while also speeding up the computation by one to two orders of magnitude with respect to the well-known photon mapping with (ir)radiance caching procedure.
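Where the abstract speaks of adapting cache footprints to both the magnitude and the orientation of the illumination gradient, a minimal sketch of such an anisotropic reuse test is given below. It assumes 3D shading points and a per-record gradient vector; the error model, threshold, and anisotropy factor are illustrative stand-ins, not the paper's calibrated criteria.

```python
import numpy as np

def cache_usable(p, record_pos, record_grad, thresh=0.05, anisotropy=4.0):
    """Decide whether a cached (ir)radiance record may be reused at shading
    point p. The acceptance region is elliptical: predicted error grows
    quickly along the illumination gradient and 'anisotropy' times more
    slowly across it, mirroring the footprint adaptation described above."""
    d = p - record_pos
    g = np.linalg.norm(record_grad)
    if g < 1e-9:                  # locally flat illumination: reuse freely
        return True
    g_dir = record_grad / g
    along = abs(d @ g_dir)                             # offset along the gradient
    across = np.linalg.norm(d - (d @ g_dir) * g_dir)   # offset across it
    return g * (along + across / anisotropy) < thresh
```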
Export
BibTeX
@inproceedings{Herzog2008, TITLE = {Anisotropic Radiance-Cache Splatting for Efficiently Computing High-Quality Global Illumination with Lightcuts}, AUTHOR = {Herzog, Robert and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-56F88E3A387C52D6C12575550034E328-Herzog2008}, PUBLISHER = {Wiley-Blackwell}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Computing global illumination in complex scenes is even with todays computational power a demanding task. In this work we propose a novel irradiance caching scheme that combines the advantages of two state-of-the-art algorithms for high-quality global illumination rendering: \emph{lightcuts}, an adaptive and hierarchical instant-radiosity based algorithm and the widely used (ir)radiance caching algorithm for sparse sampling and interpolation of (ir)radiance in object space. Our adaptive radiance caching algorithm is based on anisotropic cache splatting, which adapts the cache footprints not only to the magnitude of the illumination gradient computed with lightcuts but also to its orientation allowing larger interpolation errors along the direction of coherent illumination while reducing the error along the illumination gradient. Since lightcuts computes the direct and indirect lighting seamlessly, we use a two-layer radiance cache, to store and control the interpolation of direct and indirect lighting individually with different error criteria. In multiple iterations our method detects cache interpolation errors above the visibility threshold of a pixel and reduces the anisotropic cache footprints accordingly. We achieve significantly better image quality while also speeding up the computation costs by one to two orders of magnitude with respect to the well-known photon mapping with (ir)radiance caching procedure.}, BOOKTITLE = {Computer Graphics Forum (Proc. EUROGRAPHICS)}, EDITOR = {Stamminger, Marc and Dutr{\'e}, Philip}, PAGES = {259--268}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Anisotropic Radiance-Cache Splatting for Efficiently Computing High-Quality Global Illumination with Lightcuts : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1972-1 %F EDOC: 520500 %F OTHER: Local-ID: C125675300671F7B-56F88E3A387C52D6C12575550034E328-Herzog2008 %I Wiley-Blackwell %D 2009 %B Untitled Event %Z date of event: 2009-03-30 - 2009-04-03 %C München, Germany %X Computing global illumination in complex scenes is even with todays computational power a demanding task. In this work we propose a novel irradiance caching scheme that combines the advantages of two state-of-the-art algorithms for high-quality global illumination rendering: lightcuts, an adaptive and hierarchical instant-radiosity based algorithm and the widely used (ir)radiance caching algorithm for sparse sampling and interpolation of (ir)radiance in object space. Our adaptive radiance caching algorithm is based on anisotropic cache splatting, which adapts the cache footprints not only to the magnitude of the illumination gradient computed with lightcuts but also to its orientation allowing larger interpolation errors along the direction of coherent illumination while reducing the error along the illumination gradient. Since lightcuts computes the direct and indirect lighting seamlessly, we use a two-layer radiance cache, to store and control the interpolation of direct and indirect lighting individually with different error criteria. In multiple iterations our method detects cache interpolation errors above the visibility threshold of a pixel and reduces the anisotropic cache footprints accordingly. We achieve significantly better image quality while also speeding up the computation costs by one to two orders of magnitude with respect to the well-known photon mapping with (ir)radiance caching procedure. %B Computer Graphics Forum (Proc. EUROGRAPHICS) %E Stamminger, Marc; Dutré, Philip %P 259 - 268 %I Wiley-Blackwell
Ihrke, M., Ritschel, T., Smith, K., Grosch, T., Myszkowski, K., and Seidel, H.-P. 2009. A Perceptual Evaluation of 3D Unsharp Masking. Human Vision and Electronic Imaging XIV, IS&T/SPIE’s 21st Annual Symposium on Electronic Imaging, SPIE.
Abstract
Much research has gone into developing methods for enhancing the contrast of displayed 3D scenes. In the current study, we investigated the perceptual impact of an algorithm recently proposed by Ritschel et al. that provides a general technique for enhancing the perceived contrast in synthesized scenes. Their algorithm extends traditional image-based unsharp masking to a 3D scene, achieving a scene-coherent enhancement. We conducted a standardized perceptual experiment to test the proposition that a 3D unsharp enhanced scene is superior to the original scene in terms of perceived contrast and preference. Furthermore, the impact of different settings of the algorithm’s main parameters, enhancement strength (λ) and gradient size (σ), was studied in order to provide an estimate of a reasonable parameter space for the method. All participants preferred a clearly visible enhancement over the original, non-enhanced scenes, and the setting for objectionable enhancement was far above the preferred settings. The effect of the gradient size σ was negligible. The general pattern found for the parameters provides a useful guideline for designers making use of 3D unsharp masking: as a rule of thumb, they can easily determine the strength at which they start to perceive an enhancement and use twice this value for a good effect. Since the value for objectionable results was twice as large again, artifacts should not impose restrictions on the applicability of this rule.
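The parameter guideline at the end of the abstract is simple enough to state as code. The helper below is a hypothetical illustration of that rule of thumb, not tooling from the study; the function name and return convention are invented.

```python
def suggest_enhancement_strength(just_visible):
    """Given the enhancement strength at which a designer first perceives
    the 3D unsharp effect, the study's rule of thumb is: double it for a
    good, preferred effect; doubling again reaches the range where the
    enhancement risks becoming objectionable."""
    preferred = 2.0 * just_visible
    objectionable = 2.0 * preferred
    return preferred, objectionable
```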
Export
BibTeX
@inproceedings{Ihrke2009SPIE, TITLE = {A Perceptual Evaluation of {3D} Unsharp Masking}, AUTHOR = {Ihrke, Matthias and Ritschel, Tobias and Smith, Kaleigh and Grosch, Thorsten and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1117/12.809026}, LOCALID = {Local-ID: C125675300671F7B-5AB79508CF9875C4C125755C0035BB4C-Ihrke2009SPIE}, PUBLISHER = {SPIE}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Much research has gone into developing methods for enhancing the contrast of displayed 3D scenes. In the current study, we investigated the perceptual impact of an algorithm recently proposed by Ritschel et al. that provides a general technique for enhancing the perceived contrast in synthesized scenes. Their algorithm extends traditional image-based Unsharp Masking to a 3D scene, achieving a scene-coherent enhancement. We conducted a standardized perceptual experiment to test the proposition that a 3D unsharp enhanced scene was superior to the original scene in terms of perceived contrast and preference. Furthermore, the impact of different settings of the algorithm{\textquoteright}s main parameters enhancement-strength ($\lambda$) and gradient size ($\sigma$) were studied in order to provide an estimate of a reasonable parameter space for the method. All participants preferred a clearly visible enhancement over the original, non-enhanced scenes and the setting for objectionable enhancement was far above the preferred settings. The effect of the gradient size $\sigma$ was negligible. The general pattern found for the parameters provides a useful guideline for designers when making use of 3D Unsharp Masking: as a rule of thumb they can easily determine the strength for which they start to perceive an enhancement and use twice this value for a good effect. Since the value for objectionable results was twice as large again, artifacts should not impose restrictions on the applicability of this rule.}, BOOKTITLE = {Human Vision and Electronic Imaging XIV, IS\&T/SPIE's 21st Annual Symposium on Electronic Imaging}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.}, PAGES = {72400R--1-12}, SERIES = {Annual Symposium on Electronic Imaging}, }
Endnote
%0 Conference Proceedings %A Ihrke, Matthias %A Ritschel, Tobias %A Smith, Kaleigh %A Grosch, Thorsten %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Evaluation of 3D Unsharp Masking : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1975-C %F EDOC: 520494 %R 10.1117/12.809026 %F OTHER: Local-ID: C125675300671F7B-5AB79508CF9875C4C125755C0035BB4C-Ihrke2009SPIE %I SPIE %D 2009 %B Untitled Event %Z date of event: 2009-01-19 - 2009-01-22 %C San Jose, USA %X Much research has gone into developing methods for enhancing the contrast of displayed 3D scenes. In the current study, we investigated the perceptual impact of an algorithm recently proposed by Ritschel et al. that provides a general technique for enhancing the perceived contrast in synthesized scenes. Their algorithm extends traditional image-based Unsharp Masking to a 3D scene, achieving a scene-coherent enhancement. We conducted a standardized perceptual experiment to test the proposition that a 3D unsharp enhanced scene was superior to the original scene in terms of perceived contrast and preference. Furthermore, the impact of different settings of the algorithm's main parameters enhancement-strength (λ) and gradient size (σ) were studied in order to provide an estimate of a reasonable parameter space for the method. All participants preferred a clearly visible enhancement over the original, non-enhanced scenes and the setting for objectionable enhancement was far above the preferred settings. The effect of the gradient size σ was negligible. The general pattern found for the parameters provides a useful guideline for designers when making use of 3D Unsharp Masking: as a rule of thumb they can easily determine the strength for which they start to perceive an enhancement and use twice this value for a good effect. Since the value for objectionable results was twice as large again, artifacts should not impose restrictions on the applicability of this rule. %B Human Vision and Electronic Imaging XIV, IS&T/SPIE's 21st Annual Symposium on Electronic Imaging %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 72400R - 1-12 %I SPIE %B Annual Symposium on Electronic Imaging
Ritschel, T., Ihrke, M., Frisvad, J.R., Coppens, J., Myszkowski, K., and Seidel, H.-P. 2009. Temporal Glare: Real-Time Dynamic Simulation of the Scattering in the Human Eye. Computer Graphics Forum (Proc. Eurographics 2009)28, 3.
Abstract
Glare is a consequence of light scattered within the human eye when looking at bright light sources. This effect can be exploited for tone mapping since adding glare to the depiction of high-dynamic range (HDR) imagery on a low-dynamic range (LDR) medium can dramatically increase perceived contrast. Even though most, if not all, subjects report perceiving glare as a bright pattern that fluctuates in time, up to now it has only been modeled as a static phenomenon. We argue that the temporal properties of glare are a strong means to increase perceived brightness and to produce realistic and attractive renderings of bright light sources. Based on the anatomy of the human eye, we propose a model that enables real-time simulation of dynamic glare on a GPU. This allows an improved depiction of HDR images on LDR media for interactive applications like games, feature films, or even by adding movement to initially static HDR images. By conducting psychophysical studies, we validate that our method improves perceived brightness and that dynamic glare-renderings are often perceived as more attractive depending on the chosen scene.
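As a rough illustration of what making glare dynamic means, the sketch below convolves the bright part of an HDR frame with a scattering kernel whose fine structure is re-randomized over time. It is a toy stand-in under stated assumptions: the paper derives the time-varying point-spread function from the anatomy of the eye and runs it on the GPU, whereas the kernel shape, falloff, and jitter here are invented for illustration (requires NumPy and SciPy).

```python
import numpy as np
from scipy.signal import fftconvolve

def glare_psf(size=64, falloff=2.5, jitter=0.05, t=0.0):
    """Radially decaying scattering kernel whose fine-grained structure is
    perturbed as a function of time t, standing in for the fluctuating
    glare pattern the paper simulates from the eye's anatomy."""
    rng = np.random.default_rng(int(t * 1000))
    y, x = np.mgrid[-size // 2:size // 2, -size // 2:size // 2]
    r = np.hypot(x, y) + 1.0
    psf = r ** -falloff
    psf *= 1.0 + jitter * rng.standard_normal(psf.shape)  # temporal flicker
    psf = np.clip(psf, 0.0, None)
    return psf / psf.sum()

def add_temporal_glare(hdr_lum, threshold=1.0, t=0.0):
    """Spread only above-threshold (bright) pixels with the time-varying
    PSF and add the scattered light back onto the frame."""
    bright = np.where(hdr_lum > threshold, hdr_lum, 0.0)
    return hdr_lum + fftconvolve(bright, glare_psf(t=t), mode="same")
```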
Export
BibTeX
@article{Ritschel2009EG, TITLE = {Temporal Glare: Real-Time Dynamic Simulation of the Scattering in the Human Eye}, AUTHOR = {Ritschel, Tobias and Ihrke, Matthias and Frisvad, Jeppe Revall and Coppens, Joris and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-C0AF37EF8D7C4059C125755C00337FD6-Ritschel2009EG}, YEAR = {2009}, DATE = {2009}, ABSTRACT = {Glare is a consequence of light scattered within the human eye when looking at bright light sources. This effect can be exploited for tone mapping since adding glare to the depiction of high-dynamic range (HDR) imagery on a low-dynamic range (LDR) medium can dramatically increase perceived contrast. Even though most, if not all, subjects report perceiving glare as a bright pattern that fluctuates in time, up to now it has only been modeled as a static phenomenon. We argue that the temporal properties of glare are a strong means to increase perceived brightness and to produce realistic and attractive renderings of bright light sources. Based on the anatomy of the human eye, we propose a model that enables real-time simulation of dynamic glare on a GPU. This allows an improved depiction of HDR images on LDR media for interactive applications like games, feature films, or even by adding movement to initially static HDR images. By conducting psychophysical studies, we validate that our method improves perceived brightness and that dynamic glare-renderings are often perceived as more attractive depending on the chosen scene.}, JOURNAL = {Computer Graphics Forum (Proc. Eurographics 2009)}, VOLUME = {28}, NUMBER = {3}, PAGES = {183--192}, }
Endnote
%0 Journal Article %A Ritschel, Tobias %A Ihrke, Matthias %A Frisvad, Jeppe Revall %A Coppens, Joris %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Temporal Glare: Real-Time Dynamic Simulation of the Scattering in the Human Eye : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-19E7-E %F EDOC: 520489 %F OTHER: Local-ID: C125675300671F7B-C0AF37EF8D7C4059C125755C00337FD6-Ritschel2009EG %D 2009 %* Review method: peer-reviewed %X Glare is a consequence of light scattered within the human eye when looking at bright light sources. This effect can be exploited for tone mapping since adding glare to the depiction of high-dynamic range (HDR) imagery on a low-dynamic range (LDR) medium can dramatically increase perceived contrast. Even though most, if not all, subjects report perceiving glare as a bright pattern that fluctuates in time, up to now it has only been modeled as a static phenomenon. We argue that the temporal properties of glare are a strong means to increase perceived brightness and to produce realistic and attractive renderings of bright light sources. Based on the anatomy of the human eye, we propose a model that enables real-time simulation of dynamic glare on a GPU. This allows an improved depiction of HDR images on LDR media for interactive applications like games, feature films, or even by adding movement to initially static HDR images. By conducting psychophysical studies, we validate that our method improves perceived brightness and that dynamic glare-renderings are often perceived as more attractive depending on the chosen scene. %J Computer Graphics Forum (Proc. Eurographics 2009) %V 28 %N 3 %& 183 %P 183 - 192
2008
Aydin, T.O., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2008. Dynamic Range Independent Image Quality Assessment. ACM Transactions on Graphics (Proc. ACM SIGGRAPH 2008)27, 3.
Abstract
The diversity of display technologies and the introduction of high dynamic range imagery create the need to compare images of radically different dynamic ranges. Current quality assessment metrics are not suitable for this task, as they assume that both reference and test images have the same dynamic range. Image fidelity measures employed by a majority of current metrics, based on the difference of pixel intensity or contrast values between test and reference images, yield meaningless predictions if this assumption does not hold. We present a novel image quality metric capable of operating on an image pair where both images have arbitrary dynamic ranges. Our metric utilizes a model of the human visual system, and its central idea is a new definition of visible distortion based on the detection and classification of visible changes in the image structure. Our metric is carefully calibrated, and its performance is validated through perceptual experiments. We demonstrate possible applications of our metric to the evaluation of direct and inverse tone mapping operators as well as the analysis of image appearance on displays with various characteristics.
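A heavily simplified sketch of the detection-and-classification idea follows: contrast that is visible in the reference but not in the test image is flagged as loss, the converse as amplification, and visible contrast with flipped polarity as reversal. The single-band decomposition and the fixed visibility threshold are crude stand-ins for the paper's multi-scale human visual system model (requires NumPy and SciPy).

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def band_contrast(img, sigma=2.0):
    """Crude band-limited contrast: the image minus its blurred base layer,
    normalized by the base layer (not the paper's CSF-weighted decomposition)."""
    base = gaussian_filter(img, sigma)
    return (img - base) / (base + 1e-4)

def classify_structure_changes(reference, test, vis_thresh=0.02):
    """Per-pixel classification of visible structure changes between a
    reference and a test image of possibly different dynamic ranges."""
    cr, ct = band_contrast(reference), band_contrast(test)
    vis_r, vis_t = np.abs(cr) > vis_thresh, np.abs(ct) > vis_thresh
    loss = vis_r & ~vis_t                                    # structure disappeared
    amplification = vis_t & ~vis_r                           # structure appeared
    reversal = vis_r & vis_t & (np.sign(cr) != np.sign(ct))  # polarity flipped
    return loss, amplification, reversal
```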
Export
BibTeX
@article{Tunc08SG, TITLE = {Dynamic Range Independent Image Quality Assessment}, AUTHOR = {Aydin, Tunc Ozan and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, URL = {http://doi.acm.org/10.1145/1360612.1360668}, DOI = {10.1145/1360612.1360668}, LOCALID = {Local-ID: C125756E0038A185-155666108816CD9DC12574C500543902-Tunc08SG}, PUBLISHER = {ACM}, ADDRESS = {New York, NY}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {The diversity of display technologies and introduction of high dynamic range imagery introduces the necessity of comparing images of radically different dynamic ranges. Current quality assessment metrics are not suitable for this task, as they assume that both reference and test images have the same dynamic range. Image fidelity measures employed by a majority of current metrics, based on the difference of pixel intensity or contrast values between test and reference images, result in meaningless predictions if this assumption does not hold. We present a novel image quality metric capable of operating on an image pair where both images have arbitrary dynamic ranges. Our metric utilizes a model of the human visual system, and its central idea is a new definition of visible distortion based on the detection and classification of visible changes in the image structure. Our metric is carefully calibrated and its performance is validated through perceptual experiments. We demonstrate possible applications of our metric to the evaluation of direct and inverse tone mapping operators as well as the analysis of the image appearance on displays with various characteristics.}, JOURNAL = {ACM Transactions on Graphics (Proc. ACM SIGGRAPH)}, VOLUME = {27}, NUMBER = {3}, PAGES = {1--10}, EID = {69}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008}, EDITOR = {Turk, Greg}, }
Endnote
%0 Journal Article %A Aydin, Tunc Ozan %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Dynamic Range Independent Image Quality Assessment : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B77-7 %F EDOC: 427982 %R 10.1145/1360612.1360668 %U http://doi.acm.org/10.1145/1360612.1360668 %F OTHER: Local-ID: C125756E0038A185-155666108816CD9DC12574C500543902-Tunc08SG %D 2008 %X The diversity of display technologies and introduction of high dynamic range imagery introduces the necessity of comparing images of radically different dynamic ranges. Current quality assessment metrics are not suitable for this task, as they assume that both reference and test images have the same dynamic range. Image fidelity measures employed by a majority of current metrics, based on the difference of pixel intensity or contrast values between test and reference images, result in meaningless predictions if this assumption does not hold. We present a novel image quality metric capable of operating on an image pair where both images have arbitrary dynamic ranges. Our metric utilizes a model of the human visual system, and its central idea is a new definition of visible distortion based on the detection and classification of visible changes in the image structure. Our metric is carefully calibrated and its performance is validated through perceptual experiments. We demonstrate possible applications of our metric to the evaluation of direct and inverse tone mapping operators as well as the analysis of the image appearance on displays with various characteristics. %J ACM Transactions on Graphics %V 27 %N 3 %& 1 %P 1 - 10 %Z sequence number: 69 %I ACM %C New York, NY %@ false %B Proceedings of ACM SIGGRAPH 2008 %O ACM SIGGRAPH 2008 Los Angeles, CA
Creem-Regehr, S. and Myszkowski, K., eds. 2008. Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008. ACM.
Export
BibTeX
@proceedings{Myszkowski2008APGV, TITLE = {Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008}, EDITOR = {Creem-Regehr, Sarah and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {978-1-59593-981-4}, LOCALID = {Local-ID: C125756E0038A185-8AFC12175FDF8F80C12574C500637171-Myszkowski2008APGV}, PUBLISHER = {ACM}, YEAR = {2008}, DATE = {2008}, PAGES = {157}, }
Endnote
%0 Conference Proceedings %E Creem-Regehr, Sarah %E Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008 : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1D1A-D %F EDOC: 428021 %@ 978-1-59593-981-4 %F OTHER: Local-ID: C125756E0038A185-8AFC12175FDF8F80C12574C500637171-Myszkowski2008APGV %I ACM %D 2008 %B Untitled Event %Z date of event: 2008-08-09 - 2008-08-10 %D 2008 %C Los Angeles, CA %P 157
Herzog, R., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2008. Render2MPEG: A Perception-based Framework Towards Integrating Rendering and Video Compression. The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008, Blackwell.
Abstract
Currently, 3D animation rendering and video compression are completely independent processes, even if rendered frames are streamed on-the-fly within a client-server platform. In such a scenario, which may involve time-varying transmission bandwidths and different display characteristics at the client side, dynamic adjustment of the rendering quality to these requirements can lead to better use of server resources. In this work, we present a framework where the renderer and MPEG codec are coupled through a straightforward interface that provides precise motion vectors from the rendering side to the codec and perceptual error thresholds for each pixel in the opposite direction. The perceptual error thresholds take into account bandwidth-dependent quantization errors resulting from the lossy compression as well as image content-dependent luminance and spatial contrast masking. The availability of the discrete cosine transform (DCT) coefficients at the codec side makes it possible to use advanced models of the human visual system (HVS) in the perceptual error threshold derivation without incurring any significant cost. Those error thresholds are then used to control the rendering quality and keep it well aligned with the compressed stream quality. In our prototype system we use the lightcuts technique developed by Walter et al., which we enhance to handle dynamic image sequences, and an MPEG-2 implementation. Our results clearly demonstrate the advantages of coupling rendering with video compression, most notably faster rendering. Furthermore, temporally coherent rendering leads to a reduction of temporal artifacts.
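To make the coupling concrete, here is a minimal sketch of one ingredient: deriving a per-coefficient error budget for an 8x8 DCT block from the codec's quantization step, on the reasoning that rendering noise smaller than roughly half a quantization step is destroyed by lossy compression anyway. The half-step rule and the scalar masking factor are illustrative simplifications, not the paper's HVS-based thresholds.

```python
import numpy as np

def dct_error_budget(quant_matrix, masking_gain=1.0):
    """Per-DCT-coefficient error budget for one 8x8 block: quantization
    already perturbs each coefficient by up to half its step size, so
    rendering error below that bound (raised further where luminance and
    contrast masking apply) stays invisible in the compressed stream."""
    q = np.asarray(quant_matrix, dtype=np.float64).reshape(8, 8)
    return 0.5 * q * masking_gain

# Usage idea: feed the codec's intra quantization matrix, scaled by the
# current bandwidth-dependent quantizer, then stop refining a region of
# the frame once its estimated rendering error drops below the budget.
```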
Export
BibTeX
@inproceedings{Herzog08EG, TITLE = {{Render2MPEG}: A Perception-based Framework Towards Integrating Rendering and Video Compression}, AUTHOR = {Herzog, Robert and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://dx.doi.org/10.1111/j.1467-8659.2008.01115.x}, DOI = {10.1111/j.1467-8659.2008.01115.x}, LOCALID = {Local-ID: C125756E0038A185-3B410E71DC037794C12574C5005576A5-Herzog08EG}, PUBLISHER = {Blackwell}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {Currently 3D animation rendering and video compression are completely independent processes even if rendered frames are streamed on-the-fly within a client-server platform. In such scenario, which may involve time-varying transmission bandwidths and different display characteristics at the client side, dynamic adjustment of the rendering quality to such requirements can lead to a better use of server resources. In this work, we present a framework where the renderer and MPEG codec are coupled through a straightforward interface that provides precise motion vectors from the rendering side to the codec and perceptual error thresholds for each pixel in the opposite direction. The perceptual error thresholds take into account bandwidth-dependent quantization errors resulting from the lossy compression as well as image content-dependent luminance and spatial contrast masking. The availability of the discrete cosine transform (DCT) coefficients at the codec side enables to use advanced models of the human visual system (HVS) in the perceptual error threshold derivation without incurring any significant cost. Those error thresholds are then used to control the rendering quality and make it well aligned with the compressed stream quality. In our prototype system we use the lightcuts technique developed by Walter et al., which we enhance to handle dynamic image sequences, and an MPEG-2 implementation. Our results clearly demonstrate many advantages of coupling the rendering with video compression in terms of faster rendering. Furthermore, temporally coherent rendering leads to a reduction of temporal artifacts.}, BOOKTITLE = {The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008}, EDITOR = {Drettakis, George and Scopigno, Roberto}, PAGES = {183--192}, SERIES = {Computer Graphics Forum}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Render2MPEG: A Perception-based Framework Towards Integrating Rendering and Video Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CD2-8 %F EDOC: 428103 %R 10.1111/j.1467-8659.2008.01115.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01115.x %F OTHER: Local-ID: C125756E0038A185-3B410E71DC037794C12574C5005576A5-Herzog08EG %I Blackwell %D 2008 %B Untitled Event %Z date of event: 2008-04-14 - 2008-04-14 %C Crete, Greece %X Currently 3D animation rendering and video compression are completely independent processes even if rendered frames are streamed on-the-fly within a client-server platform. In such scenario, which may involve time-varying transmission bandwidths and different display characteristics at the client side, dynamic adjustment of the rendering quality to such requirements can lead to a better use of server resources. In this work, we present a framework where the renderer and MPEG codec are coupled through a straightforward interface that provides precise motion vectors from the rendering side to the codec and perceptual error thresholds for each pixel in the opposite direction. The perceptual error thresholds take into account bandwidth-dependent quantization errors resulting from the lossy compression as well as image content-dependent luminance and spatial contrast masking. The availability of the discrete cosine transform (DCT) coefficients at the codec side enables to use advanced models of the human visual system (HVS) in the perceptual error threshold derivation without incurring any significant cost. Those error thresholds are then used to control the rendering quality and make it well aligned with the compressed stream quality. In our prototype system we use the lightcuts technique developed by Walter et al., which we enhance to handle dynamic image sequences, and an MPEG-2 implementation. Our results clearly demonstrate many advantages of coupling the rendering with video compression in terms of faster rendering. Furthermore, temporally coherent rendering leads to a reduction of temporal artifacts. %B The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 183 - 192 %I Blackwell %B Computer Graphics Forum
Mantiuk, R., Zdrojewska, D., Tomaszewska, A., Mantiuk, R., and Myszkowski, K. 2008. Selected Problems of High Dynamic Range Video Compression and GPU-based Contrast Domain Tone Mapping. Proceedings of the 24th Spring Conference on Computer Graphics (SCCG’08), SCCG.
Abstract
The main goal of High Dynamic Range Imaging (HDRI) is the precise reproduction of real-world appearance in terms of intensity levels and color gamut at all stages of image and video processing, from acquisition to display. In our work, we investigate the problem of lossy HDR image and video compression and provide a number of novel solutions, which are optimized for storage efficiency or backward compatibility with existing compression standards. To take advantage of HDR information even for traditional low-dynamic range displays, we design tone mapping algorithms, which adjust the HDR contrast ranges in a scene to those available on typical display devices.
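For readers unfamiliar with tone mapping, the sketch below shows a generic global operator of the kind the abstract alludes to. It is the photographic operator of Reinhard et al. (2002), chosen for brevity; it is not the contrast-domain, GPU-based method this paper develops.

```python
import numpy as np

def reinhard_global(lum, key=0.18):
    """Map HDR scene luminance to a displayable [0, 1) range: scale the
    image so its log-average luminance lands on 'key', then compress
    highlights with the x / (1 + x) curve."""
    log_avg = np.exp(np.mean(np.log(lum + 1e-6)))
    scaled = (key / log_avg) * lum
    return scaled / (1.0 + scaled)
```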
Export
BibTeX
@inproceedings{Myszkowski2007, TITLE = {Selected Problems of High Dynamic Range Video Compression and {GPU}-based Contrast Domain Tone Mapping}, AUTHOR = {Mantiuk, Radoslaw and Zdrojewska, Dorota and Tomaszewska, Anna and Mantiuk, Rafa{\l} and Myszkowski, Karol}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125756E0038A185-5A787107DA03F936C12574C5005043C7-Myszkowski2007}, PUBLISHER = {SCCG}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {The main goal of High Dynamic Range Imaging (HDRI) is precise reproduction of real world appearance in terms of intensity levels and color gamut at all stages of image and video processing from acquisition to display. In our work, we investigate the problem of lossy HDR image and video compression and provide a number of novel solutions, which are optimized for storage efficiency or backward compatibility with existing compression standards. To take advantage of HDR information even for traditional low-dynamic range displays, we design tone mapping algorithms, which adjust HDR contrast ranges in a scene to those available in typical display devices.}, BOOKTITLE = {Proceedings of the 24th Spring Conference on Computer Graphics (SCCG'08)}, EDITOR = {Myszkowski, Karol}, PAGES = {11--18}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Radoslaw %A Zdrojewska, Dorota %A Tomaszewska, Anna %A Mantiuk, Rafał %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Selected Problems of High Dynamic Range Video Compression and GPU-based Contrast Domain Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CE4-F %F EDOC: 428158 %F OTHER: Local-ID: C125756E0038A185-5A787107DA03F936C12574C5005043C7-Myszkowski2007 %I SCCG %D 2008 %B Untitled Event %Z date of event: 2008-04-21 - 2008-04-23 %C Budmerice, Slovakia %X The main goal of High Dynamic Range Imaging (HDRI) is precise reproduction of real world appearance in terms of intensity levels and color gamut at all stages of image and video processing from acquisition to display. In our work, we investigate the problem of lossy HDR image and video compression and provide a number of novel solutions, which are optimized for storage efficiency or backward compatibility with existing compression standards. To take advantage of HDR information even for traditional low-dynamic range displays, we design tone mapping algorithms, which adjust HDR contrast ranges in a scene to those available in typical display devices. %B Proceedings of the 24th Spring Conference on Computer Graphics (SCCG'08) %E Myszkowski, Karol %P 11 - 18 %I SCCG
Myszkowski, K., Mantiuk, R., and Krawczyk, G. 2008. High Dynamic Range Video. Morgan & Claypool Publishers, San Rafael, USA.
Abstract
As new displays and cameras offer enhanced color capabilities, there is a need to extend the precision of digital content. High Dynamic Range (HDR) imaging encodes images and video with higher than normal 8 bit-per-color-channel precision, enabling representation of the complete color gamut and the full visible range of luminance. However, to realize the transition from traditional to HDR imaging, it is necessary to develop imaging algorithms that work with the high-precision data. To make such algorithms effective and feasible in practice, it is necessary to take advantage of the limitations of the human visual system by aligning the data shortcomings to those of the human eye, thus limiting storage and processing precision. Therefore, human visual perception is the key component of the solutions we discuss in this book. This book presents a complete pipeline for HDR image and video processing, from acquisition, through compression and quality evaluation, to display. At the HDR image and video acquisition stage, specialized HDR sensors or multi-exposure techniques suitable for traditional cameras are discussed. Then, we present a practical solution for calibrating pixel values in terms of photometric or radiometric quantities, which are required in some technically oriented applications. Also, we cover the problem of efficient image and video compression and encoding either for storage or transmission purposes, including the aspect of backward compatibility with existing formats. Finally, we review existing HDR display technologies and the associated problems of image contrast and brightness adjustment. For this purpose tone mapping is employed to accommodate HDR content to LDR devices. Conversely, the so-called inverse tone mapping is required to upgrade LDR content for display on HDR devices. We overview HDR-enabled image and video quality metrics, which are needed to verify algorithms at all stages of the pipeline. Additionally, we cover successful examples of HDR technology applications, in particular in computer graphics and computer vision. The goal of this book is to present all discussed components of the HDR pipeline with the main focus on video. For some pipeline stages HDR video solutions are either not well established or do not exist at all, in which case we describe techniques for single HDR images. In such cases we attempt to select techniques which can be extended into the temporal domain. Whenever needed, relevant background information on human perception is given, which enables better understanding of the design choices behind the discussed algorithms and HDR equipment. Table of Contents: Introduction / Representation of an HDR Image / HDR Image and Video Acquisition / HDR Image Quality / HDR Image, Video, and Texture Compression / Tone Reproduction / HDR Display Devices / LDR2HDR: Recovering Dynamic Range in Legacy Content / HDRI in Computer Graphics / Software
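Since the book's pipeline starts with multi-exposure acquisition, here is a minimal sketch of merging differently exposed frames into a radiance map. It assumes linearized images normalized to [0, 1] and skips the camera response recovery, alignment, and deghosting that a real HDR video pipeline needs.

```python
import numpy as np

def merge_exposures(images, exposure_times):
    """Weighted average of linear exposures, each divided by its exposure
    time; a hat weight downweights under- and over-exposed pixels, which
    carry little reliable radiance information."""
    num = np.zeros_like(images[0], dtype=np.float64)
    den = np.zeros_like(num)
    for img, t in zip(images, exposure_times):
        w = 1.0 - np.abs(2.0 * img - 1.0)   # peaks at mid-grey, 0 at the ends
        num += w * img / t
        den += w
    return num / np.maximum(den, 1e-6)
```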
Export
BibTeX
@book{Myszkowski2008, TITLE = {High Dynamic Range Video}, AUTHOR = {Myszkowski, Karol and Mantiuk, Rafa{\l} and Krawczyk, Grzegorz}, LANGUAGE = {eng}, ISBN = {9781598292145}, URL = {http://dx.doi.org/10.2200/S00109ED1V01Y200806CGR005}, LOCALID = {Local-ID: C125756E0038A185-B9003D1C8852615FC12574C50051C1EE-Myszkowski2008}, PUBLISHER = {Morgan \& Claypool Publishers}, ADDRESS = {San Rafael, USA}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {As new displays and cameras offer enhanced color capabilities, there is a need to extend the precision of digital content. High Dynamic Range (HDR) imaging encodes images and video with higher than normal 8 bit-per-color-channel precision, enabling representation of the complete color gamut and the full visible range of luminance. However, to realize transition from the traditional to HDR imaging, it is necessary to develop imaging algorithms that work with the high-precision data. To make such algorithms effective and feasible in practice, it is necessary to take advantage of the limitations of the human visual system by aligning the data shortcomings to those of the human eye, thus limiting storage and processing precision. Therefore, human visual perception is the key component of the solutions we discuss in this book. This book presents a complete pipeline for HDR image and video processing from acquisition, through compression and quality evaluation, to display. At the HDR image and video acquisition stage specialized HDR sensors or multi-exposure techniques suitable for traditional cameras are discussed. Then, we present a practical solution for pixel values calibration in terms of photometric or radiometric quantities, which are required in some technically oriented applications. Also, we cover the problem of efficient image and video compression and encoding either for storage or transmission purposes, including the aspect of backward compatibility with existing formats. Finally, we review existing HDR display technologies and the associated problems of image contrast and brightness adjustment. For this purpose tone mapping is employed to accommodate HDR content to LDR devices. Conversely, the so-called inverse tone mapping is required to upgrade LDR content for displaying on HDR devices. We overview HDR-enabled image and video quality metrics, which are needed to verify algorithms at all stages of the pipeline. Additionally, we cover successful examples of the HDR technology applications, in particular, in computer graphics and computer vision. The goal of this book is to present all discussed components of the HDR pipeline with the main focus on video. For some pipeline stages HDR video solutions are either not well established or do not exist at all, in which case we describe techniques for single HDR images. In such cases we attempt to select the techniques, which can be extended into temporal domain. Whenever needed, relevant background information on human perception is given, which enables better understanding of the design choices behind the discussed algorithms and HDR equipment. Table of Contents: Introduction / Representation of an HDR Image / HDR Image and Video Acquisition / HDR Image Quality / HDR Image, Video, and Texture Compression / Tone Reproduction / HDR Display Devices / LDR2HDR: Recovering Dynamic Range in Legacy Content / HDRI in Computer Graphics / Software}, PAGES = {158}, }
Endnote
%0 Book %A Myszkowski, Karol %A Mantiuk, Rafał %A Krawczyk, Grzegorz %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1BD7-2 %F EDOC: 428175 %@ 9781598292145 %U http://dx.doi.org/10.2200/S00109ED1V01Y200806CGR005 %F OTHER: Local-ID: C125756E0038A185-B9003D1C8852615FC12574C50051C1EE-Myszkowski2008 %I Morgan & Claypool Publishers %C San Rafael, USA %D 2008 %P 158 %X As new displays and cameras offer enhanced color capabilities, there is a need to extend the precision of digital content. High Dynamic Range (HDR) imaging encodes images and video with higher than normal 8 bit-per-color-channel precision, enabling representation of the complete color gamut and the full visible range of luminance. However, to realize transition from the traditional to HDR imaging, it is necessary to develop imaging algorithms that work with the high-precision data. To make such algorithms effective and feasible in practice, it is necessary to take advantage of the limitations of the human visual system by aligning the data shortcomings to those of the human eye, thus limiting storage and processing precision. Therefore, human visual perception is the key component of the solutions we discuss in this book. This book presents a complete pipeline for HDR image and video processing from acquisition, through compression and quality evaluation, to display. At the HDR image and video acquisition stage specialized HDR sensors or multi-exposure techniques suitable for traditional cameras are discussed. Then, we present a practical solution for pixel values calibration in terms of photometric or radiometric quantities, which are required in some technically oriented applications. Also, we cover the problem of efficient image and video compression and encoding either for storage or transmission purposes, including the aspect of backward compatibility with existing formats. Finally, we review existing HDR display technologies and the associated problems of image contrast and brightness adjustment. For this purpose tone mapping is employed to accommodate HDR content to LDR devices. Conversely, the so-called inverse tone mapping is required to upgrade LDR content for displaying on HDR devices. We overview HDR-enabled image and video quality metrics, which are needed to verify algorithms at all stages of the pipeline. Additionally, we cover successful examples of the HDR technology applications, in particular, in computer graphics and computer vision. The goal of this book is to present all discussed components of the HDR pipeline with the main focus on video. For some pipeline stages HDR video solutions are either not well established or do not exist at all, in which case we describe techniques for single HDR images. In such cases we attempt to select the techniques, which can be extended into temporal domain. Whenever needed, relevant background information on human perception is given, which enables better understanding of the design choices behind the discussed algorithms and HDR equipment. Table of Contents: Introduction / Representation of an HDR Image / HDR Image and Video Acquisition / HDR Image Quality / HDR Image, Video, and Texture Compression / Tone Reproduction / HDR Display Devices / LDR2HDR: Recovering Dynamic Range in Legacy Content / HDRI in Computer Graphics / Software
Myszkowski, K., ed. 2008. Proceedings of the 24th Spring Conference on Computer Graphics (SCCG ’08). SCCG, Bratislava.
Export
BibTeX
@book{Myszkowski2008SCCG, TITLE = {Proceedings of the 24th Spring Conference on Computer Graphics ({SCCG} '08)}, EDITOR = {Myszkowski, Karol}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125756E0038A185-B374E4826C5F0B2EC12574C50063B91D-Myszkowski2008SCCG}, PUBLISHER = {SCCG}, ADDRESS = {Bratislava}, YEAR = {2008}, DATE = {2008}, PAGES = {211}, }
Endnote
%0 Edited Book %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Proceedings of the 24th Spring Conference on Computer Graphics (SCCG '08) : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CB7-6 %F EDOC: 428174 %F OTHER: Local-ID: C125756E0038A185-B374E4826C5F0B2EC12574C50063B91D-Myszkowski2008SCCG %I SCCG %C Bratislava %D 2008 %P 211
Ritschel, T., Smith, K., Ihrke, M., Grosch, T., Myszkowski, K., and Seidel, H.-P. 2008. 3D Unsharp Masking for Scene Coherent Enhancement. Proceedings of ACM SIGGRAPH 2008, ACM.
Export
BibTeX
@inproceedings{Ritschel08Sig, TITLE = {{3D} Unsharp Masking for Scene Coherent Enhancement}, AUTHOR = {Ritschel, Tobias and Smith, Kaleigh and Ihrke, Matthias and Grosch, Thorsten and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://doi.acm.org/10.1145/1360612.1360689}, DOI = {10.1145/1360612.1360689}, LOCALID = {Local-ID: C125756E0038A185-41E8E32E3589C504C12574C500535A27-Ritschel08Sig}, PUBLISHER = {ACM}, YEAR = {2008}, DATE = {2008}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2008}, EDITOR = {Turk, Greg}, PAGES = {Art.90.1--8}, SERIES = {ACM Transactions on Graphics}, }
Endnote
%0 Conference Proceedings %A Ritschel, Tobias %A Smith, Kaleigh %A Ihrke, Matthias %A Grosch, Thorsten %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T 3D Unsharp Masking for Scene Coherent Enhancement : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AB8-0 %F EDOC: 428200 %R 10.1145/1360612.1360689 %U http://doi.acm.org/10.1145/1360612.1360689 %F OTHER: Local-ID: C125756E0038A185-41E8E32E3589C504C12574C500535A27-Ritschel08Sig %I ACM %D 2008 %B Untitled Event %Z date of event: 2008-08-11 - 2008-08-15 %C Los Angeles, USA %B Proceedings of ACM SIGGRAPH 2008 %E Turk, Greg %P Art.90.1 - 8 %I ACM %B ACM Transactions on Graphics
Smith, K., Landes, P.-E., Thollot, J., and Myszkowski, K. 2008. Apparent Greyscale: A Simple and Fast Conversion to Perceptually Accurate Images and Video. The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008, Blackwell.
Abstract
This paper presents a quick and simple method for converting complex images and video to perceptually accurate greyscale versions. We use a two-step approach: first, grey values are assigned globally and the colour ordering is determined; second, the greyscale is enhanced locally to reproduce the original contrast. Our global mapping is image-independent and incorporates the Helmholtz-Kohlrausch colour appearance effect for predicting differences between isoluminant colours. Our multiscale local contrast enhancement reintroduces lost discontinuities only in regions that insufficiently represent the original chromatic contrast. All operations are restricted so that they preserve the overall image appearance, lightness range and differences, colour ordering, and spatial details, resulting in perceptually accurate achromatic reproductions of the colour original.
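A toy version of the global step might look as follows: start from lightness and nudge it by a chroma-dependent term so that isoluminant but saturated colours receive distinct greys. The linear boost and the constant k are invented stand-ins; the actual method uses a calibrated Helmholtz-Kohlrausch prediction (which also depends on hue) and adds the local multiscale enhancement step omitted here.

```python
import numpy as np

def apparent_grey(L, C, k=0.2):
    """Global greyscale assignment from CIE L*C*h inputs: perceived
    brightness grows with chroma C (the Helmholtz-Kohlrausch effect),
    so saturated colours map to slightly lighter greys than their
    lightness L alone would suggest."""
    return np.clip(L + k * C, 0.0, 100.0)
```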
Export
BibTeX
@inproceedings{Smith2008, TITLE = {Apparent Greyscale: A Simple and Fast Conversion to Perceptually Accurate Images and Video}, AUTHOR = {Smith, Kaleigh and Landes, Pierre-Edouard and Thollot, J{\"o}elle and Myszkowski, Karol}, LANGUAGE = {eng}, URL = {http://dx.doi.org/10.1111/j.1467-8659.2008.01116.x}, DOI = {10.1111/j.1467-8659.2008.01116.x}, LOCALID = {Local-ID: C125756E0038A185-E88EBE366EBA274FC1257495005568E5-Smith2008}, PUBLISHER = {Blackwell}, YEAR = {2008}, DATE = {2008}, ABSTRACT = {This paper presents a quick and simple method for converting complex images and video to perceptually accurate greyscale versions. We use a two-step approach first to globally assign grey values and determine colour ordering, then second, to locally enhance the greyscale to reproduce the original contrast. Our global mapping is image independent and incorporates the Helmholtz-Kohlrausch colour appearance effect for predicting differences between isoluminant colours. Our multiscale local contrast enhancement reintroduces lost discontinuities only in regions that insufficiently represent original chromatic contrast. All operations are restricted so that they preserve the overall image appearance, lightness range and differences, colour ordering, and spatial details, resulting in perceptually accurate achromatic reproductions of the colour original.}, BOOKTITLE = {The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008}, EDITOR = {Drettakis, George and Scopigno, Roberto}, PAGES = {193--200}, SERIES = {Computer Graphics Forum}, }
Endnote
%0 Conference Proceedings %A Smith, Kaleigh %A Landes, Pierre-Edouard %A Thollot, Jöelle %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Apparent Greyscale: A Simple and Fast Conversion to Perceptually Accurate Images and Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1AF9-D %F EDOC: 428226 %R 10.1111/j.1467-8659.2008.01116.x %U http://dx.doi.org/10.1111/j.1467-8659.2008.01116.x %F OTHER: Local-ID: C125756E0038A185-E88EBE366EBA274FC1257495005568E5-Smith2008 %I Blackwell %D 2008 %B Untitled Event %Z date of event: 2008-04-14 - 2008-04-18 %C Crete, Greece %X This paper presents a quick and simple method for converting complex images and video to perceptually accurate greyscale versions. We use a two-step approach first to globally assign grey values and determine colour ordering, then second, to locally enhance the greyscale to reproduce the original contrast. Our global mapping is image independent and incorporates the Helmholtz-Kohlrausch colour appearance effect for predicting differences between isoluminant colours. Our multiscale local contrast enhancement reintroduces lost discontinuities only in regions that insufficiently represent original chromatic contrast. All operations are restricted so that they preserve the overall image appearance, lightness range and differences, colour ordering, and spatial details, resulting in perceptually accurate achromatic reproductions of the colour original. %B The European Association for Computer Graphics 29th Annual Conference, EUROGRAPHICS 2008 %E Drettakis, George; Scopigno, Roberto %P 193 - 200 %I Blackwell %B Computer Graphics Forum
Yoshida, A., Ihrke, M., Mantiuk, R., and Seidel, H.-P. 2008a. Brightness of the Glare Illusion. Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008, ACM.
Export
BibTeX
@inproceedings{Yoshida2008_APGV, TITLE = {Brightness of the Glare Illusion}, AUTHOR = {Yoshida, Akiko and Ihrke, Matthias and Mantiuk, Rafa{\l} and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-981-4}, URL = {http://www.mpi-inf.mpg.de/~yoshida/Yoshida_APGV2008.pdf}, LOCALID = {Local-ID: C125756E0038A185-0747F286D3E9D7EDC12574410035A60A-Yoshida2008_APGV}, PUBLISHER = {ACM}, YEAR = {2008}, DATE = {2008}, BOOKTITLE = {Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008}, EDITOR = {Creem-Regehr, Sarah and Myszkowski, Karol}, PAGES = {83--90}, }
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Ihrke, Matthias %A Mantiuk, Rafał %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Brightness of the Glare Illusion : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1B26-E %F EDOC: 428257 %U http://www.mpi-inf.mpg.de/~yoshida/Yoshida_APGV2008.pdf %F OTHER: Local-ID: C125756E0038A185-0747F286D3E9D7EDC12574410035A60A-Yoshida2008_APGV %I ACM %D 2008 %B Untitled Event %Z date of event: 2008-08-09 - 2008-08-10 %C Los Angeles, CA, USA %B Symposium on Applied Perception in Graphics and Visualization : proceedings APGV 2008 %E Creem-Regehr, Sarah; Myszkowski, Karol %P 83 - 90 %I ACM %@ 978-1-59593-981-4
Yoshida, A., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2008b. Perception-Based Contrast Enhancement Model for Complex Images in High Dynamic Range. Human Vision and Electronic Imaging XIII, SPIE.
Export
BibTeX
@inproceedings{Yoshida_SPIE2008, TITLE = {Perception-Based Contrast Enhancement Model for Complex Images in High Dynamic Range}, AUTHOR = {Yoshida, Akiko and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-0-8194-6978-6}, URL = {http://dx.doi.org/10.1117/12.766500}, DOI = {10.1117/12.766500}, LOCALID = {Local-ID: C125756E0038A185-1AF67FD9509EB0FAC12573AF006318E7-Yoshida_SPIE2008}, PUBLISHER = {SPIE}, YEAR = {2008}, DATE = {2008}, BOOKTITLE = {Human Vision and Electronic Imaging XIII}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N.}, PAGES = {68060C--1-11}, SERIES = {Proceedings of SPIE-IS\&T Electronic Imaging}, }
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Based Contrast Enhancement Model for Complex Images in High Dynamic Range : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1CA2-3 %F EDOC: 428258 %R 10.1117/12.766500 %U http://dx.doi.org/10.1117/12.766500 %F OTHER: Local-ID: C125756E0038A185-1AF67FD9509EB0FAC12573AF006318E7-Yoshida_SPIE2008 %I SPIE %D 2008 %B Untitled Event %Z date of event: 2008-01-28 - 2008-01-31 %C San Jose, CA, USA %B Human Vision and Electronic Imaging XIII %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N. %P 68060C - 1-11 %I SPIE %@ 978-0-8194-6978-6 %B Proceedings of SPIE-IS&T Electronic Imaging
2007
Gösele, M. and Myszkowski, K. 2007. HDR Applications in Computer Graphics. In: High-Dynamic-Range (HDR) Vision. Springer, Berlin.
Export
BibTeX
@incollection{Myszkowski2007HDRbook1, TITLE = {{HDR} Applications in Computer Graphics}, AUTHOR = {G{\"o}sele, Michael and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {1437-0387}, ISBN = {978-3-540-44432-9; 978-3-540-44433-6}, DOI = {10.1007/978-3-540-44433-6_13}, PUBLISHER = {Springer}, ADDRESS = {Berlin}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {High-Dynamic-Range (HDR) Vision}, EDITOR = {Hoefflinger, Bernd}, PAGES = {193--210}, SERIES = {Springer Series in Advanced Microelectronics}, VOLUME = {26}, }
Endnote
%0 Book Section %A Gösele, Michael %A Myszkowski, Karol %+ External Organizations Computer Graphics, MPI for Informatics, Max Planck Society %T HDR Applications in Computer Graphics : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D414-F %R 10.1007/978-3-540-44433-6_13 %D 2007 %B High-Dynamic-Range (HDR) Vision %E Hoefflinger, Bernd %P 193 - 210 %I Springer %C Berlin %@ 978-3-540-44432-9 978-3-540-44433-6 %S Springer Series in Advanced Microelectronics %N 26 %@ false
Herzog, R., Havran, V., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2007a. Global Illumination using Photon Ray Splatting. Eurographics 2007, Blackwell.
Abstract
We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. Many existing global illumination approaches either quickly compute an overly approximate solution or obtain high-quality results at the price of computation times that are orders of magnitude longer. The proposed method improves photon density estimation and leads to significantly better visual quality, in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering, where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our photon splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.
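For contrast with the ray-space approach, the sketch below shows the classic surface-based k-nearest-neighbour photon density estimate that the paper improves upon; the paper instead splats whole photon rays and gathers density in ray space, which avoids the surface-induced bias this baseline suffers from near concave features. Array names and the flat-disc area assumption are illustrative.

```python
import numpy as np

def knn_radiance_estimate(x, photon_pos, photon_power, k=50):
    """Baseline photon-map density estimate: sum the power of the k
    photons nearest to surface point x and divide by the area of the
    disc that encloses them (assumes a locally flat surface)."""
    d2 = np.sum((photon_pos - x) ** 2, axis=1)   # squared distances to x
    nearest = np.argpartition(d2, k)[:k]
    radius2 = d2[nearest].max()
    return photon_power[nearest].sum() / (np.pi * radius2)
```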
Export
BibTeX
@inproceedings{HerzogEG2007, TITLE = {Global Illumination using Photon Ray Splatting}, AUTHOR = {Herzog, Robert and Havran, Vlastimil and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, LOCALID = {Local-ID: C12573CC004A8E26-922F7B2EB5B8D78CC12573C4004C5B93-HerzogEG2007}, PUBLISHER = {Blackwell}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. Many existing global illumination approaches either quickly compute an overly approximate solution or perform an orders of magnitude slower computation to obtain high-quality results for the indirect illumination. The proposed method improves photon density estimation and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our photon splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.}, BOOKTITLE = {Eurographics 2007}, EDITOR = {Cohen-Or, Daniel and Slavik, Pavel}, PAGES = {503--513}, SERIES = {Computer Graphics Forum}, }
Endnote
%0 Conference Proceedings %A Herzog, Robert %A Havran, Vlastimil %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global Illumination using Photon Ray Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F5A-F %F EDOC: 356513 %F OTHER: Local-ID: C12573CC004A8E26-922F7B2EB5B8D78CC12573C4004C5B93-HerzogEG2007 %I Blackwell %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. Many existing global illumination approaches either quickly compute an overly approximate solution or perform an orders of magnitude slower computation to obtain high-quality results for the indirect illumination. The proposed method improves photon density estimation and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our photon splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 503 - 513 %I Blackwell %B Computer Graphics Forum %@ false
Herzog, R., Havran, V., Kinuwaki, S., Myszkowski, K., and Seidel, H.-P. 2007b. Global Illumination using Photon Ray Splatting. Max-Planck-Institut für Informatik, Saarbrücken, Germany.
Abstract
We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.
Export
BibTeX
@techreport{HerzogReport2007, TITLE = {Global Illumination using Photon Ray Splatting}, AUTHOR = {Herzog, Robert and Havran, Vlastimil and Kinuwaki, Shinichi and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, NUMBER = {MPI-I-2007-4-007}, LOCALID = {Local-ID: C12573CC004A8E26-88919E23BF524D6AC12573C4005B8D41-HerzogReport2007}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken, Germany}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space.}, TYPE = {Research Report}, }
Endnote
%0 Report %A Herzog, Robert %A Havran, Vlastimil %A Kinuwaki, Shinichi %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society International Max Planck Research School, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global Illumination using Photon Ray Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F57-6 %F EDOC: 356502 %F OTHER: Local-ID: C12573CC004A8E26-88919E23BF524D6AC12573C4005B8D41-HerzogReport2007 %Y Max-Planck-Institut für Informatik %C Saarbrücken, Germany %D 2007 %P 66 p. %X We present a novel framework for efficiently computing the indirect illumination in diffuse and moderately glossy scenes using density estimation techniques. A vast majority of existing global illumination approaches either quickly computes an approximate solution, which may not be adequate for previews, or performs a much more time-consuming computation to obtain high-quality results for the indirect illumination. Our method improves photon density estimation, which is an approximate solution, and leads to significantly better visual quality in particular for complex geometry, while only slightly increasing the computation time. We perform direct splatting of photon rays, which allows us to use simpler search data structures. Our novel lighting computation is derived from basic radiometric theory and requires only small changes to existing photon splatting approaches. Since our density estimation is carried out in ray space rather than on surfaces, as in the commonly used photon mapping algorithm, the results are more robust against geometrically incurred sources of bias. This holds also in combination with final gathering where photon mapping often overestimates the illumination near concave geometric features. In addition, we show that our splatting technique can be extended to handle moderately glossy surfaces and can be combined with traditional irradiance caching for sparse sampling and filtering in image space. %B Research Report
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007a. Contrast Restoration by Adaptive Countershading. Eurographics 2007, Blackwell.
Abstract
We address the problem of communicating contrasts in images degraded with respect to their original due to processing with computer graphics algorithms. Such degradation can happen during the tone mapping of high dynamic range images, or while rendering scenes with low contrast shaders or with poor lighting. Inspired by a family of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by modulating brightness at the edges to create countershading profiles. We generalize unsharp masking by coupling it with a multi-resolution local contrast metric to automatically create the countershading profiles from the sub-band components which are individually adjusted to each corrected feature to best enhance contrast with respect to the reference. Additionally, we employ a visual detection model to assure that our enhancements are not perceived as objectionable halo artifacts. The overall appearance of images remains mostly unchanged and the enhancement is achieved within the available dynamic range. We use our method to post-correct tone mapped images and improve images using their depth information.
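As a rough sketch of the generalized unsharp masking above (assumed fixed gain; the paper instead adjusts each sub-band per corrected feature with a local contrast metric and caps the correction with a visual detection model so halos stay invisible):

import numpy as np
from scipy.ndimage import gaussian_filter

def countershade(degraded, reference, sigmas=(2, 4, 8, 16), gain=0.5):
    # Per Gaussian sub-band, add back a fraction of the contrast the
    # degraded image lost relative to the reference; the corrections
    # concentrate around edges, forming countershading profiles.
    deg = np.asarray(degraded, dtype=float)
    ref = np.asarray(reference, dtype=float)
    out = deg.copy()
    for s in sigmas:
        band_ref = ref - gaussian_filter(ref, s)
        band_deg = deg - gaussian_filter(deg, s)
        out += gain * (band_ref - band_deg)
    return np.clip(out, 0.0, 1.0)  # images assumed in [0, 1]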
Export
BibTeX
@inproceedings{KrawczykEG2007, TITLE = {Contrast Restoration by Adaptive Countershading}, AUTHOR = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, LOCALID = {Local-ID: C12573CC004A8E26-3AAFD82CDB4A81EBC12573C4005DCCE4-KrawczykEG2007}, PUBLISHER = {Blackwell}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {We address the problem of communicating contrasts in images degraded with respect to their original due to processing with computer graphics algorithms. Such degradation can happen during the tone mapping of high dynamic range images, or while rendering scenes with low contrast shaders or with poor lighting. Inspired by a family of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by modulating brightness at the edges to create countershading profiles. We generalize unsharp masking by coupling it with a multi-resolution local contrast metric to automatically create the countershading profiles from the sub-band components which are individually adjusted to each corrected feature to best enhance contrast with respect to the reference. Additionally, we employ a visual detection model to assure that our enhancements are not perceived as objectionable halo artifacts. The overall appearance of images remains mostly unchanged and the enhancement is achieved within the available dynamic range. We use our method to post-correct tone mapped images and improve images using their depth information.}, BOOKTITLE = {Eurographics 2007}, EDITOR = {Cohen-Or, Daniel and Slavik, Pavel}, PAGES = {581--590}, SERIES = {Computer Graphics Forum}, }
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Contrast Restoration by Adaptive Countershading : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1EBC-C %F EDOC: 356532 %F OTHER: Local-ID: C12573CC004A8E26-3AAFD82CDB4A81EBC12573C4005DCCE4-KrawczykEG2007 %I Blackwell %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %X We address the problem of communicating contrasts in images degraded with respect to their original due to processing with computer graphics algorithms. Such degradation can happen during the tone mapping of high dynamic range images, or while rendering scenes with low contrast shaders or with poor lighting. Inspired by a family of known perceptual illusions: Craik-O'Brien-Cornsweet, we enhance contrasts by modulating brightness at the edges to create countershading profiles. We generalize unsharp masking by coupling it with a multi-resolution local contrast metric to automatically create the countershading profiles from the sub-band components which are individually adjusted to each corrected feature to best enhance contrast with respect to the reference. Additionally, we employ a visual detection model to assure that our enhancements are not perceived as objectionable halo artifacts. The overall appearance of images remains mostly unchanged and the enhancement is achieved within the available dynamic range. We use our method to post-correct tone mapped images and improve images using their depth information. %B Eurographics 2007 %E Cohen-Or, Daniel; Slavik, Pavel %P 581 - 590 %I Blackwell %B Computer Graphics Forum
Krawczyk, G., Myszkowski, K., and Brosch, D. 2007b. HDR Tone Mapping. In: High-Dynamic-Range (HDR) Vision. Springer, Berlin.
Export
BibTeX
@incollection{Myszkowski2007HDRbook2, TITLE = {{HDR} Tone Mapping}, AUTHOR = {Krawczyk, Grzegorz and Myszkowski, Karol and Brosch, Daniel}, LANGUAGE = {eng}, ISSN = {1437-0387}, ISBN = {978-3-540-44432-9; 978-3-540-44433-6}, DOI = {10.1007/978-3-540-44433-6_11}, PUBLISHER = {Springer}, ADDRESS = {Berlin}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {High-Dynamic-Range (HDR) Vision}, EDITOR = {Hoefflinger, Bernd}, PAGES = {193--210}, SERIES = {Springer Series in Advanced Microelectronics}, VOLUME = {26}, }
Endnote
%0 Book Section %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Brosch, Daniel %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society External Organizations %T HDR Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D418-7 %R 10.1007/978-3-540-44433-6_11 %D 2007 %B High-Dynamic-Range (HDR) Vision %E Hoefflinger, Bernd %P 193 - 210 %I Springer %C Berlin %@ 978-3-540-44432-9 978-3-540-44433-6 %S Springer Series in Advanced Microelectronics %N 26 %@ false
Lensch, H.P.A., Goesele, M., and Müller, G. 2007. Capturing Reflectance - From Theory to Practice. Eurographics 2007 Tutorial Notes, Eurographics Association.
Export
BibTeX
@inproceedings{Lensch:2007:CRT, TITLE = {Capturing Reflectance -- From Theory to Practice}, AUTHOR = {Lensch, Hendrik P. A. and Goesele, Michael and M{\"u}ller, Gero}, LANGUAGE = {eng}, ISSN = {1017-4656}, LOCALID = {Local-ID: C12573CC004A8E26-8516249336F3C8C9C12573C9003DA89D-Lensch:2007:CRT}, PUBLISHER = {Eurographics Association}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {Eurographics 2007 Tutorial Notes}, EDITOR = {Myszkowski, Karol and Havran, Vlastimil}, PAGES = {485--556}, ADDRESS = {Prague, Czech Republic}, }
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Goesele, Michael %A Müller, Gero %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations External Organizations %T Capturing Reflectance - From Theory to Practice : Tutorial Notes EUROGRAPHICS 2007 Tutorial 6 %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1E7E-7 %F EDOC: 356507 %F OTHER: Local-ID: C12573CC004A8E26-8516249336F3C8C9C12573C9003DA89D-Lensch:2007:CRT %D 2007 %B Eurographics 2007 %Z date of event: 2007-09-03 - 2007-09-07 %C Prague, Czech Republic %B Eurographics 2007 Tutorial Notes %E Myszkowski, Karol; Havran, Vlastimil %P 485 - 556 %I Eurographics Association %@ false
Mantiuk, R., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007. High Dynamic Range Image and Video Compression - Fidelity Matching Human Visual Performance. IEEE International Conference on Image Processing 2007, ICIP 2007. - Vol. 1, IEEE.
Abstract
Vast majority of digital images and video material stored today can capture only a fraction of visual information visible to the human eye and does not offer sufficient quality to fully exploit capabilities of new display devices. High dynamic range (HDR) image and video formats encode the full visible range of luminance and color gamut, thus offering ultimate fidelity, limited only by the capabilities of the human eye and not by any existing technology. In this paper we demonstrate how existing image and video compression standards can be extended to encode HDR content efficiently. This is achieved by a custom color space for encoding HDR pixel values that is derived from the visual performance data. We also demonstrate how HDR image and video compression can be designed so that it is backward compatible with existing formats.
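The core of such an extension is a luminance encoding whose quantization steps stay below the visibility threshold over the whole visible range. A hedged sketch follows (a pure logarithm, i.e. the constant Weber-fraction special case; the paper derives its curve from measured visual performance data, and the 12-bit depth and luminance range here are assumptions):

import numpy as np

L_MIN, L_MAX, BITS = 1e-4, 1e8, 12  # assumed visible luminance range, cd/m^2

def encode_luma(L):
    # Map luminance to integer code values so that each code step is a
    # roughly constant relative (Weber) change in luminance.
    t = np.log(np.clip(L, L_MIN, L_MAX) / L_MIN) / np.log(L_MAX / L_MIN)
    return np.round(t * (2 ** BITS - 1)).astype(np.uint16)

def decode_luma(code):
    # Inverse mapping: code values back to absolute luminance.
    t = code / (2 ** BITS - 1)
    return L_MIN * (L_MAX / L_MIN) ** t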
Export
BibTeX
@inproceedings{Mantiuk2007hdrivc, TITLE = {High Dynamic Range Image and Video Compression -- Fidelity Matching Human Visual Performance}, AUTHOR = {Mantiuk, Rafa{\l} and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4244-1437-6}, DOI = {10.1109/ICIP.2007.4378878}, LOCALID = {Local-ID: C12573CC004A8E26-8908FB59F4C64796C125739F003CC9EF-Mantiuk2007hdrivc}, PUBLISHER = {IEEE}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {Vast majority of digital images and video material stored today can capture only a fraction of visual information visible to the human eye and does not offer sufficient quality to fully exploit capabilities of new display devices. High dynamic range (HDR) image and video formats encode the full visible range of luminance and color gamut, thus offering ultimate fidelity, limited only by the capabilities of the human eye and not by any existing technology. In this paper we demonstrate how existing image and video compression standards can be extended to encode HDR content efficiently. This is achieved by a custom color space for encoding HDR pixel values that is derived from the visual performance data. We also demonstrate how HDR image and video compression can be designed so that it is backward compatible with existing formats.}, BOOKTITLE = {IEEE International Conference on Image Processing 2007, ICIP 2007. -- Vol. 1}, PAGES = {9--12}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafał %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T High Dynamic Range Image and Video Compression - Fidelity Matching Human Visual Performance : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1F68-F %F EDOC: 356576 %R 10.1109/ICIP.2007.4378878 %F OTHER: Local-ID: C125675300671F7B-8908FB59F4C64796C125739F003CC9EF-Mantiuk2007hdrivc %I IEEE %D 2007 %B IEEE International Conference on Image Processing 2007 %Z date of event: 2007-09-16 - 2007-09-19 %C San Antonio, TX, USA %X Vast majority of digital images and video material stored today can capture only a fraction of visual information visible to the human eye and does not offer sufficient quality to fully exploit capabilities of new display devices. High dynamic range (HDR) image and video formats encode the full visible range of luminance and color gamut, thus offering ultimate fidelity, limited only by the capabilities of the human eye and not by any existing technology. In this paper we demonstrate how existing image and video compression standards can be extended to encode HDR content efficiently. This is achieved by a custom color space for encoding HDR pixel values that is derived from the visual performance data. We also demonstrate how HDR image and video compression can be designed so that it is backward compatible with existing formats. %B IEEE International Conference on Image Processing 2007, ICIP 2007. - Vol. 1 %P 9 - 12 %I IEEE %@ 978-1-4244-1437-6
Yoshida, A., Blanz, V., Myszkowski, K., and Seidel, H.-P. 2007a. Testing tone mapping operators with human-perceived reality. Journal of Electronic Imaging 16, 1.
Abstract
A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range (LDR) devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on an LDR monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.
Export
BibTeX
@article{Yoshida_JEI2007, TITLE = {Testing tone mapping operators with human-perceived reality}, AUTHOR = {Yoshida, Akiko and Blanz, Volker and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1017-9909}, DOI = {10.1117/1.2711822}, LOCALID = {Local-ID: C12573CC004A8E26-1BC207A1242FDBC1C1257222003A5012-Yoshida_JEI2007}, YEAR = {2007}, DATE = {2007}, ABSTRACT = {A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range (LDR) devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on an LDR monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.}, JOURNAL = {Journal of Electronic Imaging}, VOLUME = {16}, NUMBER = {1}, PAGES = {1--14}, EID = {013004}, }
Endnote
%0 Journal Article %A Yoshida, Akiko %A Blanz, Volker %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Testing tone mapping operators with human-perceived reality : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-20EF-9 %F EDOC: 356603 %R 10.1117/1.2711822 %F OTHER: Local-ID: C12573CC004A8E26-1BC207A1242FDBC1C1257222003A5012-Yoshida_JEI2007 %D 2007 %* Review method: peer-reviewed %X A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range (LDR) devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual systems (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on an LDR monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes. %J Journal of Electronic Imaging %V 16 %N 1 %& 1 %P 1 - 14 %Z sequence number: 013004 %@ false
Yoshida, A., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2007b. Perceptual Uniformity of Contrast Scaling in Complex Images. APGV 2007, Symposium on Applied Perception in Graphics and Visualization, ACM.
Export
BibTeX
@inproceedings{Yoshida_APGV2007, TITLE = {Perceptual Uniformity of Contrast Scaling in Complex Images}, AUTHOR = {Yoshida, Akiko and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-670-7}, DOI = {10.1145/1272582.1272614}, PUBLISHER = {ACM}, YEAR = {2007}, DATE = {2007}, BOOKTITLE = {APGV 2007, Symposium on Applied Perception in Graphics and Visualization}, EDITOR = {Wallraven, Christian and Sundstedt, Veronica and Fleming, Roland W. and Langer, Michael and Spencer, Stephen N.}, PAGES = {137--137}, ADDRESS = {T{\"u}bingen, Germany}, }
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Uniformity of Contrast Scaling in Complex Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0023-D1B0-5 %R 10.1145/1272582.1272614 %D 2007 %B Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2007-07-25 - 2007-07-27 %C Tübingen, Germany %B APGV 2007 %E Wallraven, Christian; Sundstedt, Veronica; Fleming, Roland W.; Langer, Michael; Spencer, Stephen N. %P 137 - 137 %I ACM %@ 978-1-59593-670-7
2006
Efremov, A., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006. Design and evaluation of backward compatible high dynamic range video compression. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices.
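A minimal sketch of the decorrelation step (tone_map and reconstruct are assumed placeholder interfaces, not the report's actual components): the tone-mapped frames form the legacy MPEG stream, and only what the LDR-to-HDR prediction misses is kept as a residual stream.

import numpy as np

def decompose(hdr_luma, tone_map, reconstruct):
    # hdr_luma: positive HDR luminance; tone_map(): HDR -> 8-bit LDR
    # frame for the legacy stream; reconstruct(): its approximate
    # inverse, predicting HDR luminance from LDR code values.
    ldr = tone_map(hdr_luma)                                 # backward-compatible stream
    residual = np.log(hdr_luma) - np.log(reconstruct(ldr))   # enhancement stream
    return ldr, residual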
Export
BibTeX
@techreport{EfremovMantiukMyszkowskiSeidel, TITLE = {Design and evaluation of backward compatible high dynamic range video compression}, AUTHOR = {Efremov, Alexander and Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-001}, NUMBER = {MPI-I-2006-4-001}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices.}, TYPE = {Research Report / Max-Planck-Institut f{\"u}r Informatik}, }
Endnote
%0 Report %A Efremov, Alexander %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Design and evaluation of backward compatible high dynamic range video compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6811-0 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2006-4-001 %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2006 %P 50 p. %X In this report we describe the details of the backward compatible high dynamic range (HDR) video compression algorithm. The algorithm is designed to facilitate a smooth transition from standard low dynamic range (LDR) video to high fidelity high dynamic range content. The HDR and the corresponding LDR video frames are decorrelated and then compressed into a single MPEG stream, which can be played on both existing DVD players and HDR-enabled devices. %B Research Report / Max-Planck-Institut für Informatik
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2006. Computational Model of Lightness Perception in High Dynamic Range Imaging. Human Vision and Electronic Imaging XI, SPIE.
Abstract
An anchoring theory of lightness perception by Gilchrist et al. [1999] explains many characteristics of human visual system such as lightness constancy and its spectacular failures which are important in the perception of images. The principal concept of this theory is the perception of complex scenes in terms of groups of consistent areas (frameworks). Such areas, following the gestalt theorists, are defined by the regions of common illumination. The key aspect of the image perception is the estimation of lightness within each framework through the anchoring to the luminance perceived as white, followed by the computation of the global lightness. In this paper we provide a computational model for automatic decomposition of HDR images into frameworks. We derive a tone mapping operator which predicts lightness perception of the real world scenes and aims at its accurate reproduction on low dynamic range displays. Furthermore, such a decomposition into frameworks opens new grounds for local image analysis in view of human perception.
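For intuition, a simplified sketch of the anchoring step (the framework decomposition, which the paper computes automatically from the HDR image, is taken here as a given label map, and a 95th-percentile "white" stands in for the anchoring rule):

import numpy as np

def lightness_anchored(luminance, frameworks):
    # frameworks: integer label map grouping pixels under common
    # illumination; within each framework, lightness is luminance
    # relative to the locally anchored "white" on a log scale.
    lightness = np.zeros(luminance.shape)
    for f in np.unique(frameworks):
        mask = frameworks == f
        anchor = np.percentile(luminance[mask], 95)  # local white
        lightness[mask] = np.log10(luminance[mask] / anchor)
    return lightness  # 0 at white, negative below it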
Export
BibTeX
@inproceedings{Krawczyk-et-al_ASEI06, TITLE = {Computational Model of Lightness Perception in High Dynamic Range Imaging}, AUTHOR = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, DOI = {10.1117/12.639266}, LOCALID = {Local-ID: C125675300671F7B-E9AB6DE505E34EABC1257149002AB5F8-Krawczyk2006spie}, PUBLISHER = {SPIE}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {An anchoring theory of lightness perception by Gilchrist et al. [1999] explains many characteristics of human visual system such as lightness constancy and its spectacular failures which are important in the perception of images. The principal concept of this theory is the perception of complex scenes in terms of groups of consistent areas (frameworks). Such areas, following the gestalt theorists, are defined by the regions of common illumination. The key aspect of the image perception is the estimation of lightness within each framework through the anchoring to the luminance perceived as white, followed by the computation of the global lightness. In this paper we provide a computational model for automatic decomposition of HDR images into frameworks. We derive a tone mapping operator which predicts lightness perception of the real world scenes and aims at its accurate reproduction on low dynamic range displays. Furthermore, such a decomposition into frameworks opens new grounds for local image analysis in view of human perception.}, BOOKTITLE = {Human Vision and Electronic Imaging XI}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.}, PAGES = {1--12}, SERIES = {SPIE}, VOLUME = {6057}, ADDRESS = {San Jose, CA, USA}, }
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Computational Model of Lightness Perception in High Dynamic Range Imaging : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2258-5 %F EDOC: 314537 %F OTHER: Local-ID: C125675300671F7B-E9AB6DE505E34EABC1257149002AB5F8-Krawczyk2006spie %R 10.1117/12.639266 %D 2006 %B IS&T/SPIE's 18th Annual Symposium on Electronic Imaging %Z date of event: 2006-01-15 - 2006-01-19 %C San Jose, CA, USA %X An anchoring theory of lightness perception by Gilchrist et al. [1999] explains many characteristics of human visual system such as lightness constancy and its spectacular failures which are important in the perception of images. The principal concept of this theory is the perception of complex scenes in terms of groups of consistent areas (frameworks). Such areas, following the gestalt theorists, are defined by the regions of common illumination. The key aspect of the image perception is the estimation of lightness within each framework through the anchoring to the luminance perceived as white, followed by the computation of the global lightness. In this paper we provide a computational model for automatic decomposition of HDR images into frameworks. We derive a tone mapping operator which predicts lightness perception of the real world scenes and aims at its accurate reproduction on low dynamic range displays. Furthermore, such a decomposition into frameworks opens new grounds for local image analysis in view of human perception. %B Human Vision and Electronic Imaging XI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 1 - 12 %I SPIE %B SPIE %N 6057 %@ false
Mantiuk, R., Efremov, A., Myszkowski, K., and Seidel, H.-P. 2006a. Backward Compatible High Dynamic Range MPEG Video Compression. ACM Transactions on Graphics, ACM.
Abstract
To embrace the imminent transition from traditional low-contrast video (LDR) content to superior high dynamic range (HDR) content, we propose a novel backward compatible HDR video compression (HDR MPEG) method. We introduce a compact reconstruction function that is used to decompose an HDR video stream into a residual stream and a standard LDR stream, which can be played on existing MPEG decoders, such as DVD players. The reconstruction function is finely tuned to the content of each HDR frame to achieve strong decorrelation between the LDR and residual streams, which minimizes the amount of redundant information. The size of the residual stream is further reduced by removing invisible details prior to compression using our HDR-enabled filter, which models luminance adaptation, contrast sensitivity, and visual masking based on the HDR content. Designed especially for DVD movie distribution, our HDR MPEG compression method features low storage requirements for HDR content resulting in a 30% size increase to an LDR video sequence. The proposed compression method does not impose restrictions or modify the appearance of the LDR or HDR video. This is important for backward compatibility of the LDR stream with current DVD appearance, and also enables independent fine tuning, tone mapping, and color grading of both streams.
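The reconstruction function can be pictured as a per-frame lookup table from 8-bit LDR code values to expected HDR log-luminance; the binned-mean construction below is an assumed simplification of how it is tuned to each frame's content:

import numpy as np

def reconstruction_function(hdr_log_luma, ldr):
    # For every LDR code value, record the mean HDR log-luminance of
    # the pixels tone mapped to it; the decoder then predicts HDR from
    # the LDR stream and only the residual needs to be transmitted.
    rf = np.zeros(256)
    for code in range(256):
        mask = ldr == code
        if mask.any():
            rf[code] = hdr_log_luma[mask].mean()
    return rf  # residual = hdr_log_luma - rf[ldr]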
Export
BibTeX
@inproceedings{Mantiuk-et-al_SIGGRAPH06, TITLE = {Backward Compatible High Dynamic Range {MPEG} Video Compression}, AUTHOR = {Mantiuk, Rafa{\l} and Efremov, Alexander and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1141911.1141946}, LOCALID = {Local-ID: C125675300671F7B-1B2B94EF48903F44C1257149002EEC16-Mantiuk2006:hdrmpeg}, PUBLISHER = {ACM}, PUBLISHER = {Association for Computing Machinery}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {To embrace the imminent transition from traditional low-contrast video (LDR) content to superior high dynamic range (HDR) content, we propose a novel backward compatible HDR video compression (HDR~MPEG) method. We introduce a compact reconstruction function that is used to decompose an HDR video stream into a residual stream and a standard LDR stream, which can be played on existing MPEG decoders, such as DVD players. The reconstruction function is finely tuned to the content of each HDR frame to achieve strong decorrelation between the LDR and residual streams, which minimizes the amount of redundant information. The size of the residual stream is further reduced by removing invisible details prior to compression using our HDR-enabled filter, which models luminance adaptation, contrast sensitivity, and visual masking based on the HDR content. Designed especially for DVD movie distribution, our HDR~MPEG compression method features low storage requirements for HDR content resulting in a 30\% size increase to an LDR video sequence. The proposed compression method does not impose restrictions or modify the appearance of the LDR or HDR video. This is important for backward compatibility of the LDR stream with current DVD appearance, and also enables independent fine tuning, tone mapping, and color grading of both streams.}, BOOKTITLE = {Proceedings of ACM SIGGRAPH 2006}, EDITOR = {Dorsey, Julie}, PAGES = {713--723}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {25}, ISSUE = {3}, ADDRESS = {Boston, MA, USA}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafał %A Efremov, Alexander %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Backward Compatible High Dynamic Range MPEG Video Compression : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-223A-9 %F EDOC: 314605 %F OTHER: Local-ID: C125675300671F7B-1B2B94EF48903F44C1257149002EEC16-Mantiuk2006:hdrmpeg %R 10.1145/1141911.1141946 %D 2006 %B SIGGRAPH 2006: 33rd Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2006-07-30 - 2006-08-03 %C Boston, MA, USA %X To embrace the imminent transition from traditional low-contrast video (LDR) content to superior high dynamic range (HDR) content, we propose a novel backward compatible HDR video compression (HDR MPEG) method. We introduce a compact reconstruction function that is used to decompose an HDR video stream into a residual stream and a standard LDR stream, which can be played on existing MPEG decoders, such as DVD players. The reconstruction function is finely tuned to the content of each HDR frame to achieve strong decorrelation between the LDR and residual streams, which minimizes the amount of redundant information. The size of the residual stream is further reduced by removing invisible details prior to compression using our HDR-enabled filter, which models luminance adaptation, contrast sensitivity, and visual masking based on the HDR content. Designed especially for DVD movie distribution, our HDR MPEG compression method features low storage requirements for HDR content resulting in a 30% size increase to an LDR video sequence. The proposed compression method does not impose restrictions or modify the appearance of the LDR or HDR video. This is important for backward compatibility of the LDR stream with current DVD appearance, and also enables independent fine tuning, tone mapping, and color grading of both streams. %B Proceedings of ACM SIGGRAPH 2006 %E Dorsey, Julie %P 713 - 723 %I ACM %J ACM Transactions on Graphics %V 25 %N 3 %I Association for Computing Machinery %@ false
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006b. Lossy Compression of High Dynamic Range Images and Video. Human Vision and Electronic Imaging XI, SPIE.
Abstract
Most common image and video formats have been designed to work with existing output devices, like LCD or CRT monitors. As display technology makes progress, these formats no longer represent the data that new devices can display. Therefore a shift towards higher precision image and video formats is imminent. To overcome limitations of common image and video formats, such as JPEG, PNG or MPEG, we propose a novel color space, which can accommodate an extended dynamic range and guarantees the precision that is below the visibility threshold. The proposed color space, which is derived from contrast detection data, can represent the full range of luminance values and the complete color gamut that is visible to the human eye. We show that only minor changes are required to the existing encoding algorithms to accommodate the new color space and therefore greatly enhance information content of the visual data. We demonstrate this with two compression algorithms for High Dynamic Range (HDR) visual data: for static images and for video. We argue that the proposed HDR representation is a simple and universal way to encode visual data independent of the display or capture technology.
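A quick numerical check of the "precision below the visibility threshold" property for a log-type encoding (the 12-bit depth, the luminance range, and the ~1% Weber threshold are illustrative assumptions, not the paper's fitted values):

import numpy as np

L_MIN, L_MAX, BITS = 1e-4, 1e8, 12     # assumed range and bit depth

def decode(code):
    # inverse of a logarithmic code-value mapping over [L_MIN, L_MAX]
    t = code / (2 ** BITS - 1)
    return L_MIN * (L_MAX / L_MIN) ** t

codes = np.arange(2 ** BITS - 1)
steps = decode(codes + 1) / decode(codes) - 1.0   # per-step relative change
print(steps.max() < 0.01)   # True: every step stays under a 1% threshold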
Export
BibTeX
@inproceedings{Mantiuk-et-al_SPIE06, TITLE = {Lossy Compression of High Dynamic Range Images and Video}, AUTHOR = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, DOI = {10.1117/12.639140}, LOCALID = {Local-ID: C125675300671F7B-313F8F727ABF44C0C125713800369E82-Mantiuk2005:LossyCompression}, PUBLISHER = {SPIE}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {Most common image and video formats have been designed to work with existing output devices, like LCD or CRT monitors. As display technology makes progress, these formats no longer represent the data that new devices can display. Therefore a shift towards higher precision image and video formats is imminent. To overcome limitations of common image and video formats, such as JPEG, PNG or MPEG, we propose a novel color space, which can accommodate an extended dynamic range and guarantees the precision that is below the visibility threshold. The proposed color space, which is derived from contrast detection data, can represent the full range of luminance values and the complete color gamut that is visible to the human eye. We show that only minor changes are required to the existing encoding algorithms to accommodate the new color space and therefore greatly enhance information content of the visual data. We demonstrate this with two compression algorithms for High Dynamic Range (HDR) visual data: for static images and for video. We argue that the proposed HDR representation is a simple and universal way to encode visual data independent of the display or capture technology.}, BOOKTITLE = {Human Vision and Electronic Imaging XI}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.}, SERIES = {SPIE}, VOLUME = {6057}, ADDRESS = {San Jose, USA}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lossy Compression of High Dynamic Range Images and Video : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-235C-8 %F EDOC: 314546 %F OTHER: Local-ID: C125675300671F7B-313F8F727ABF44C0C125713800369E82-Mantiuk2005:LossyCompression %R 10.1117/12.639140 %D 2006 %B Electronic Imaging 2006 %Z date of event: 2006-01-15 - 2006-01-19 %C San Jose, USA %X Most common image and video formats have been designed to work with existing output devices, like LCD or CRT monitors. As display technology makes progress, these formats no longer represent the data that new devices can display. Therefore a shift towards higher precision image and video formats is imminent. To overcome limitations of common image and video formats, such as JPEG, PNG or MPEG, we propose a novel color space, which can accommodate an extended dynamic range and guarantees the precision that is below the visibility threshold. The proposed color space, which is derived from contrast detection data, can represent the full range of luminance values and the complete color gamut that is visible to the human eye. We show that only minor changes are required to the existing encoding algorithms to accommodate the new color space and therefore greatly enhance information content of the visual data. We demonstrate this with two compression algorithms for High Dynamic Range (HDR) visual data: for static images and for video. We argue that the proposed HDR representation is a simple and universal way to encode visual data independent of the display or capture technology. %B Human Vision and Electronic Imaging XI %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %I SPIE %B SPIE %N 6057
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006c. A Perceptual Framework for Contrast Processing of High Dynamic Range Images. ACM Transactions on Applied Perception 3, 3.
Abstract
Image processing often involves an image transformation into a domain that is better correlated with visual perception, such as the wavelet domain, image pyramids, multi-scale contrast representations, contrast in retinex algorithms, and chroma, lightness and colorfulness predictors in color appearance models. Many of these transformations are not ideally suited for image processing that significantly modifies an image. For example, the modification of a single band in a multi-scale model leads to an unrealistic image with severe halo artifacts. Inspired by gradient domain methods we derive a framework that imposes constraints on the entire set of contrasts in an image for a full range of spatial frequencies. This way, even severe image modifications do not reverse the polarity of contrast. The strengths of the framework are demonstrated by aggressive contrast enhancement and a visually appealing tone mapping which does not introduce artifacts. Additionally, we perceptually linearize contrast magnitudes using a custom transducer function. The transducer function has been derived especially for the purpose of HDR images, based on the contrast discrimination measurements for high contrast stimuli.
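A compact sketch of the two ingredients (both in assumed forms: the paper builds its contrast representation from low-pass ratios and fits the transducer to contrast discrimination data for high-contrast stimuli):

import numpy as np
from scipy.ndimage import gaussian_filter

def contrast_bands(log_luma, levels=5, sigma=2.0):
    # Band-pass differences of log-luminance approximate contrast per
    # spatial frequency; the framework constrains all of these bands
    # jointly, so edits cannot reverse contrast polarity.
    bands, current = [], log_luma
    for _ in range(levels):
        low = gaussian_filter(current, sigma)
        bands.append(current - low)
        current = low
    return bands, current       # contrast bands + base band

def transducer(contrast, k=1.0, p=0.5):
    # Compressive response: maps physical contrast to a perceptually
    # more uniform magnitude (assumed power-law form).
    return np.sign(contrast) * k * np.abs(contrast) ** p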
Export
BibTeX
@article{Mantiuk-et-al_TAP06, TITLE = {A Perceptual Framework for Contrast Processing of High Dynamic Range Images}, AUTHOR = {Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {1544-3558}, DOI = {10.1145/1166087.1166095}, LOCALID = {Local-ID: C125675300671F7B-43FC98F7A2FC192EC1257149002E3B9A-Mantiuk2006:ContrastDomain}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {Image processing often involves an image transformation into a domain that is better correlated with visual perception, such as the wavelet domain, image pyramids, multi-scale contrast representations, contrast in retinex algorithms, and chroma, lightness and colorfulness predictors in color appearance models. Many of these transformations are not ideally suited for image processing that significantly modifies an image. For example, the modification of a single band in a multi-scale model leads to an unrealistic image with severe halo artifacts. Inspired by gradient domain methods we derive a framework that imposes constraints on the entire set of contrasts in an image for a full range of spatial frequencies. This way, even severe image modifications do not reverse the polarity of contrast. The strengths of the framework are demonstrated by aggressive contrast enhancement and a visually appealing tone mapping which does not introduce artifacts. Additionally, we perceptually linearize contrast magnitudes using a custom transducer function. The transducer function has been derived especially for the purpose of HDR images, based on the contrast discrimination measurements for high contrast stimuli.}, JOURNAL = {ACM Transactions on Applied Perception}, VOLUME = {3}, NUMBER = {3}, PAGES = {286--308}, }
Endnote
%0 Journal Article %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Framework for Contrast Processing of High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2214-E %F EDOC: 314382 %F OTHER: Local-ID: C125675300671F7B-43FC98F7A2FC192EC1257149002E3B9A-Mantiuk2006:ContrastDomain %R 10.1145/1166087.1166095 %D 2006 %* Review method: peer-reviewed %X Image processing often involves an image transformation into a domain that is better correlated with visual perception, such as the wavelet domain, image pyramids, multi-scale contrast representations, contrast in retinex algorithms, and chroma, lightness and colorfulness predictors in color appearance models. Many of these transformations are not ideally suited for image processing that significantly modifies an image. For example, the modification of a single band in a multi-scale model leads to an unrealistic image with severe halo artifacts. Inspired by gradient domain methods we derive a framework that imposes constraints on the entire set of contrasts in an image for a full range of spatial frequencies. This way, even severe image modifications do not reverse the polarity of contrast. The strengths of the framework are demonstrated by aggressive contrast enhancement and a visually appealing tone mapping which does not introduce artifacts. Additionally, we perceptually linearize contrast magnitudes using a custom transducer function. The transducer function has been derived especially for the purpose of HDR images, based on the contrast discrimination measurements for high contrast stimuli. %J ACM Transactions on Applied Perception %V 3 %N 3 %& 286 %P 286 - 308 %I Association for Computing Machinery %C New York, NY %@ false
Smith, K., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2006. Beyond Tone Mapping: Enhanced Depiction of Tone Mapped HDR Images. Computer Graphics Forum, Blackwell.
Abstract
High Dynamic Range (HDR) images capture the full range of luminance present in real world scenes, and unlike Low Dynamic Range (LDR) images, can simultaneously contain detailed information in the deepest of shadows and the brightest of light sources. For display or aesthetic purposes, it is often necessary to perform tone mapping, which creates LDR depictions of HDR images at the cost of contrast information loss. The purpose of this work is two-fold: to analyze a displayed LDR image against its original HDR counterpart in terms of perceived contrast distortion, and to enhance the LDR depiction with perceptually driven colour adjustments to restore the original HDR contrast information. For analysis, we present a novel algorithm for the characterization of tone mapping distortion in terms of observed loss of global contrast, and loss of contour and texture details. We classify existing tone mapping operators accordingly. We measure both distortions with perceptual metrics that enable the automatic and meaningful enhancement of LDR depictions. For image enhancement, we identify artistic and photographic colour techniques from which we derive adjustments that create contrast with colour. The enhanced LDR image is an improved depiction of the original HDR image with restored contrast information.
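As an assumed, much-reduced stand-in for the global-contrast part of such an analysis (the paper's metrics additionally localize the loss of contour and texture detail):

import numpy as np

def global_contrast_loss(hdr_luma, ldr_luma, eps=1e-6):
    # Take the spread of log-luminance as global contrast and report
    # the fraction the tone-mapped depiction failed to retain.
    c_hdr = np.std(np.log10(hdr_luma + eps))
    c_ldr = np.std(np.log10(ldr_luma + eps))
    return 1.0 - c_ldr / c_hdr   # 0 = fully preserved, 1 = all lost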
Export
BibTeX
@inproceedings{Smith-et-al_EG06, TITLE = {Beyond Tone Mapping: Enhanced Depiction of Tone Mapped {HDR} Images}, AUTHOR = {Smith, Kaleigh and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, ISBN = {ISSN: 0167-7055}, DOI = {10.1111/j.1467-8659.2006.00962.x}, LOCALID = {Local-ID: C125675300671F7B-8B783A77FDD3AB10C125722F003AF5B2-Smith2006eg}, PUBLISHER = {Blackwell}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {High Dynamic Range (HDR) images capture the full range of luminance present in real world scenes, and unlike Low Dynamic Range (LDR) images, can simultaneously contain detailed information in the deepest of shadows and the brightest of light sources. For display or aesthetic purposes, it is often necessary to perform tone mapping, which creates LDR depictions of HDR images at the cost of contrast information loss. The purpose of this work is two-fold: to analyze a displayed LDR image against its original HDR counterpart in terms of perceived contrast distortion, and to enhance the LDR depiction with perceptually driven colour adjustments to restore the original HDR contrast information. For analysis, we present a novel algorithm for the characterization of tone mapping distortion in terms of observed loss of global contrast, and loss of contour and texture details. We classify existing tone mapping operators accordingly. We measure both distortions with perceptual metrics that enable the automatic and meaningful enhancement of LDR depictions. For image enhancement, we identify artistic and photographic colour techniques from which we derive adjustments that create contrast with colour. The enhanced LDR image is an improved depiction of the original HDR image with restored contrast information.}, BOOKTITLE = {EUROGRAPHICS 2006 Proceedings}, EDITOR = {Szirmay-Kalos, L{\'a}szl{\'o} and Gr{\"o}ller, Eduard}, PAGES = {427--438}, JOURNAL = {Computer Graphics Forum}, VOLUME = {25}, ISSUE = {3}, ADDRESS = {Vienna, Austria}, }
Endnote
%0 Conference Proceedings %A Smith, Kaleigh %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Beyond Tone Mapping: Enhanced Depiction of Tone Mapped HDR Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-223F-0 %F EDOC: 314503 %F OTHER: Local-ID: C125675300671F7B-8B783A77FDD3AB10C125722F003AF5B2-Smith2006eg %R 10.1111/j.1467-8659.2006.00962.x %D 2006 %B The European Association for Computer Graphics 27th Annual Conference %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X High Dynamic Range (HDR) images capture the full range of luminance present in real world scenes, and unlike Low Dynamic Range (LDR) images, can simultaneously contain detailed information in the deepest of shadows and the brightest of light sources. For display or aesthetic purposes, it is often necessary to perform tone mapping, which creates LDR depictions of HDR images at the cost of contrast information loss. The purpose of this work is two-fold: to analyze a displayed LDR image against its original HDR counterpart in terms of perceived contrast distortion, and to enhance the LDR depiction with perceptually driven colour adjustments to restore the original HDR contrast information. For analysis, we present a novel algorithm for the characterization of tone mapping distortion in terms of observed loss of global contrast, and loss of contour and texture details. We classify existing tone mapping operators accordingly. We measure both distortions with perceptual metrics that enable the automatic and meaningful enhancement of LDR depictions. For image enhancement, we identify artistic and photographic colour techniques from which we derive adjustments that create contrast with colour. The enhanced LDR image is an improved depiction of the original HDR image with restored contrast information. %B EUROGRAPHICS 2006 Proceedings %E Szirmay-Kalos, László; Gröller, Eduard %P 427 - 438 %I Blackwell %@ 0167-7055 %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
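The global-contrast component of the analysis above can be illustrated with a short sketch. The following Python snippet is illustrative only: the percentile-based measure and the name global_contrast_loss are assumptions of this sketch, not the paper's perceptual metric. It compares the robust log-luminance range of an HDR original with that of its tone-mapped LDR depiction.

import numpy as np

def global_contrast_loss(hdr_lum, ldr_lum, lo=1.0, hi=99.0):
    # robust log10 luminance range between the lo-th and hi-th percentiles
    def log_range(lum):
        p_lo, p_hi = np.percentile(lum[lum > 0], [lo, hi])
        return np.log10(p_hi / p_lo)
    # positive values indicate global contrast lost by the tone mapping
    return log_range(hdr_lum) - log_range(ldr_lum)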
Yoshida, A., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2006. Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range. Computer Graphics Forum, Blackwell.
Abstract
We conduct a series of experiments to investigate the desired properties of a tone mapping operator (TMO) and to design such an operator based on subjective data. We propose a novel approach to the tone mapping problem, in which the tone mapping is determined by the data from subjective experiments, rather than an image processing algorithm or a visual model. To collect such data, a series of experiments are conducted in which the subjects adjust three generic TMO parameters: brightness, contrast and color saturation. In two experiments, the subjects are to find a) the most preferred image without a reference image and b) the closest image to the real-world scene which the subjects are confronted with. The purpose of these experiments is to collect data for two rendering goals of a TMO: rendering the most preferred image and preserving the fidelity with the real world scene. The data provide an assessment for the most intuitive control over the tone mapping parameters. Unlike most of the researched TMOs that focus on rendering for standard low dynamic range monitors, we consider a broad range of potential displays, each offering different dynamic range and brightness. We simulate capabilities of such displays on a high dynamic range (HDR) monitor. This lets us address the question of whether tone mapping is needed for HDR displays.
Export
BibTeX
@inproceedings{Yoshida-et-al_EG06, TITLE = {Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range}, AUTHOR = {Yoshida, Akiko and Mantiuk, Rafa{\l} and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2006.00961.x}, LOCALID = {Local-ID: C12573CC004A8E26-36B5343ECEA5A706C125730D00546611-Yoshida_EG2006z}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2006}, DATE = {2006}, ABSTRACT = {We conduct a series of experiments to investigate the desired properties of a tone mapping operator (TMO) and to design such an operator based on subjective data. We propose a novel approach to the tone mapping problem, in which the tone mapping is determined by the data from subjective experiments, rather than an image processing algorithm or a visual model. To collect such data, a series of experiments are conducted in which the subjects adjust three generic TMO parameters: brightness, contrast and color saturation. In two experiments, the subjects are to find a) the most preferred image without a reference image and b) the closest image to the real-world scene which the subjects are confronted with. The purpose of these experiments is to collect data for two rendering goals of a TMO: rendering the most preferred image and preserving the fidelity with the real world scene. The data provide an assessment for the most intuitive control over the tone mapping parameters. Unlike most of the researched TMOs that focus on rendering for standard low dynamic range monitors, we consider a broad range of potential displays, each offering different dynamic range and brightness. We simulate capabilities of such displays on a high dynamic range (HDR) monitor. This lets us address the question of whether tone mapping is needed for HDR displays.}, BOOKTITLE = {Eurographics 2006 Proceedings}, EDITOR = {Gr{\"o}ller, Eduard and Szirmay-Kalos, L{\'a}szl{\'o}}, PAGES = {415--426}, JOURNAL = {Computer Graphics Forum}, VOLUME = {25}, ISSUE = {3}, ADDRESS = {Vienna, Austria}, }
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Mantiuk, Rafał %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Analysis of Reproducing Real-world Appearance on Displays of Varying Dynamic Range : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2481-A %F EDOC: 356548 %F OTHER: Local-ID: C12573CC004A8E26-36B5343ECEA5A706C125730D00546611-Yoshida_EG2006z %R 10.1111/j.1467-8659.2006.00961.x %D 2006 %B Eurographics 2006 %Z date of event: 2006-09-04 - 2006-09-08 %C Vienna, Austria %X We conduct a series of experiments to investigate the desired properties of a tone mapping operator (TMO) and to design such an operator based on subjective data. We propose a novel approach to the tone mapping problem, in which the tone mapping is determined by the data from subjective experiments, rather than an image processing algorithm or a visual model. To collect such data, a series of experiments are conducted in which the subjects adjust three generic TMO parameters: brightness, contrast and color saturation. In two experiments, the subjects are to find a) the most preferred image without a reference image and b) the closest image to the real-world scene which the subjects are confronted with. The purpose of these experiments is to collect data for two rendering goals of a TMO: rendering the most preferred image and preserving the fidelity with the real world scene. The data provide an assessment for the most intuitive control over the tone mapping parameters. Unlike most of the researched TMOs that focus on rendering for standard low dynamic range monitors, we consider a broad range of potential displays, each offering different dynamic range and brightness. We simulate capabilities of such displays on a high dynamic range (HDR) monitor. This lets us address the question of whether tone mapping is needed for HDR displays. %B Eurographics 2006 Proceedings %E Gröller, Eduard; Szirmay-Kalos, László %P 415 - 426 %I Blackwell %J Computer Graphics Forum %V 25 %N 3 %I Blackwell-Wiley %@ false
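To make the three generic TMO parameters concrete, here is a minimal Python sketch of one plausible parameterization: contrast as a slope around the mean log luminance, brightness as a log-domain offset, and colour saturation as an exponent on chromatic ratios. The exact parameter semantics used in the experiments are not specified here, so this mapping is an assumption of the sketch.

import numpy as np

def generic_tmo(hdr, brightness=0.0, contrast=1.0, saturation=1.0):
    # hdr: (H, W, 3) linear RGB; returns display-referred RGB in [0, 1]
    lum = hdr @ np.array([0.2126, 0.7152, 0.0722]) + 1e-6
    log_l = np.log10(lum)
    log_out = contrast * (log_l - log_l.mean()) + log_l.mean() + brightness
    lum_out = np.clip(10.0 ** log_out, 0.0, 1.0)
    ratio = (hdr / lum[..., None]) ** saturation   # colour saturation control
    return np.clip(ratio * lum_out[..., None], 0.0, 1.0)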
2005
Havran, V., Smyk, M., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005a. Importance Sampling for Video Environment Maps. SIGGRAPH ’05: ACM SIGGRAPH 2005 Sketches, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HavranSKMS05, TITLE = {Importance Sampling for Video Environment Maps}, AUTHOR = {Havran, Vlastimil and Smyk, Miloslaw and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7827-7}, DOI = {10.1145/1187112.1187243}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches}, EDITOR = {Buhler, Juan}, PAGES = {109}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Smyk, Miloslaw %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Importance Sampling for Video Environment Maps : %G eng %U http://hdl.handle.net/21.11116/0000-000F-3CD6-D %R 10.1145/1187112.1187243 %D 2005 %B International Conference on Computer Graphics and Interactive Techniques %Z date of event: 2005-07-31 - 2005-08-04 %C Los Angeles, CA, USA %B SIGGRAPH '05: ACM SIGGRAPH 2005 Sketches %E Buhler, Juan %P 109 %I ACM %@ 978-1-4503-7827-7
Havran, V., Smyk, M., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005b. Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps. Rendering Techniques 2005: Eurographics Symposium on Rendering (EGSR 2005), Eurographics Association.
Abstract
We present an interactive system for fully dynamic scene lighting using captured high dynamic range (HDR) video environment maps. The key component of our system is an algorithm for efficient decomposition of an HDR video environment map captured over the hemisphere into a set of representative directional light sources, which can be used for the direct lighting computation with shadows using graphics hardware. The resulting lights exhibit good temporal coherence and their number can be adaptively changed to keep a constant framerate while good spatial distribution (stratification) properties are maintained. We can handle a large number of light sources with shadows using a novel technique which reduces the cost of BRDF-based shading and visibility computations. We demonstrate the use of our system in a mixed reality application in which real and synthetic objects are illuminated by consistent lighting at interactive framerates.
Export
BibTeX
@inproceedings{Havran-et-al_EGSR05.2, TITLE = {Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps}, AUTHOR = {Havran, Vlastimil and Smyk, Miloslaw and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {3-905673-23-1}, DOI = {10.2312/EGWR/EGSR05/031-042}, LOCALID = {Local-ID: C125675300671F7B-C3468DABE0F8D837C12570B30047ED74-Havran2005egsrEM}, PUBLISHER = {Eurographics Association}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {We present an interactive system for fully dynamic scene lighting using captured high dynamic range (HDR) video environment maps. The key component of our system is an algorithm for efficient decomposition of an HDR video environment map captured over the hemisphere into a set of representative directional light sources, which can be used for the direct lighting computation with shadows using graphics hardware. The resulting lights exhibit good temporal coherence and their number can be adaptively changed to keep a constant framerate while good spatial distribution (stratification) properties are maintained. We can handle a large number of light sources with shadows using a novel technique which reduces the cost of BRDF-based shading and visibility computations. We demonstrate the use of our system in a mixed reality application in which real and synthetic objects are illuminated by consistent lighting at interactive framerates.}, BOOKTITLE = {Rendering Techniques 2005: Eurographics Symposium on Rendering (EGSR 2005)}, EDITOR = {Deussen, Oliver and Keller, Alexander and Bala, Kavita and Dutr{\'e}, Philip and Fellner, Dieter W. and Spencer, Stephen N.}, PAGES = {31--42, 311}, ADDRESS = {Konstanz, Germany}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Smyk, Miloslaw %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive System for Dynamic Scene Lighting using Captured Video Environment Maps : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26DB-0 %F EDOC: 279016 %F OTHER: Local-ID: C125675300671F7B-C3468DABE0F8D837C12570B30047ED74-Havran2005egsrEM %R 10.2312/EGWR/EGSR05/031-042 %D 2005 %B 16th Eurographics Symposium on Rendering %Z date of event: 2005-06-29 - 2005-07-01 %C Konstanz, Germany %X We present an interactive system for fully dynamic scene lighting using captured high dynamic range (HDR) video environment maps. The key component of our system is an algorithm for efficient decomposition of an HDR video environment map captured over the hemisphere into a set of representative directional light sources, which can be used for the direct lighting computation with shadows using graphics hardware. The resulting lights exhibit good temporal coherence and their number can be adaptively changed to keep a constant framerate while good spatial distribution (stratification) properties are maintained. We can handle a large number of light sources with shadows using a novel technique which reduces the cost of BRDF-based shading and visibility computations. We demonstrate the use of our system in a mixed reality application in which real and synthetic objects are illuminated by consistent lighting at interactive framerates. %B Rendering Techniques 2005: Eurographics Symposium on Rendering %E Deussen, Oliver; Keller, Alexander; Bala, Kavita; Dutré, Philip; Fellner, Dieter W.; Spencer, Stephen N. %P 31 - 42, 311 %I Eurographics Association %@ 3-905673-23-1
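The decomposition step can be sketched in a few lines of Python. This is a minimal illustration of luminance-proportional inverse-CDF sampling of a lat-long hemisphere map into directional lights; the paper's temporal coherence, stratification, and adaptive light count are deliberately omitted, and the function name sample_directional_lights is ours.

import numpy as np

def sample_directional_lights(env, n_lights, rng=np.random.default_rng(0)):
    # env: (H, W, 3) linear-radiance lat-long map covering the upper hemisphere
    h, w, _ = env.shape
    lum = env @ np.array([0.2126, 0.7152, 0.0722])
    theta = (np.arange(h) + 0.5) / h * (np.pi / 2)   # row -> polar angle
    pdf = lum * np.sin(theta)[:, None]               # solid-angle weighting
    cdf = np.cumsum(pdf.ravel())
    cdf /= cdf[-1]
    idx = np.searchsorted(cdf, rng.random(n_lights))
    rows, cols = np.unravel_index(idx, (h, w))
    phi = (cols + 0.5) / w * 2.0 * np.pi
    dirs = np.stack([np.sin(theta[rows]) * np.cos(phi),
                     np.sin(theta[rows]) * np.sin(phi),
                     np.cos(theta[rows])], axis=1)
    # Monte Carlo weight: radiance / (normalized pdf * number of samples)
    w_mc = pdf.sum() / (pdf.ravel()[idx] * n_lights)
    return dirs, env[rows, cols] * w_mc[:, None]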
Jiménez, J.-R., Myszkowski, K., and Pueyo, X. 2005. Interactive Global Illumination in Dynamic Participating Media Using Selective Photon Tracing. SCCG ’05: Proceedings of the 21st spring conference on Computer graphics, ACM.
Export
BibTeX
@inproceedings{Jimenez05, TITLE = {Interactive Global Illumination in Dynamic Participating Media Using Selective Photon Tracing}, AUTHOR = {Jim{\'e}nez, Juan-Roberto and Myszkowski, Karol and Pueyo, Xavier}, LANGUAGE = {eng}, ISBN = {1-59593-203-6}, LOCALID = {Local-ID: C125675300671F7B-F70D9F523B0C4008C1256FE9004C51A5-Jimenez05}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics}, PAGES = {211--218}, }
Endnote
%0 Conference Proceedings %A Jiménez, Juan-Roberto %A Myszkowski, Karol %A Pueyo, Xavier %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Global Illumination in Dynamic Participating Media Using Selective Photon Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26D9-3 %F EDOC: 279010 %F OTHER: Local-ID: C125675300671F7B-F70D9F523B0C4008C1256FE9004C51A5-Jimenez05 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %B SCCG '05: Proceedings of the 21st spring conference on Computer graphics %P 211 - 218 %I ACM %@ 1-59593-203-6
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005a. Lightness Perception in Tone Reproduction for High Dynamic Range Images. Computer Graphics Forum, Blackwell.
Export
BibTeX
@inproceedings{Krawczyk-et-al_EUROGRAPHICS_05, TITLE = {Lightness Perception in Tone Reproduction for High Dynamic Range Images}, AUTHOR = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/j.1467-8659.2005.00888.x}, LOCALID = {Local-ID: C125675300671F7B-D7B5D281DAAB9EB0C1256FE90049E357-Krawczyk05EG}, PUBLISHER = {Blackwell-Wiley}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005}, EDITOR = {Alexa, Marc and Marks, Joe}, PAGES = {635--645}, JOURNAL = {Computer Graphics Forum}, VOLUME = {24}, ISSUE = {3}, ADDRESS = {Dublin, Ireland}, }
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightness Perception in Tone Reproduction for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-26F6-3 %F EDOC: 279009 %F OTHER: Local-ID: C125675300671F7B-D7B5D281DAAB9EB0C1256FE90049E357-Krawczyk05EG %R 10.1111/j.1467-8659.2005.00888.x %D 2005 %B The European Association for Computer Graphics 26th Annual Conference %Z date of event: 2005-08-29 - %C Dublin, Ireland %B The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005 %E Alexa, Marc; Marks, Joe %P 635 - 645 %I Blackwell %J Computer Graphics Forum %V 24 %N 3 %I Blackwell-Wiley %@ false
Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2005b. Perceptual Effects in Real-time Tone Mapping. SCCG ’05: Proceedings of the 21st spring conference on Computer graphics, ACM.
Abstract
Tremendous progress in the development and accessibility of high dynamic range (HDR) technology has recently resulted in a fast proliferation of HDR synthetic image sequences and captured HDR video. When properly processed, such HDR data can lead to very convincing and realistic results even when presented on traditional low dynamic range (LDR) display devices. This requires real-time local contrast compression (tone mapping) with simultaneous modeling of effects important in HDR image perception, such as visual acuity, glare, and day and night vision. We propose a unified model that incorporates all those effects into a common computational framework, which enables an efficient implementation on currently available graphics hardware. We develop a post-processing module which can be added as the final stage of any real-time rendering system, game engine, or digital video player, and which enhances the realism and believability of displayed image streams.
Export
BibTeX
@inproceedings{Krawczyk-et-al_SCCG05, TITLE = {Perceptual Effects in Real-time Tone Mapping}, AUTHOR = {Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-204-4}, DOI = {10.1145/1090122.1090154}, LOCALID = {Local-ID: C125675300671F7B-A48310C4FDBE1EA6C1256FE9004D4776-Krawczyk2005sccg}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {Tremendous progress in the development and accessibility of high dynamic range (HDR) technology has recently resulted in a fast proliferation of HDR synthetic image sequences and captured HDR video. When properly processed, such HDR data can lead to very convincing and realistic results even when presented on traditional low dynamic range (LDR) display devices. This requires real-time local contrast compression (tone mapping) with simultaneous modeling of effects important in HDR image perception, such as visual acuity, glare, and day and night vision. We propose a unified model that incorporates all those effects into a common computational framework, which enables an efficient implementation on currently available graphics hardware. We develop a post-processing module which can be added as the final stage of any real-time rendering system, game engine, or digital video player, and which enhances the realism and believability of displayed image streams.}, BOOKTITLE = {SCCG '05: Proceedings of the 21st spring conference on Computer graphics}, PAGES = {195--202}, ADDRESS = {Budmerice, Slovakia}, }
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Effects in Real-time Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2757-0 %F EDOC: 279038 %F OTHER: Local-ID: C125675300671F7B-A48310C4FDBE1EA6C1256FE9004D4776-Krawczyk2005sccg %R 10.1145/1090122.1090154 %D 2005 %B 21st Spring Conference on Computer Graphics %Z date of event: 2005-05-12 - 2005-05-14 %C Budmerice, Slovakia %X Tremendous progress in the development and accessibility of high dynamic range (HDR) technology has recently resulted in a fast proliferation of HDR synthetic image sequences and captured HDR video. When properly processed, such HDR data can lead to very convincing and realistic results even when presented on traditional low dynamic range (LDR) display devices. This requires real-time local contrast compression (tone mapping) with simultaneous modeling of effects important in HDR image perception, such as visual acuity, glare, and day and night vision. We propose a unified model that incorporates all those effects into a common computational framework, which enables an efficient implementation on currently available graphics hardware. We develop a post-processing module which can be added as the final stage of any real-time rendering system, game engine, or digital video player, and which enhances the realism and believability of displayed image streams. %B SCCG '05: Proceedings of the 21st spring conference on Computer graphics %P 195 - 202 %I ACM %@ 978-1-59593-204-4
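As a flavour of the real-time pipeline, the sketch below implements only the simplest ingredients: a per-frame log-average adaptation luminance, exponentially smoothed over time to mimic the eye's temporal adaptation, followed by sigmoid compression. The paper's acuity, glare, and night-vision models are not reproduced here, and the time constant tau is an illustrative value.

import numpy as np

def tone_map_frame(hdr, l_adapt_prev, dt, tau=0.5, key=0.18):
    # hdr: (H, W, 3) linear RGB frame; dt: frame time in seconds
    lum = hdr @ np.array([0.2126, 0.7152, 0.0722])
    l_frame = np.exp(np.mean(np.log(lum + 1e-6)))      # log-average luminance
    a = 1.0 - np.exp(-dt / tau)                        # exponential adaptation
    l_adapt = l_adapt_prev + a * (l_frame - l_adapt_prev)
    l_scaled = key / l_adapt * lum
    l_disp = l_scaled / (1.0 + l_scaled)               # sigmoid compression
    return hdr * (l_disp / (lum + 1e-6))[..., None], l_adapt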
Mantiuk, R., Daly, S., Myszkowski, K., and Seidel, H.-P. 2005a. Predicting Visible Differences in High Dynamic Range Images - Model and its Calibration. Human Vision and Electronic Imaging X, IS&T/SPIE’s 17th Annual Symposium on Electronic Imaging (2005), SPIE.
Abstract
New imaging and rendering systems commonly use physically accurate lighting information in the form of high-dynamic range (HDR) images and video. HDR images contain actual colorimetric or physical values, which can span 14 orders of magnitude, instead of the 8-bit renderings found in standard images. The additional precision and quality retained in HDR visual data is necessary to display images on advanced HDR display devices, capable of showing a contrast of 50,000:1, as compared to the contrast of 700:1 for LCD displays. With the development of high-dynamic range visual techniques comes a need for an automatic visual quality assessment of the resulting images. In this paper we propose several modifications to the Visual Difference Predictor (VDP). The modifications improve the prediction of perceivable differences in the full visible range of luminance and under the adaptation conditions corresponding to real scene observation. The proposed metric takes into account the aspects of high contrast vision, like scattering of the light in the optics (OTF), nonlinear response to light for the full range of luminance, and local adaptation. To calibrate our HDR VDP we perform experiments using an advanced HDR display, capable of displaying a range of luminance that is close to that found in real scenes.
Export
BibTeX
@inproceedings{Mantiuk-et-al_SPIE05, TITLE = {Predicting Visible Differences in High Dynamic Range Images -- Model and its Calibration}, AUTHOR = {Mantiuk, Rafal and Daly, Scott and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, ISBN = {978-0-8194-5639-7}, DOI = {10.1117/12.586757}, LOCALID = {Local-ID: C125675300671F7B-7A33923425AEBF68C1256F800037FB11-Mantiuk2005}, PUBLISHER = {SPIE}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {New imaging and rendering systems commonly use physically accurate lighting information in the form of high-dynamic range (HDR) images and video. HDR images contain actual colorimetric or physical values, which can span 14 orders of magnitude, instead of the 8-bit renderings found in standard images. The additional precision and quality retained in HDR visual data is necessary to display images on advanced HDR display devices, capable of showing a contrast of 50,000:1, as compared to the contrast of 700:1 for LCD displays. With the development of high-dynamic range visual techniques comes a need for an automatic visual quality assessment of the resulting images. In this paper we propose several modifications to the Visual Difference Predictor (VDP). The modifications improve the prediction of perceivable differences in the full visible range of luminance and under the adaptation conditions corresponding to real scene observation. The proposed metric takes into account the aspects of high contrast vision, like scattering of the light in the optics (OTF), nonlinear response to light for the full range of luminance, and local adaptation. To calibrate our HDR VDP we perform experiments using an advanced HDR display, capable of displaying a range of luminance that is close to that found in real scenes.}, BOOKTITLE = {Human Vision and Electronic Imaging X, IS\&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.}, PAGES = {204--214}, SERIES = {SPIE Proceedings Series}, VOLUME = {5666}, ADDRESS = {San Jose, California USA}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Daly, Scott %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Predicting Visible Differences in High Dynamic Range Images - Model and its Calibration : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2773-0 %F EDOC: 278999 %F OTHER: Local-ID: C125675300671F7B-7A33923425AEBF68C1256F800037FB11-Mantiuk2005 %R 10.1117/12.586757 %D 2005 %B IS&T/SPIE's 17th Annual Symposium on Electronic Imaging %Z date of event: 2005-01-17 - %C San Jose, California USA %X New imaging and rendering systems commonly use physically accurate lighting information in the form of high-dynamic range (HDR) images and video. HDR images contain actual colorimetric or physical values, which can span 14 orders of magnitude, instead of the 8-bit renderings found in standard images. The additional precision and quality retained in HDR visual data is necessary to display images on advanced HDR display devices, capable of showing a contrast of 50,000:1, as compared to the contrast of 700:1 for LCD displays. With the development of high-dynamic range visual techniques comes a need for an automatic visual quality assessment of the resulting images. In this paper we propose several modifications to the Visual Difference Predictor (VDP). The modifications improve the prediction of perceivable differences in the full visible range of luminance and under the adaptation conditions corresponding to real scene observation. The proposed metric takes into account the aspects of high contrast vision, like scattering of the light in the optics (OTF), nonlinear response to light for the full range of luminance, and local adaptation. To calibrate our HDR VDP we perform experiments using an advanced HDR display, capable of displaying a range of luminance that is close to that found in real scenes. %B Human Vision and Electronic Imaging X, IS&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005) %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 204 - 214 %I SPIE %@ 978-0-8194-5639-7 %B SPIE Proceedings Series %N 5666 %@ false
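One ingredient of such a metric, the nonlinear response to light over the full luminance range, can be approximated by integrating the inverse of a threshold-versus-intensity function. The t.v.i. below is a crude DeVries-Rose/Weber stand-in rather than the calibrated model from the paper, and the OTF and local adaptation stages are omitted entirely.

import numpy as np

def tvi(l):
    # toy detection threshold: ~sqrt(L) at low luminance, Weber-like at high
    return np.maximum(0.01 * l, 0.1 * np.sqrt(l) + 1e-4)

def luminance_to_response(l, l_min=1e-4, n=4096):
    # integrate dR = dL / tvi(L) on a log-spaced grid, then look up values;
    # the result is a response expressed in approximate JND units
    grid = np.geomspace(l_min, max(l.max(), 10 * l_min), n)
    resp = np.cumsum(np.gradient(grid) / tvi(grid))
    return np.interp(l, grid, resp)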
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2005b. A Perceptual Framework for Contrast Processing of High Dynamic Range Images. APGV ’05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization, ACM.
Abstract
In this work we propose a framework for image processing in a visual response space, in which contrast values directly correlate with their visibility in an image. Our framework involves a transformation of an image from luminance space to a pyramid of low-pass contrast images and then to the visual response space. After modifying response values, the transformation can be reversed to produce the resulting image. To predict the visibility of suprathreshold contrast, we derive a transducer function for the full range of contrast levels that can be found in High Dynamic Range images. We show that a complex contrast compression operation, which preserves textures of small contrast, is reduced to a linear scaling in the proposed visual response space.
Export
BibTeX
@inproceedings{Mantiuk-et-al_APGV05, TITLE = {A Perceptual Framework for Contrast Processing of High Dynamic Range Images}, AUTHOR = {Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-59593-139-9}, DOI = {10.1145/1080402.1080418}, LOCALID = {Local-ID: C125675300671F7B-C07FBDA152C52871C12570700034B914-mantiuk2004::contrast}, PUBLISHER = {ACM}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {In this work we propose a framework for image processing in a visual response space, in which contrast values directly correlate with their visibility in an image. Our framework involves a transformation of an image from luminance space to a pyramid of low-pass contrast images and then to the visual response space. After modifying response values, the transformation can be reversed to produce the resulting image. To predict the visibility of suprathreshold contrast, we derive a transducer function for the full range of contrast levels that can be found in High Dynamic Range images. We show that a complex contrast compression operation, which preserves textures of small contrast, is reduced to a linear scaling in the proposed visual response space.}, BOOKTITLE = {APGV '05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization}, EDITOR = {Malik, Jitendra and Koenderink, Jan J.}, PAGES = {87--94}, ADDRESS = {Coruna, Spain}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A Perceptual Framework for Contrast Processing of High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-25BB-F %F EDOC: 278998 %F OTHER: Local-ID: C125675300671F7B-C07FBDA152C52871C12570700034B914-mantiuk2004::contrast %R 10.1145/1080402.1080418 %D 2005 %B 2nd Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2005-08-26 - 2005-08-28 %C Coruna, Spain %X In this work we propose a framework for image processing in a visual response space, in which contrast values directly correlate with their visibility in an image. Our framework involves a transformation of an image from luminance space to a pyramid of low-pass contrast images and then to the visual response space. After modifying response values, the transformation can be reversed to produce the resulting image. To predict the visibility of suprathreshold contrast, we derive a transducer function for the full range of contrast levels that can be found in High Dynamic Range images. We show that a complex contrast compression operation, which preserves textures of small contrast, is reduced to a linear scaling in the proposed visual response space. %B APGV '05: Proceedings of the 2nd Symposium on Applied Perception in Graphics and Visualization %E Malik, Jitendra; Koenderink, Jan J. %P 87 - 94 %I ACM %@ 978-1-59593-139-9
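The shape of that pipeline is easy to sketch. Below, a stack of progressively blurred log-luminance images yields low-pass contrast bands, a toy logarithmic transducer stands in for the one derived in the paper, and contrast compression is a plain linear scaling in response space before the transform is inverted.

import numpy as np
from scipy.ndimage import gaussian_filter

def transducer(c, k=10.0):
    return np.sign(c) * np.log1p(k * np.abs(c))   # toy response function

def inv_transducer(r, k=10.0):
    return np.sign(r) * np.expm1(np.abs(r)) / k

def contrast_compress(lum, levels=5, scale=0.6):
    x = np.log(lum + 1e-6)
    stack = [x] + [gaussian_filter(x, 2.0 ** i) for i in range(1, levels)]
    out = stack[-1]                               # coarsest band kept as-is
    for fine, coarse in zip(stack[:-1], stack[1:]):
        c = fine - coarse                         # low-pass contrast band
        out = out + inv_transducer(scale * transducer(c))
    return np.exp(out)

With scale=1.0 the round trip reconstructs the input exactly; with scale<1.0 the logarithmic transducer makes large contrasts shrink proportionally more than small ones, which is what preserves low-contrast textures.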
Smyk, M., Kinuwaki, S., Durikovic, R., and Myszkowski, K. 2005. Temporally Coherent Irradiance Caching for High Quality Animation Rendering. The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005, Blackwell.
Export
BibTeX
@inproceedings{Smyk05EG, TITLE = {Temporally Coherent Irradiance Caching for High Quality Animation Rendering}, AUTHOR = {Smyk, Miloslaw and Kinuwaki, Shin-ichi and Durikovic, Roman and Myszkowski, Karol}, EDITOR = {Alexa, Marc and Marks, Joe}, LANGUAGE = {eng}, ISSN = {0167-7055}, LOCALID = {Local-ID: C125675300671F7B-9292A579A1D2DBAEC1256FE9004A40A0-Smyk05EG}, PUBLISHER = {Blackwell}, YEAR = {2005}, DATE = {2005}, BOOKTITLE = {The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005}, PAGES = {401--412}, SERIES = {Computer Graphics Forum}, }
Endnote
%0 Conference Proceedings %A Smyk, Miloslaw %A Kinuwaki, Shin-ichi %A Durikovic, Roman %A Myszkowski, Karol %E Alexa, Marc %E Marks, Joe %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Temporally Coherent Irradiance Caching for High Quality Animation Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-27D9-E %F EDOC: 278974 %F OTHER: Local-ID: C125675300671F7B-9292A579A1D2DBAEC1256FE9004A40A0-Smyk05EG %D 2005 %B The European Association for Computer Graphics 26th Annual Conference %Z date of event: 2005-08-29 - %C Dublin, Ireland %B The European Association for Computer Graphics 26th Annual Conference : EUROGRAPHICS 2005 %P 401 - 412 %I Blackwell %B Computer Graphics Forum %@ false
Yoshida, A., Blanz, V., Myszkowski, K., and Seidel, H.-P. 2005. Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes. Human Vision and Electronic Imaging X, IS&T/SPIE’s 17th Annual Symposium on Electronic Imaging (2005), SPIE.
Abstract
A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual system (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on a low dynamic range monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.
Export
BibTeX
@inproceedings{Yoshida-et-al_SPIE05, TITLE = {Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes}, AUTHOR = {Yoshida, Akiko and Blanz, Volker and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0277-786X}, ISBN = {978-0-8194-5639-7}, DOI = {10.1117/12.587782}, LOCALID = {Local-ID: C125675300671F7B-6BD5753531007D22C1256F5C006B5D8C-Yoshida2005}, PUBLISHER = {SPIE}, YEAR = {2005}, DATE = {2005}, ABSTRACT = {A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual system (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on a low dynamic range monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes.}, BOOKTITLE = {Human Vision and Electronic Imaging X, IS\&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005)}, EDITOR = {Rogowitz, Bernice E. and Pappas, Thrasyvoulos N. and Daly, Scott J.}, PAGES = {192--203}, SERIES = {SPIE Proceedings Series}, VOLUME = {5666}, ADDRESS = {San Jose, CA, USA}, }
Endnote
%0 Conference Proceedings %A Yoshida, Akiko %A Blanz, Volker %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Evaluation of Tone Mapping Operators with Real-World Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2759-C %F EDOC: 278958 %F OTHER: Local-ID: C125675300671F7B-6BD5753531007D22C1256F5C006B5D8C-Yoshida2005 %R 10.1117/12.587782 %D 2005 %B IS&T/SPIE's 17th Annual Symposium on Electronic Imaging %Z date of event: 2005-01-17 - %C San Jose, CA, USA %X A number of successful tone mapping operators for contrast compression have been proposed due to the need to visualize high dynamic range (HDR) images on low dynamic range devices. They were inspired by fields as diverse as image processing, photographic practice, and modeling of the human visual system (HVS). The variety of approaches calls for a systematic perceptual evaluation of their performance. We conduct a psychophysical experiment based on a direct comparison between the appearance of real-world scenes and HDR images of these scenes displayed on a low dynamic range monitor. In our experiment, HDR images are tone mapped by seven existing tone mapping operators. The primary interest of this psychophysical experiment is to assess the differences in how tone mapped images are perceived by human observers and to find out which attributes of image appearance account for these differences when tone mapped images are compared directly with their corresponding real-world scenes rather than with each other. The human subjects rate image naturalness, overall contrast, overall brightness, and detail reproduction in dark and bright image regions with respect to the corresponding real-world scene. The results indicate substantial differences in perception of images produced by individual tone mapping operators. We observe a clear distinction between global and local operators in favor of the latter, and we classify the tone mapping operators according to naturalness and appearance attributes. %B Human Vision and Electronic Imaging X, IS&T/SPIE's 17th Annual Symposium on Electronic Imaging (2005) %E Rogowitz, Bernice E.; Pappas, Thrasyvoulos N.; Daly, Scott J. %P 192 - 203 %I SPIE %@ 978-0-8194-5639-7 %B SPIE Proceedings Series %N 5666 %@ false
2004
Dmitriev, K., Annen, T., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2004. A CAVE System for Interactive Modeling of Global Illumination in Car Interior. Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004), ACM.
Abstract
Global illumination dramatically improves the realistic appearance of rendered scenes, but it is usually neglected in VR systems due to its high cost. In this work we present an efficient global illumination solution specifically tailored to those CAVE applications which require an immediate response to dynamic light changes and allow for free motion of the observer, but involve scenes with static geometry. As an application example we choose car interior modeling under free driving conditions. We illuminate the car using dynamically changing High Dynamic Range (HDR) environment maps and use the Precomputed Radiance Transfer (PRT) method for the global illumination computation. We leverage the PRT method to handle scenes with non-trivial topology represented by complex meshes. We also propose a hybrid PRT and final-gathering approach for high-quality rendering of objects with a complex Bi-directional Reflectance Distribution Function (BRDF). We use this method for predictive rendering of the navigation LCD panel based on its measured BRDF. Since the global illumination computation leads to HDR images, we propose a tone mapping algorithm tailored specifically to the CAVE. We employ head tracking to identify the observed screen region and derive for it proper luminance adaptation conditions, which are then used for tone mapping on all walls of the CAVE. We distribute the global illumination and tone mapping computation across all CPUs and GPUs available in the CAVE, which enables us to achieve interactive performance even for the costly final-gathering approach.
Export
BibTeX
@inproceedings{Dmitriev-et-al_VRST04, TITLE = {A {CAVE} System for Interactive Modeling of Global Illumination in Car Interior}, AUTHOR = {Dmitriev, Kirill and Annen, Thomas and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-907-5}, DOI = {10.1145/1077534.1077560}, LOCALID = {Local-ID: C125675300671F7B-9738E2CF6F79F214C1256F5E004819E6-dmitriev04acs}, PUBLISHER = {ACM}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Global illumination dramatically improves the realistic appearance of rendered scenes, but it is usually neglected in VR systems due to its high cost. In this work we present an efficient global illumination solution specifically tailored to those CAVE applications which require an immediate response to dynamic light changes and allow for free motion of the observer, but involve scenes with static geometry. As an application example we choose car interior modeling under free driving conditions. We illuminate the car using dynamically changing High Dynamic Range (HDR) environment maps and use the Precomputed Radiance Transfer (PRT) method for the global illumination computation. We leverage the PRT method to handle scenes with non-trivial topology represented by complex meshes. We also propose a hybrid PRT and final-gathering approach for high-quality rendering of objects with a complex Bi-directional Reflectance Distribution Function (BRDF). We use this method for predictive rendering of the navigation LCD panel based on its measured BRDF. Since the global illumination computation leads to HDR images, we propose a tone mapping algorithm tailored specifically to the CAVE. We employ head tracking to identify the observed screen region and derive for it proper luminance adaptation conditions, which are then used for tone mapping on all walls of the CAVE. We distribute the global illumination and tone mapping computation across all CPUs and GPUs available in the CAVE, which enables us to achieve interactive performance even for the costly final-gathering approach.}, BOOKTITLE = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST 2004)}, EDITOR = {Lau, Rynson and Baciu, George}, PAGES = {137--145}, ADDRESS = {Hong Kong}, }
Endnote
%0 Conference Proceedings %A Dmitriev, Kirill %A Annen, Thomas %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T A CAVE System for Interactive Modeling of Global Illumination in Car Interior : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-29FF-A %F EDOC: 231986 %F OTHER: Local-ID: C125675300671F7B-9738E2CF6F79F214C1256F5E004819E6-dmitriev04acs %R 10.1145/1077534.1077560 %D 2004 %B ACM Symposium on Virtual Reality Software and Technology 2004 %Z date of event: 2004-11-10 - 2004-11-12 %C Hong Kong %X Global illumination dramatically improves the realistic appearance of rendered scenes, but it is usually neglected in VR systems due to its high cost. In this work we present an efficient global illumination solution specifically tailored to those CAVE applications which require an immediate response to dynamic light changes and allow for free motion of the observer, but involve scenes with static geometry. As an application example we choose car interior modeling under free driving conditions. We illuminate the car using dynamically changing High Dynamic Range (HDR) environment maps and use the Precomputed Radiance Transfer (PRT) method for the global illumination computation. We leverage the PRT method to handle scenes with non-trivial topology represented by complex meshes. We also propose a hybrid PRT and final-gathering approach for high-quality rendering of objects with a complex Bi-directional Reflectance Distribution Function (BRDF). We use this method for predictive rendering of the navigation LCD panel based on its measured BRDF. Since the global illumination computation leads to HDR images, we propose a tone mapping algorithm tailored specifically to the CAVE. We employ head tracking to identify the observed screen region and derive for it proper luminance adaptation conditions, which are then used for tone mapping on all walls of the CAVE. We distribute the global illumination and tone mapping computation across all CPUs and GPUs available in the CAVE, which enables us to achieve interactive performance even for the costly final-gathering approach. %B Proceedings of the ACM Symposium on Virtual Reality Software and Technology %E Lau, Rynson; Baciu, George %P 137 - 145 %I ACM %@ 978-1-58113-907-5
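The runtime cost structure of PRT relighting is worth spelling out: once transfer coefficients have been precomputed per vertex, relighting under a new environment map reduces to a single matrix product per frame. A minimal sketch follows; the array shapes and the function name prt_relight are assumptions of this illustration, not the paper's implementation.

import numpy as np

def prt_relight(transfer, env_sh):
    # transfer: (n_vertices, n_coeffs) precomputed transfer coefficients
    # env_sh:   (n_coeffs, 3) spherical-harmonic RGB coefficients of the
    #           current HDR environment map, reprojected every frame
    return transfer @ env_sh   # (n_vertices, 3) outgoing radiance

Per frame, one projects the captured environment map into env_sh and calls prt_relight; dynamic lighting then costs one matrix product, which is what makes an immediate response to light changes feasible.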
Ershov, S., Durikovic, R., Kolchin, K., and Myszkowski, K. 2004. Reverse engineering approach to appearance-based design of metallic and pearlescent paints. The Visual Computer 20.
Abstract
We propose a new approach to interactive design of metallic and pearlescent coatings, such as automotive paints and plastic finishes of electronic appliances. This approach includes solving the inverse problem, that is, finding pigment composition of a paint from its bidirectional reflectance distribution function (BRDF) based on a simple paint model. The inverse problem is solved by two consecutive optimizations calculated in realtime on a contemporary PC. Such reverse engineering can serve as a starting point for subsequent design of new paints in terms of appearance attributes that are directly connected to the physical parameters of our model. This allows the user to have a paint composition in parallel with the appearance being designed.
Export
BibTeX
@article{Ershov2004, TITLE = {Reverse engineering approach to appearance-based design of metallic and pearlescent paints}, AUTHOR = {Ershov, Sergey and Durikovic, Roman and Kolchin, Konstantin and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0178-2789}, LOCALID = {Local-ID: C125675300671F7B-1ED9315CB8B336DAC1256F5C003D02C9-Ershov2004}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {We propose a new approach to interactive design of metallic and pearlescent coatings, such as automotive paints and plastic finishes of electronic appliances. This approach includes solving the inverse problem, that is, finding pigment composition of a paint from its bidirectional reflectance distribution function (BRDF) based on a simple paint model. The inverse problem is solved by two consecutive optimizations calculated in realtime on a contemporary PC. Such reverse engineering can serve as a starting point for subsequent design of new paints in terms of appearance attributes that are directly connected to the physical parameters of our model. This allows the user to have a paint composition in parallel with the appearance being designed.}, JOURNAL = {The Visual Computer}, VOLUME = {20}, PAGES = {587--600}, }
Endnote
%0 Journal Article %A Ershov, Sergey %A Durikovic, Roman %A Kolchin, Konstantin %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Reverse engineering approach to appearance-based design of metallic and pearlescent paints : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B2B-7 %F EDOC: 232050 %F OTHER: Local-ID: C125675300671F7B-1ED9315CB8B336DAC1256F5C003D02C9-Ershov2004 %D 2004 %* Review method: peer-reviewed %X We propose a new approach to interactive design of metallic and pearlescent coatings, such as automotive paints and plastic finishes of electronic appliances. This approach includes solving the inverse problem, that is, finding pigment composition of a paint from its bidirectional reflectance distribution function (BRDF) based on a simple paint model. The inverse problem is solved by two consecutive optimizations calculated in realtime on a contemporary PC. Such reverse engineering can serve as a starting point for subsequent design of new paints in terms of appearance attributes that are directly connected to the physical parameters of our model. This allows the user to have a paint composition in parallel with the appearance being designed. %J The Visual Computer %V 20 %& 587 %P 587 - 600 %@ false
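Under a strong simplifying assumption, namely that paint reflectance is a non-negative mixture of known per-pigment basis BRDFs sampled at a set of angles, the first optimization stage reduces to non-negative least squares. The sketch below is not the paper's two-stage procedure or its physical paint model; the matrix layout and the name fit_pigments are ours.

import numpy as np
from scipy.optimize import nnls

def fit_pigments(basis, brdf_measured):
    # basis: (m, k) BRDF samples of k candidate pigments at m angles
    # brdf_measured: (m,) samples of the target paint's BRDF
    weights, residual = nnls(basis, brdf_measured)
    # normalize to composition fractions
    return weights / max(weights.sum(), 1e-12), residual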
Krawczyk, G., Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2004. Lightness Perception Inspired Tone Mapping. Proceedings APGV 2004, ACM.
Export
BibTeX
@inproceedings{Krawczyk2004, TITLE = {Lightness Perception Inspired Tone Mapping}, AUTHOR = {Krawczyk, Grzegorz and Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-914-3}, DOI = {10.1145/1012551.1012594}, LOCALID = {Local-ID: C125675300671F7B-07985C48329EC4DFC1256FC4002A5333-Krawczyk2004}, PUBLISHER = {ACM}, YEAR = {2004}, DATE = {2004}, BOOKTITLE = {Proceedings APGV 2004}, EDITOR = {Spencer, Stephen N.}, PAGES = {172--172}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Krawczyk, Grzegorz %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Lightness Perception Inspired Tone Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2543-E %F EDOC: 231335 %R 10.1145/1012551.1012594 %F OTHER: Local-ID: C125675300671F7B-07985C48329EC4DFC1256FC4002A5333-Krawczyk2004 %D 2004 %B 1st Symposium on Applied Perception in Graphics and Visualization %Z date of event: 2004-08-07 - 2004-08-08 %C Los Angeles, CA, USA %B Proceedings APGV 2004 %E Spencer, Stephen N. %P 172 - 172 %I ACM %@ 978-1-58113-914-3
Mantiuk, R., Myszkowski, K., and Seidel, H.-P. 2004a. Visible Difference Predicator for High Dynamic Range Images. 2004 IEEE International Conference on Systems, Man & Cybernetics (SMC 2004), IEEE.
Abstract
Since new imaging and rendering systems commonly use physically accurate lighting information in the form of High-Dynamic Range data, there is a need for an automatic visual quality assessment of the resulting images. In this work we extend the Visual Difference Predictor (VDP) developed by Daly to handle HDR data. This lets us predict if a human observer is able to perceive differences for a pair of HDR images under the adaptation conditions corresponding to the real scene observation.
Export
BibTeX
@inproceedings{Mantiuk2004HDRVDP, TITLE = {Visible Difference Predicator for High Dynamic Range Images}, AUTHOR = {Mantiuk, Rafal and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7803-8567-5}, DOI = {10.1109/ICSMC.2004.1400750}, LOCALID = {Local-ID: C125675300671F7B-4A5E8413EEF67127C1256F330053216A-Mantiuk2004HDRVDP}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Since new imaging and rendering systems commonly use physically accurate lighting information in the form of High-Dynamic Range data, there is a need for an automatic visual quality assessment of the resulting images. In this work we extend the Visual Difference Predictor (VDP) developed by Daly to handle HDR data. This lets us predict whether a human observer is able to perceive differences for a pair of HDR images under the adaptation conditions corresponding to real scene observation.}, BOOKTITLE = {2004 IEEE International Conference on Systems, Man \& Cybernetics (SMC 2004)}, EDITOR = {Thissen, Wil and Wieringa, Peter and Pantic, Maja and Ludema, Marcel}, PAGES = {2763--2769}, ADDRESS = {The Hague, The Netherlands}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Visible Difference Predicator for High Dynamic Range Images : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B78-C %F EDOC: 231938 %R 10.1109/ICSMC.2004.1400750 %F OTHER: Local-ID: C125675300671F7B-4A5E8413EEF67127C1256F330053216A-Mantiuk2004HDRVDP %D 2004 %B 2004 IEEE International Conference on Systems, Man & Cybernetics %Z date of event: 2004-10-10 - 2004-10-13 %C The Hague, The Netherlands %X Since new imaging and rendering systems commonly use physically accurate lighting information in the form of High-Dynamic Range data, there is a need for an automatic visual quality assessment of the resulting images. In this work we extend the Visual Difference Predictor (VDP) developed by Daly to handle HDR data. This lets us predict whether a human observer is able to perceive differences for a pair of HDR images under the adaptation conditions corresponding to real scene observation. %B 2004 IEEE International Conference on Systems, Man & Cybernetics %E Thissen, Wil; Wieringa, Peter; Pantic, Maja; Ludema, Marcel %P 2763 - 2769 %I IEEE %@ 0-7803-8567-5
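To make the task concrete, here is a toy per-pixel visibility test that compares two HDR luminance maps in log-luminance space against a fixed contrast threshold. This is only a stand-in: Daly's VDP and its HDR extension model contrast sensitivity, masking, and local adaptation, none of which appear below, and the threshold value is an arbitrary assumption.

    import numpy as np

    def toy_visible_difference(L1, L2, threshold=0.01):
        # Work in log10 luminance, where equal steps roughly correspond to
        # equal contrast; guard against zeros before taking the logarithm.
        d = np.abs(np.log10(np.maximum(L1, 1e-6)) - np.log10(np.maximum(L2, 1e-6)))
        # Flag pixels whose luminance ratio exceeds 1 + threshold.
        return d > np.log10(1.0 + threshold)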
Mantiuk, R., Krawczyk, G., Myszkowski, K., and Seidel, H.-P. 2004b. Perception-motivated High Dynamic Range Video Encoding. ACM Transactions on Graphics23, 3.
Abstract
Due to rapid technological progress in high dynamic range (HDR) video capture and display, the efficient storage and transmission of such data is crucial for the completeness of any HDR imaging pipeline. We propose a new approach for inter-frame encoding of HDR video, which is embedded in the well-established MPEG-4 video compression standard. The key component of our technique is luminance quantization that is optimized for the contrast threshold perception in the human visual system. The quantization scheme requires only 10--11 bits to encode 12 orders of magnitude of visible luminance range and does not lead to perceivable contouring artifacts. Besides video encoding, the proposed quantization provides perceptually-optimized luminance sampling for fast implementation of any global tone mapping operator using a lookup table. To improve the quality of synthetic video sequences, we introduce a coding scheme for discrete cosine transform (DCT) blocks with high contrast. We demonstrate the capabilities of HDR video in a player, which enables decoding, tone mapping, and applying post-processing effects in real-time. The tone mapping algorithm as well as its parameters can be changed interactively while the video is playing. We can simulate post-processing effects such as glare, night vision, and motion blur, which appear very realistic due to the usage of HDR data.
Export
BibTeX
@article{Mantiuk-et-al_TG04, TITLE = {Perception-motivated High Dynamic Range Video Encoding}, AUTHOR = {Mantiuk, Rafal and Krawczyk, Grzegorz and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0730-0301}, DOI = {10.1145/1015706.1015794}, LOCALID = {Local-ID: C125675300671F7B-2BA4C8B1EE81007BC1256EC1003757E0-Mantiuk2004HDREnc}, PUBLISHER = {Association for Computing Machinery}, ADDRESS = {New York, NY}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Due to rapid technological progress in high dynamic range (HDR) video capture and display, the efficient storage and transmission of such data is crucial for the completeness of any HDR imaging pipeline. We propose a new approach for inter-frame encoding of HDR video, which is embedded in the well-established MPEG-4 video compression standard. The key component of our technique is luminance quantization that is optimized for the contrast threshold perception in the human visual system. The quantization scheme requires only 10--11 bits to encode 12 orders of magnitude of visible luminance range and does not lead to perceivable contouring artifacts. Besides video encoding, the proposed quantization provides perceptually-optimized luminance sampling for fast implementation of any global tone mapping operator using a lookup table. To improve the quality of synthetic video sequences, we introduce a coding scheme for discrete cosine transform (DCT) blocks with high contrast. We demonstrate the capabilities of HDR video in a player, which enables decoding, tone mapping, and applying post-processing effects in real-time. The tone mapping algorithm as well as its parameters can be changed interactively while the video is playing. We can simulate post-processing effects such as glare, night vision, and motion blur, which appear very realistic due to the usage of HDR data.}, JOURNAL = {ACM Transactions on Graphics}, EDITOR = {Marks, Joe}, VOLUME = {23}, NUMBER = {3}, PAGES = {733--741}, }
Endnote
%0 Journal Article %A Mantiuk, Rafal %A Krawczyk, Grzegorz %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-motivated High Dynamic Range Video Encoding : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2AFA-0 %F EDOC: 231948 %F OTHER: Local-ID: C125675300671F7B-2BA4C8B1EE81007BC1256EC1003757E0-Mantiuk2004HDREnc %R 10.1145/1015706.1015794 %D 2004 %* Review method: peer-reviewed %X Due to rapid technological progress in high dynamic range (HDR) video capture and display, the efficient storage and transmission of such data is crucial for the completeness of any HDR imaging pipeline. We propose a new approach for inter-frame encoding of HDR video, which is embedded in the well-established MPEG-4 video compression standard. The key component of our technique is luminance quantization that is optimized for the contrast threshold perception in the human visual system. The quantization scheme requires only 10--11 bits to encode 12 orders of magnitude of visible luminance range and does not lead to perceivable contouring artifacts. Besides video encoding, the proposed quantization provides perceptually-optimized luminance sampling for fast implementation of any global tone mapping operator using a lookup table. To improve the quality of synthetic video sequences, we introduce a coding scheme for discrete cosine transform (DCT) blocks with high contrast. We demonstrate the capabilities of HDR video in a player, which enables decoding, tone mapping, and applying post-processing effects in real-time. The tone mapping algorithm as well as its parameters can be changed interactively while the video is playing. We can simulate post-processing effects such as glare, night vision, and motion blur, which appear very realistic due to the usage of HDR data. %J ACM Transactions on Graphics %V 23 %N 3 %& 733 %P 733 - 741 %I Association for Computing Machinery %C New York, NY %@ false
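A minimal sketch of the lookup-table idea described above, assuming a uniform spacing in log10 luminance over 12 orders of magnitude; the paper instead derives the code spacing from contrast detection thresholds, so this stand-in only mimics the interface, not the published quantization curve.

    import numpy as np

    BITS = 11                              # the paper reports 10--11 bits suffice
    N = 2 ** BITS
    # Code-to-luminance table spanning 12 orders of magnitude (1e-4..1e8 cd/m^2).
    log_edges = np.linspace(-4.0, 8.0, N)

    def encode(L):
        idx = np.searchsorted(log_edges, np.log10(np.maximum(L, 1e-4)))
        return np.clip(idx, 0, N - 1)      # integer luma codes

    def decode(code):
        return 10.0 ** log_edges[np.clip(code, 0, N - 1)]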
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2004a. Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes. Proceedings of the 2004 Computer Graphics International Conference (CGI 2004), IEEE.
Abstract
Efficient global illumination computation in dynamically changing environments is an important practical problem. In high-quality animation rendering, the costly "final gathering" technique is commonly used. We extend this technique into the temporal domain by exploiting coherence between subsequent frames. For this purpose we store previously computed incoming radiance samples and refresh them evenly in space and time using aging criteria. The approach is based upon a two-pass photon mapping algorithm with an irradiance cache, but it can also be applied in other gathering methods. The algorithm significantly reduces the cost of expensive indirect lighting computation and suppresses temporal aliasing with respect to state-of-the-art frame-by-frame rendering techniques.
Export
BibTeX
@inproceedings{Tawara-et-al_CGI04, TITLE = {Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes}, AUTHOR = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2171-1}, DOI = {10.1109/CGI.2004.1309199}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Efficient global illumination computation in dynamically changing environments is an important practical problem. In high-quality animation rendering, the costly "final gathering" technique is commonly used. We extend this technique into the temporal domain by exploiting coherence between subsequent frames. For this purpose we store previously computed incoming radiance samples and refresh them evenly in space and time using aging criteria. The approach is based upon a two-pass photon mapping algorithm with an irradiance cache, but it can also be applied in other gathering methods. The algorithm significantly reduces the cost of expensive indirect lighting computation and suppresses temporal aliasing with respect to state-of-the-art frame-by-frame rendering techniques.}, BOOKTITLE = {Proceedings of the 2004 Computer Graphics International Conference (CGI 2004)}, EDITOR = {Cohen-Or, Daniel and Jain, Lakhmi and Magnenat-Thalmann, Nadia}, PAGES = {110--119}, ADDRESS = {Crete, Greece}, }
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Temporal Coherence in Final Gathering for Dynamic Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A93-5 %F EDOC: 231897 %R 10.1109/CGI.2004.1309199 %D 2004 %B Computer Graphics International 2004 %Z date of event: 2004-06-16 - 2004-06-19 %C Crete, Greece %X Efficient global illumination computation in dynamically changing environments is an important practical problem. In high-quality animation rendering, the costly "final gathering" technique is commonly used. We extend this technique into the temporal domain by exploiting coherence between subsequent frames. For this purpose we store previously computed incoming radiance samples and refresh them evenly in space and time using aging criteria. The approach is based upon a two-pass photon mapping algorithm with an irradiance cache, but it can also be applied in other gathering methods. The algorithm significantly reduces the cost of expensive indirect lighting computation and suppresses temporal aliasing with respect to state-of-the-art frame-by-frame rendering techniques. %B Proceedings of the 2004 Computer Graphics International Conference %E Cohen-Or, Daniel; Jain, Lakhmi; Magnenat-Thalmann, Nadia %P 110 - 119 %I IEEE %@ 0-7695-2171-1
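A minimal sketch of the aging idea under assumed data structures (not the paper's): every cached incoming-radiance sample remembers the frame at which it was last traced, and a fixed per-frame budget of the oldest samples is re-traced, spreading refreshes evenly over time.

    # 'cache' is a list of dicts and 'retrace' a user-supplied ray-tracing
    # callback; both are assumptions made for this sketch.
    def refresh_oldest(cache, frame, budget, retrace):
        cache.sort(key=lambda s: s["last_traced"])   # oldest samples first
        for sample in cache[:budget]:
            sample["radiance"] = retrace(sample["position"], sample["direction"])
            sample["last_traced"] = frame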
Tawara, T., Myszkowski, K., Dmitriev, K., Havran, V., Damez, C., and Seidel, H.-P. 2004b. Exploiting Temporal Coherence in Global Illumination. Proceedings of the 20th Spring Conference on Computer Graphics (SCCG 2004), ACM.
Abstract
Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a number of global illumination and rendering solutions that exploit temporal coherence in lighting distribution for subsequent frames to improve the computation performance and overall animation quality. Our strategy relies on extending into the temporal domain well-known global illumination techniques such as density estimation photon tracing, photon mapping, and bi-directional path tracing, which were originally designed to handle static scenes only.
Export
BibTeX
@inproceedings{Tawara-et-al_SCCG04, TITLE = {Exploiting Temporal Coherence in Global Illumination}, AUTHOR = {Tawara, Takehiro and Myszkowski, Karol and Dmitriev, Kirill and Havran, Vlastimil and Damez, Cyrille and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-58113-914-4}, DOI = {10.1145/1037210.1037214}, LOCALID = {Local-ID: C125675300671F7B-6088B687D952F1E4C1256EC1002F0C62-Tawara2004b}, PUBLISHER = {ACM}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a number of global illumination and rendering solutions that exploit temporal coherence in lighting distribution for subsequent frames to improve the computation performance and overall animation quality. Our strategy relies on extending into the temporal domain well-known global illumination techniques such as density estimation photon tracing, photon mapping, and bi-directional path tracing, which were originally designed to handle static scenes only.}, BOOKTITLE = {Proceedings of the 20th Spring Conference on Computer Graphics (SCCG 2004)}, EDITOR = {Pasko, Alexander}, PAGES = {23--33}, ADDRESS = {Budmerice, Slovakia}, }
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Dmitriev, Kirill %A Havran, Vlastimil %A Damez, Cyrille %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Exploiting Temporal Coherence in Global Illumination : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A95-1 %F EDOC: 231906 %F OTHER: Local-ID: C125675300671F7B-6088B687D952F1E4C1256EC1002F0C62-Tawara2004b %R 10.1145/1037210.1037214 %D 2004 %B 20th Spring Conference on Computer Graphics %Z date of event: 2004-04-22 - 2004-04-24 %C Budmerice, Slovakia %X Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a number of global illumination and rendering solutions that exploit temporal coherence in lighting distribution for subsequent frames to improve the computation performance and overall animation quality. Our strategy relies on extending into the temporal domain well-known global illumination techniques such as density estimation photon tracing, photon mapping, and bi-directional path tracing, which were originally designed to handle static scenes only. %B Proceedings of the 20th Spring Conference on Computer Graphics %E Pasko, Alexander %P 23 - 33 %I ACM %@ 1-58113-914-4
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2004c. Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm. Theory and Practice of Computer Graphics 2004 (TPCG 2004), IEEE.
Abstract
In this paper we propose an efficient algorithm for handling strong secondary light sources within the photon mapping framework. We introduce an additional photon map as an implicit representation of such light sources. At the rendering stage this map is used for the explicit sampling of strong indirect lighting in a similar way as it is usually performed for primary light sources. Our technique works fully automatically, improves the computation performance, and leads to a better image quality than traditional rendering approaches.
Export
BibTeX
@inproceedings{Tawara-et-al_TPCG04, TITLE = {Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm}, AUTHOR = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2137-1}, DOI = {10.1109/TPCG.2004.1314468}, LOCALID = {Local-ID: C125675300671F7B-9FD06C3F844A7B2EC1256E5C003A7515-Tawara2004c}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {In this paper we propose an efficient algorithm for handling strong secondary light sources within the photon mapping framework. We introduce an additional photon map as an implicit representation of such light sources. At the rendering stage this map is used for the explicit sampling of strong indirect lighting in a similar way as it is usually performed for primary light sources. Our technique works fully automatically, improves the computation performance, and leads to a better image quality than traditional rendering approaches.}, BOOKTITLE = {Theory and Practice of Computer Graphics 2004 (TPCG 2004)}, EDITOR = {Lever, Paul G.}, PAGES = {174--178}, ADDRESS = {Bournemouth, UK}, }
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Rendering of Strong Secondary Lighting in Photon Mapping Algorithm : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2A80-F %F EDOC: 231931 %F OTHER: Local-ID: C125675300671F7B-9FD06C3F844A7B2EC1256E5C003A7515-Tawara2004c %R 10.1109/TPCG.2004.1314468 %D 2004 %B Theory and Practice of Computer Graphics 2004 %Z date of event: 2004-06-08 - 2004-06-10 %C Bournemouth, UK %X In this paper we propose an efficient algorithm for handling strong secondary light sources within the photon mapping framework. We introduce an additional photon map as an implicit representation of such light sources. At the rendering stage this map is used for the explicit sampling of strong indirect lighting in a similar way as it is usually performed for primary light sources. Our technique works fully automatically, improves the computation performance, and leads to a better image quality than traditional rendering approaches. %B Theory and Practice of Computer Graphics 2004 %E Lever, Paul G. %P 174 - 178 %I IEEE %@ 0-7695-2137-1
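The sketch below illustrates the general idea of sampling strong indirect lighting explicitly, but by a different, better-known route: photon hits on a brightly lit surface are clustered into a few virtual point lights (plain k-means) that can then be sampled like primary lights. The paper's implicit photon-map representation and its automatic detection of strong secondary sources are not reproduced here.

    import numpy as np

    def virtual_point_lights(positions, powers, n_lights=8, iters=10):
        rng = np.random.default_rng(1)
        centers = positions[rng.choice(len(positions), n_lights, replace=False)].copy()
        for _ in range(iters):                 # plain k-means on photon hit points
            dist = np.linalg.norm(positions[:, None, :] - centers[None, :, :], axis=2)
            labels = dist.argmin(axis=1)
            for k in range(n_lights):
                if np.any(labels == k):
                    centers[k] = positions[labels == k].mean(axis=0)
        cluster_power = np.array([powers[labels == k].sum() for k in range(n_lights)])
        return centers, cluster_power          # sampled like explicit light sources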
Weber, M., Milch, M., Myszkowski, K., Dmitriev, K.A., Rokita, P., and Seidel, H.-P. 2004. Spatio-temporal Photon Density Estimation Using Bilateral Filtering. Proceedings of the 2004 Computer Graphics International Conference (CGI 2004), IEEE.
Abstract
Photon tracing and density estimation are well-established techniques in global illumination computation and rendering of high-quality animation sequences. Using traditional density estimation techniques it is difficult to remove the stochastic noise inherent in photon-based methods while avoiding over-blurring lighting details. In this paper we investigate the use of bilateral filtering for lighting reconstruction based on the local density of photon hit points. Bilateral filtering is applied in the spatio-temporal domain and provides control over the level of detail in reconstructed lighting. All changes of lighting below this level are treated as stochastic noise and are suppressed. Bilateral filtering proves to be efficient in preserving sharp features in lighting, which is particularly important for high-quality caustic reconstruction. Also, flickering between subsequent animation frames is substantially reduced due to extending bilateral filtering into the temporal domain.
Export
BibTeX
@inproceedings{Weber-et-al_CGI04, TITLE = {Spatio-temporal Photon Density Estimation Using Bilateral Filtering}, AUTHOR = {Weber, Markus and Milch, Marco and Myszkowski, Karol and Dmitriev, Kirill Alexandrovich and Rokita, Przemyslaw and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-7695-2171-1}, DOI = {10.1109/CGI.2004.1309200}, LOCALID = {Local-ID: C125675300671F7B-E7C820E451C4356AC1256E46006AB0DF-Weber2004}, PUBLISHER = {IEEE}, YEAR = {2004}, DATE = {2004}, ABSTRACT = {Photon tracing and density estimation are well-established techniques in global illumination computation and rendering of high-quality animation sequences. Using traditional density estimation techniques it is difficult to remove the stochastic noise inherent in photon-based methods while avoiding over-blurring lighting details. In this paper we investigate the use of bilateral filtering for lighting reconstruction based on the local density of photon hit points. Bilateral filtering is applied in the spatio-temporal domain and provides control over the level of detail in reconstructed lighting. All changes of lighting below this level are treated as stochastic noise and are suppressed. Bilateral filtering proves to be efficient in preserving sharp features in lighting, which is particularly important for high-quality caustic reconstruction. Also, flickering between subsequent animation frames is substantially reduced due to extending bilateral filtering into the temporal domain.}, BOOKTITLE = {Proceedings of the 2004 Computer Graphics International Conference (CGI 2004)}, EDITOR = {Cohen-Or, Daniel and Jain, Lakhmi and Magnenat-Thalmann, Nadia}, PAGES = {120--127}, ADDRESS = {Crete, Greece}, }
Endnote
%0 Conference Proceedings %A Weber, Markus %A Milch, Marco %A Myszkowski, Karol %A Dmitriev, Kirill Alexandrovich %A Rokita, Przemyslaw %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Spatio-temporal Photon Density Estimation Using Bilateral Filtering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2B40-6 %F EDOC: 231920 %F OTHER: Local-ID: C125675300671F7B-E7C820E451C4356AC1256E46006AB0DF-Weber2004 %R 10.1109/CGI.2004.1309200 %D 2004 %B Computer Graphics International 2004 %Z date of event: 2004-06-16 - 2004-06-19 %C Crete, Greece %X Photon tracing and density estimation are well-established techniques in global illumination computation and rendering of high-quality animation sequences. Using traditional density estimation techniques it is difficult to remove the stochastic noise inherent in photon-based methods while avoiding over-blurring lighting details. In this paper we investigate the use of bilateral filtering for lighting reconstruction based on the local density of photon hit points. Bilateral filtering is applied in the spatio-temporal domain and provides control over the level of detail in reconstructed lighting. All changes of lighting below this level are treated as stochastic noise and are suppressed. Bilateral filtering proves to be efficient in preserving sharp features in lighting, which is particularly important for high-quality caustic reconstruction. Also, flickering between subsequent animation frames is substantially reduced due to extending bilateral filtering into the temporal domain. %B Proceedings of the 2004 Computer Graphics International Conference %E Cohen-Or, Daniel; Jain, Lakhmi; Magnenat-Thalmann, Nadia %P 120 - 127 %I IEEE %@ 0-7695-2171-1
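For illustration, a brute-force spatio-temporal bilateral filter over a dense (frames, height, width) luminance volume; the paper filters photon hit-point densities rather than a pixel grid, so treat this only as a sketch of the weighting scheme: a spatio-temporal Gaussian times a photometric (range) Gaussian, which smooths noise and flicker while preserving sharp lighting features.

    import numpy as np

    def st_bilateral(video, radius=2, sigma_s=2.0, sigma_r=0.1):
        F, H, W = video.shape
        out = np.zeros_like(video)
        for f in range(F):
            for y in range(H):
                for x in range(W):
                    acc = wacc = 0.0
                    # Neighborhood spans space (dy, dx) and time (df).
                    for df in range(-radius, radius + 1):
                        for dy in range(-radius, radius + 1):
                            for dx in range(-radius, radius + 1):
                                g, i, j = f + df, y + dy, x + dx
                                if 0 <= g < F and 0 <= i < H and 0 <= j < W:
                                    w = np.exp(
                                        -(df*df + dy*dy + dx*dx) / (2 * sigma_s**2)
                                        - (video[g, i, j] - video[f, y, x])**2 / (2 * sigma_r**2))
                                    acc += w * video[g, i, j]
                                    wacc += w
                    out[f, y, x] = acc / wacc
        return out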
2003
Damez, C., Dmitriev, K., and Myszkowski, K. 2003. State of the Art for Global Illumination in Interactive Applications and High-Quality Animations. Computer Graphics Forum22.
Abstract
Global illumination algorithms are regarded as computationally intensive. This cost is a practical problem when producing animations or when interactions with complex models are required. Several algorithms have been proposed to address this issue. Roughly, two families of methods can be distinguished. The first one aims at providing interactive feedback for lighting design applications. The second one gives higher priority to the quality of results, and therefore relies on offline computations. Recently, impressive advances have been made in both categories. In this report, we present a survey and classification of the most up-to-date of these methods.
Export
BibTeX
@article{DDM2003, TITLE = {State of the Art for Global Illumination in Interactive Applications and High-Quality Animations}, AUTHOR = {Damez, Cyrille and Dmitriev, Kirill and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0167-7055}, LOCALID = {Local-ID: C125675300671F7B-884A78970185CB39C1256D030043DEC6-DDM2003}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {Global illumination algorithms are regarded as computationally intensive. This cost is a practical problem when producing animations or when interactions with complex models are required. Several algorithms have been proposed to address this issue. Roughly, two families of methods can be distinguished. The first one aims at providing interactive feedback for lighting design applications. The second one gives higher priority to the quality of results, and therefore relies on offline computations. Recently, impressive advances have been made in both categories. In this report, we present a survey and classification of the most up-to-date of these methods.}, JOURNAL = {Computer Graphics Forum}, VOLUME = {22}, PAGES = {55--77}, }
Endnote
%0 Journal Article %A Damez, Cyrille %A Dmitriev, Kirill %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T State of the Art for Global Illumination in Interactive Applications and High-Quality Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2E2E-B %F EDOC: 202039 %F OTHER: Local-ID: C125675300671F7B-884A78970185CB39C1256D030043DEC6-DDM2003 %D 2003 %* Review method: peer-reviewed %X Global illumination algorithms are regarded as computationally intensive. This cost is a practical problem when producing animations or when interactions with complex models are required. Several algorithms have been proposed to address this issue. Roughly, two families of methods can be distinguished. The first one aims at providing interactive feedback for lighting design applications. The second one gives higher priority to the quality of results, and therefore relies on offline computations. Recently, impressive advances have been made in both categories. In this report, we present a survey and classification of the most up-to-date of these methods. %J Computer Graphics Forum %V 22 %& 55 %P 55 - 77 %@ false
Drago, F., Myszkowski, K., Annen, T., and Chiba, N. 2003a. Adaptive Logarithmic Mapping For Displaying High Contrast Scenes. EUROGRAPHICS 2003 (EUROGRAPHICS-03) : the European Association for Computer Graphics, 24th Annual Conference, Blackwell.
Abstract
We propose a fast, high quality tone mapping technique to display high contrast images on devices with limited dynamic range of luminance values. The method is based on logarithmic compression of luminance values, imitating the human response to light. A bias power function is introduced to adaptively vary logarithmic bases, resulting in good preservation of details and contrast. To improve contrast in dark areas, changes to the gamma correction procedure are proposed. Our adaptive logarithmic mapping technique is capable of producing perceptually tuned images with high dynamic content and works at interactive speeds. We demonstrate a successful application of our technique to a high dynamic range video player, which makes it possible to adjust optimal viewing conditions for any kind of display while taking into account user preferences concerning brightness, contrast compression, and detail reproduction.
Export
BibTeX
@inproceedings{Drago2003b, TITLE = {Adaptive Logarithmic Mapping For Displaying High Contrast Scenes}, AUTHOR = {Drago, Frederic and Myszkowski, Karol and Annen, Thomas and Chiba, Norishige}, EDITOR = {Brunet, Pere and Fellner, Dieter W.}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-53A4B81D590A3EEAC1256CFD003CE441-Drago2003b}, PUBLISHER = {Blackwell}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {We propose a fast, high quality tone mapping technique to display high contrast images on devices with limited dynamic range of luminance values. The method is based on logarithmic compression of luminance values, imitating the human response to light. A bias power function is introduced to adaptively vary logarithmic bases, resulting in good preservation of details and contrast. To improve contrast in dark areas, changes to the gamma correction procedure are proposed. Our adaptive logarithmic mapping technique is capable of producing perceptually tuned images with high dynamic content and works at interactive speeds. We demonstrate a successful application of our technique to a high dynamic range video player, which makes it possible to adjust optimal viewing conditions for any kind of display while taking into account user preferences concerning brightness, contrast compression, and detail reproduction.}, BOOKTITLE = {EUROGRAPHICS 2003 (EUROGRAPHICS-03) : the European Association for Computer Graphics, 24th Annual Conference}, PAGES = {419--426}, SERIES = {Computer Graphics Forum}, ADDRESS = {Granada, Spain}, }
Endnote
%0 Conference Proceedings %A Drago, Frederic %A Myszkowski, Karol %A Annen, Thomas %A Chiba, Norishige %E Brunet, Pere %E Fellner, Dieter W. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Adaptive Logarithmic Mapping For Displaying High Contrast Scenes : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2BEF-F %F EDOC: 201862 %F OTHER: Local-ID: C125675300671F7B-53A4B81D590A3EEAC1256CFD003CE441-Drago2003b %D 2003 %B EUROGRAPHICS 2003 %Z date of event: 2003-09-01 - 2003-09-05 %C Granada, Spain %X We propose a fast, high quality tone mapping technique to display high contrast images on devices with limited dynamic range of luminance values. The method is based on logarithmic compression of luminance values, imitating the human response to light. A bias power function is introduced to adaptively vary logarithmic bases, resulting in good preservation of details and contrast. To improve contrast in dark areas, changes to the gamma correction procedure are proposed. Our adaptive logarithmic mapping technique is capable of producing perceptually tuned images with high dynamic content and works at interactive speeds. We demonstrate a successful application of our technique to a high dynamic range video player, which makes it possible to adjust optimal viewing conditions for any kind of display while taking into account user preferences concerning brightness, contrast compression, and detail reproduction. %B EUROGRAPHICS 2003 (EUROGRAPHICS-03) : the European Association for Computer Graphics, 24th Annual Conference %P 419 - 426 %I Blackwell %B Computer Graphics Forum
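A compact sketch of the mapping as it is commonly reproduced from this paper: world luminance is compressed logarithmically while a bias power function interpolates the logarithm base between 2 and 10. Gamma correction and the proposed dark-area adjustments are omitted, and the default b = 0.85 follows the paper's recommendation.

    import numpy as np

    def drago_tonemap(L_w, b=0.85, L_dmax=100.0):
        L_wmax = float(L_w.max())
        bias_exp = np.log(b) / np.log(0.5)     # bias power function exponent
        scale = (L_dmax * 0.01) / np.log10(L_wmax + 1.0)
        # Per-pixel logarithm base varies between 2 and 10 via the bias term.
        L_d = scale * np.log(L_w + 1.0) / np.log(2.0 + 8.0 * (L_w / L_wmax) ** bias_exp)
        return np.clip(L_d, 0.0, 1.0)          # relative display luminance in [0, 1]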
Drago, F., Martens, W., Myszkowski, K., and Chiba, N. 2003b. Design of a Tone Mapping Operator for High Dynamic Range Images Based upon Psychophysical Evaluation and Preference Mapping. Human Vision and Electronic Imaging VIII (HVEI-03), SPIE.
Abstract
A tone mapping algorithm for displaying high contrast scenes was designed on the basis of the results of experimental tests using human subjects. Systematic perceptual evaluation of several existing tone mapping techniques revealed that the most ``natural'' appearance was determined by the presence in the output image of detailed scenery features, often made visible by limiting contrast and by properly reproducing brightness. Taking these results into account, we developed a system to produce images close to the ideal preference point for high dynamic range input image data. Of the algorithms that we tested, only the Retinex algorithm was capable of retrieving detailed scene features hidden in high luminance areas while still preserving a good contrast level. This paper presents changes made to the Retinex algorithm for processing high dynamic range images, and a further integration of Retinex with specialized tone mapping algorithms that enables the production of images that appear as similar as possible to the viewer's perception of actual scenes.
Export
BibTeX
@inproceedings{Myszkowski2003, TITLE = {Design of a Tone Mapping Operator for High Dynamic Range Images Based upon Psychophysical Evaluation and Preference Mapping}, AUTHOR = {Drago, Frederic and Martens, William and Myszkowski, Karol and Chiba, Norishige}, EDITOR = {Rogowitz, Bernice and Pappas, Thrasyvoulos}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-AC62EE60325404E4C1256CE8006BA646-Myszkowski2003}, PUBLISHER = {SPIE}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {A tone mapping algorithm for displaying high contrast scenes was designed on the basis of the results of experimental tests using human subjects. Systematic perceptual evaluation of several existing tone mapping techniques revealed that the most ``natural'' appearance was determined by the presence in the output image of detailed scenery features, often made visible by limiting contrast and by properly reproducing brightness. Taking these results into account, we developed a system to produce images close to the ideal preference point for high dynamic range input image data. Of the algorithms that we tested, only the Retinex algorithm was capable of retrieving detailed scene features hidden in high luminance areas while still preserving a good contrast level. This paper presents changes made to the Retinex algorithm for processing high dynamic range images, and a further integration of Retinex with specialized tone mapping algorithms that enables the production of images that appear as similar as possible to the viewer's perception of actual scenes.}, BOOKTITLE = {Human Vision and Electronic Imaging VIII (HVEI-03)}, PAGES = {321--331}, SERIES = {SPIE proceedings}, ADDRESS = {Santa Clara, USA}, }
Endnote
%0 Conference Proceedings %A Drago, Frederic %A Martens, William %A Myszkowski, Karol %A Chiba, Norishige %E Rogowitz, Bernice %E Pappas, Thrasyvoulos %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Max Planck Society %T Design of a Tone Mapping Operator for High Dynamic Range Images Based upon Psychophysical Evaluation and Preference Mapping : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2CB4-B %F EDOC: 201868 %F OTHER: Local-ID: C125675300671F7B-AC62EE60325404E4C1256CE8006BA646-Myszkowski2003 %D 2003 %B HVEI 2003 %Z date of event: 2003-01-21 - 2003-01-23 %C Santa Clara, USA %X A tone mapping algorithm for displaying high contrast scenes was designed on the basis of the results of experimental tests using human subjects. Systematic perceptual evaluation of several existing tone mapping techniques revealed that the most ``natural'' appearance was determined by the presence in the output image of detailed scenery features, often made visible by limiting contrast and by properly reproducing brightness. Taking these results into account, we developed a system to produce images close to the ideal preference point for high dynamic range input image data. Of the algorithms that we tested, only the Retinex algorithm was capable of retrieving detailed scene features hidden in high luminance areas while still preserving a good contrast level. This paper presents changes made to the Retinex algorithm for processing high dynamic range images, and a further integration of Retinex with specialized tone mapping algorithms that enables the production of images that appear as similar as possible to the viewer's perception of actual scenes. %B Human Vision and Electronic Imaging VIII (HVEI-03) %P 321 - 331 %I SPIE %B SPIE proceedings
Drago, F., Martens, W.L., Myszkowski, K., and Seidel, H.-P. 2003c. Perceptual Evaluation of Tone Mapping Operators. Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/DragoMMS03, TITLE = {Perceptual Evaluation of Tone Mapping Operators}, AUTHOR = {Drago, Fr{\'e}d{\'e}ric and Martens, William L. and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7466-8}, DOI = {10.1145/965400.965487}, PUBLISHER = {ACM}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications}, EDITOR = {Rockwood, Alyn P.}, PAGES = {1--1}, ADDRESS = {San Diego, CA, USA}, }
Endnote
%0 Conference Proceedings %A Drago, Frédéric %A Martens, William L. %A Myszkowski, Karol %A Seidel, Hans-Peter %+ External Organizations External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual Evaluation of Tone Mapping Operators : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0CB3-A %R 10.1145/965400.965487 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego, CA, USA %B Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Havran, V., Damez, C., Myszkowski, K., and Seidel, H.-P. 2003a. An Efficient Spatio-temporal Architecture for Animation Rendering. Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications, ACM.
Export
BibTeX
@inproceedings{DBLP:conf/siggraph/HavranDMS03, TITLE = {An Efficient Spatio-temporal Architecture for Animation Rendering}, AUTHOR = {Havran, Vlastimil and Damez, Cyrille and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-4503-7466-8}, DOI = {10.1145/965400.965402}, PUBLISHER = {ACM}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications}, EDITOR = {Rockwood, Alyn P.}, PAGES = {1--1}, ADDRESS = {San Diego}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Damez, Cyrille %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Spatio-temporal Architecture for Animation Rendering : %G eng %U http://hdl.handle.net/21.11116/0000-000F-0CA2-D %R 10.1145/965400.965402 %D 2003 %B ACM SIGGRAPH 2003 Conference on Sketches and Applications %Z date of event: 2003-07-27 - 2003-07-31 %C San Diego %B Proceedings of the ACM SIGGRAPH 2003 Conference on Sketches and Applications %E Rockwood, Alyn P. %P 1 - 1 %I ACM %@ 978-1-4503-7466-8
Havran, V., Damez, C., Myszkowski, K., and Seidel, H.-P. 2003b. An Efficient Spatio-temporal Architecture for Animation Rendering. Rendering Techniques 2003 (EGWR 2003), The Eurographics Association.
Abstract
Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a rendering architecture for computing multiple frames at once by exploiting the coherence between image samples in the temporal domain. For each sample representing a given point in the scene we update its view-dependent components for each frame and add its contribution to pixels identified through the compensation of camera and object motion. This leads naturally to high-quality motion blur and significantly reduces the cost of illumination computations. The required visibility information is provided using a custom ray tracing acceleration data structure for multiple frames simultaneously. We demonstrate that precise and costly global illumination techniques such as bidirectional path tracing become affordable in this rendering architecture.
Export
BibTeX
@inproceedings{Havran-et-al_EGWR03, TITLE = {An Efficient Spatio-temporal Architecture for Animation Rendering}, AUTHOR = {Havran, Vlastimil and Damez, Cyrille and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-58113-754-0}, DOI = {10.2312/EGWR/EGWR03/106-117}, LOCALID = {Local-ID: C125675300671F7B-375DE41ADBC27783C1256D2500414C13-Havran2003:EGSR}, PUBLISHER = {The Eurographics Association}, YEAR = {2003}, DATE = {2003}, ABSTRACT = {Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a rendering architecture for computing multiple frames at once by exploiting the coherence between image samples in the temporal domain. For each sample representing a given point in the scene we update its view-dependent components for each frame and add its contribution to pixels identified through the compensation of camera and object motion. This leads naturally to high-quality motion blur and significantly reduces the cost of illumination computations. The required visibility information is provided using a custom ray tracing acceleration data structure for multiple frames simultaneously. We demonstrate that precise and costly global illumination techniques such as bidirectional path tracing become affordable in this rendering architecture.}, BOOKTITLE = {Rendering Techniques 2003 (EGWR 2003)}, EDITOR = {Christensen, Per and Cohen-Or, Daniel}, PAGES = {106--117, 303}, ADDRESS = {Leuven, Belgium}, }
Endnote
%0 Conference Proceedings %A Havran, Vlastimil %A Damez, Cyrille %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T An Efficient Spatio-temporal Architecture for Animation Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C20-6 %F EDOC: 201824 %F OTHER: Local-ID: C125675300671F7B-375DE41ADBC27783C1256D2500414C13-Havran2003:EGSR %R 10.2312/EGWR/EGWR03/106-117 %D 2003 %B 14th Eurographics Workshop on Rendering Techniques %Z date of event: 2003-06-25 - 2003-06-27 %C Leuven, Belgium %X Producing high quality animations featuring rich object appearance and compelling lighting effects is very time-consuming using traditional frame-by-frame rendering systems. In this paper we present a rendering architecture for computing multiple frames at once by exploiting the coherence between image samples in the temporal domain. For each sample representing a given point in the scene we update its view-dependent components for each frame and add its contribution to pixels identified through the compensation of camera and object motion. This leads naturally to high-quality motion blur and significantly reduces the cost of illumination computations. The required visibility information is provided using a custom ray tracing acceleration data structure for multiple frames simultaneously. We demonstrate that precise and costly global illumination techniques such as bidirectional path tracing become affordable in this rendering architecture. %B Rendering Techniques 2003 %E Christensen, Per; Cohen-Or, Daniel %P 106 - 117, 303 %I The Eurographics Association %@ 1-58113-754-0
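A toy sketch of the multi-frame idea under assumed data structures: each scene sample carries a view-independent radiance computed once per segment plus per-frame view-dependent terms, and is splatted into every frame at a motion-compensated (integer) pixel position. The actual architecture, including its multi-frame ray tracing acceleration structure, is far more involved.

    import numpy as np

    def render_segment(samples, n_frames, width, height):
        frames = np.zeros((n_frames, height, width, 3))
        counts = np.zeros((n_frames, height, width, 1))
        for s in samples:                         # 's' layout is hypothetical
            L_static = s["diffuse_radiance"]      # computed once per segment
            for f in range(n_frames):
                x, y = s["pixel_per_frame"][f]    # camera/object motion compensation
                if 0 <= x < width and 0 <= y < height:
                    # Only the view-dependent term is re-evaluated per frame.
                    L = L_static + s["specular_per_frame"][f]
                    frames[f, y, x] += L
                    counts[f, y, x] += 1.0
        return frames / np.maximum(counts, 1.0)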
Mantiuk, R., Myszkowski, K., and Pattanaik, S. 2003. Attention Guided MPEG Compression for Computer Animations. Proceedings of the 19th Spring Conference on Computer Graphics 2003 (SCCG 03), ACM.
Export
BibTeX
@inproceedings{Mantiuk2003b, TITLE = {Attention Guided {MPEG} Compression for Computer Animations}, AUTHOR = {Mantiuk, Rafal and Myszkowski, Karol and Pattanaik, Sumant}, EDITOR = {Joy, Kenneth I.}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-19ABC3A0ED74A809C1256CFD003DB24F-Mantiuk2003b}, PUBLISHER = {ACM}, YEAR = {2003}, DATE = {2003}, BOOKTITLE = {Proceedings of the 19th Spring Conference on Computer Graphics 2003 (SCCG 03)}, PAGES = {262--267}, ADDRESS = {Budmerice, Slovakia}, }
Endnote
%0 Conference Proceedings %A Mantiuk, Rafal %A Myszkowski, Karol %A Pattanaik, Sumant %E Joy, Kenneth I. %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society %T Attention Guided MPEG Compression for Computer Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2C52-7 %F EDOC: 202006 %F OTHER: Local-ID: C125675300671F7B-19ABC3A0ED74A809C1256CFD003DB24F-Mantiuk2003b %D 2003 %B SCCG 2003 %Z date of event: 2003-04-24 - 2003-04-26 %C Budmerice, Slovakia %B Proceedings of the 19th Spring Conference on Computer Graphics 2003 (SCCG 03) %P 262 - 267 %I ACM
2002
Damez, C., Dmitriev, K., and Myszkowski, K. 2002. Global Illumination for Interactive Applications and High-Quality Animations. Eurographics 2002: State of the Art Reports, Eurographics.
Export
BibTeX
@inproceedings{Damez2002, TITLE = {Global Illumination for Interactive Applications and High-Quality Animations}, AUTHOR = {Damez, Cyrille and Dmitriev, Kirill and Myszkowski, Karol}, EDITOR = {Fellner, Dieter and Scopignio, Roberto}, LANGUAGE = {eng}, ISSN = {1017-4565}, LOCALID = {Local-ID: C125675300671F7B-96B7968CB1A20486C1256C3600327527-Damez2002}, PUBLISHER = {Eurographics}, YEAR = {2002}, DATE = {2002}, BOOKTITLE = {Eurographics 2002: State of the Art Reports}, PAGES = {1--24}, ADDRESS = {Saarbr{\"u}cken, Germany}, }
Endnote
%0 Conference Proceedings %A Damez, Cyrille %A Dmitriev, Kirill %A Myszkowski, Karol %E Fellner, Dieter %E Scopignio, Roberto %+ Computer Graphics, MPI for Informatics, Max Planck Society Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Global Illumination for Interactive Applications and High-Quality Animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2F9F-3 %F EDOC: 202132 %F OTHER: Local-ID: C125675300671F7B-96B7968CB1A20486C1256C3600327527-Damez2002 %D 2002 %B EUROGRAPHICS 2002 STAR %Z date of event: 2002-09-02 - 2002-09-06 %C Saarbrücken, Germany %B Eurographics 2002: State of the Art Reports %P 1 - 24 %I Eurographics %@ false
Dmitriev, K., Brabec, S., Myszkowski, K., and Seidel, H.-P. 2002. Interactive Global Illumination Using Selective Photon Tracing. Proceedings of the 13th Eurographics Workshop on Rendering, Eurographics Association.
Export
BibTeX
@inproceedings{Dmitriev-et-al_Eurographics02, TITLE = {Interactive Global Illumination Using Selective Photon Tracing}, AUTHOR = {Dmitriev, Kirill and Brabec, Stefan and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-534-3}, DOI = {10.2312/EGWR/EGWR02/025-036}, LOCALID = {Local-ID: C125675300671F7B-5D4014450BF5525BC1256C360028CE0D-Dmitriev2002}, PUBLISHER = {Eurographics Association}, YEAR = {2002}, DATE = {2002}, BOOKTITLE = {Proceedings of the 13th Eurographics Workshop on Rendering}, PAGES = {25--36}, ADDRESS = {Pisa, Italy}, }
Endnote
%0 Conference Proceedings %A Dmitriev, Kirill %A Brabec, Stefan %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Interactive Global Illumination Using Selective Photon Tracing : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FBC-1 %F EDOC: 202130 %F OTHER: Local-ID: C125675300671F7B-5D4014450BF5525BC1256C360028CE0D-Dmitriev2002 %R 10.2312/EGWR/EGWR02/025-036 %D 2002 %B 13th Eurographics Workshop on Rendering %Z date of event: 2002-06-26 - 2002-06-28 %C Pisa, Italy %B Proceedings of the 13th Eurographics Workshop on Rendering %P 25 - 36 %I Eurographics Association %@ 978-1-58113-534-3
Drago, F., Martens, W., Myszkowski, K., and Seidel, H.-P. 2002. Perceptual evaluation of tone mapping operators with regard to similarity and preference. Max-Planck-Institut für Informatik, Saarbrücken.
Abstract
Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking.
Export
BibTeX
@techreport{DragoMartensMyszkowskiSeidel2002, TITLE = {Perceptual evaluation of tone mapping operators with regard to similarity and preference}, AUTHOR = {Drago, Frederic and Martens, William and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, URL = {http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-002}, NUMBER = {MPI-I-2002-4-002}, INSTITUTION = {Max-Planck-Institut f{\"u}r Informatik}, ADDRESS = {Saarbr{\"u}cken}, YEAR = {2002}, DATE = {2002}, ABSTRACT = {Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking.}, TYPE = {Research Report / Max-Planck-Institut f{\"u}r Informatik}, }
Endnote
%0 Report %A Drago, Frederic %A Martens, William %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society External Organizations Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptual evaluation of tone mapping operators with regard to similarity and preference : %G eng %U http://hdl.handle.net/11858/00-001M-0000-0014-6C83-0 %U http://domino.mpi-inf.mpg.de/internet/reports.nsf/NumberView/2002-4-002 %Y Max-Planck-Institut für Informatik %C Saarbrücken %D 2002 %P 30 p. %X Seven tone mapping methods currently available to display high dynamic range images were submitted to perceptual evaluation in order to find the attributes most predictive of the success of a robust all-around tone mapping algorithm. The two most salient Stimulus Space dimensions underlying the perception of a set of images produced by six of the tone mappings were revealed using INdividual Differences SCALing (INDSCAL) analysis; and an ideal preference point within the INDSCAL-derived Stimulus Space was determined for a group of 11 observers using PREFerence MAPping (PREFMAP) analysis. Interpretation of the INDSCAL results was aided by pairwise comparisons of images that led to an ordering of the images according to which were more or less natural looking. %B Research Report / Max-Planck-Institut für Informatik
Myszkowski, K., Tawara, T., and Seidel, H.-P. 2002. Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments. Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging, SPIE - The International Society for Optical Engineering.
Export
BibTeX
@inproceedings{Myszkowski-et-al_HVEI02, TITLE = {Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments}, AUTHOR = {Myszkowski, Karol and Tawara, Takehiro and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {0-8194-4402-2}, DOI = {10.1117/12.469514}, LOCALID = {Local-ID: C125675300671F7B-3C349C0FFBBA9B5FC1256C36002A89AD-MyszkowskiSpie2002}, PUBLISHER = {SPIE -- The International Society for Optical Engineering}, YEAR = {2002}, DATE = {2002}, BOOKTITLE = {Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging}, EDITOR = {Rogowitz, Bernice and Pappas, Thrasyvoulos}, PAGES = {187--196}, SERIES = {SPIE Proceedings Series}, VOLUME = {4662}, ADDRESS = {San Jose, USA}, }
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tawara, Takehiro %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Using Animation Quality Metric to Improve Efficiency of Global Illumination Computation for Dynamic Environments : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-30B6-B %F EDOC: 202177 %F OTHER: Local-ID: C125675300671F7B-3C349C0FFBBA9B5FC1256C36002A89AD-MyszkowskiSpie2002 %R 10.1117/12.469514 %D 2002 %B 7th SPIE Conference Human Vision and Electronic Imaging %Z date of event: 2002-01-21 - 2002-01-24 %C San Jose, USA %B Proceedings of 7th SPIE Conference Human Vision and Electronic Imaging %E Rogowitz, Bernice; Pappas, Thrasyvoulos %P 187 - 196 %I SPIE - The International Society for Optical Engineering %@ 0-8194-4402-2 %B SPIE Proceedings Series %N 4662
Myszkowski, K. 2002. Perception-Based Global Illumination, Rendering, and Animation Techniques. Proceedings of the 18th Spring Conference on Computer Graphics (SCCG 2002), ACM Siggraph.
Export
BibTeX
@inproceedings{MyszkowskiSCCG2002, TITLE = {Perception-Based Global Illumination, Rendering, and Animation Techniques}, AUTHOR = {Myszkowski, Karol}, EDITOR = {Chalmers, Alan}, LANGUAGE = {eng}, ISBN = {1-58113-608-0}, LOCALID = {Local-ID: C125675300671F7B-E4ADD3B275CD72ECC1256C3600371EFE-MyszkowskiSCCG2002}, PUBLISHER = {ACM Siggraph}, YEAR = {2002}, DATE = {2002}, BOOKTITLE = {Proceedings of the 18th Spring Conference on Computer Graphics (SCCG 2002)}, PAGES = {13--24}, ADDRESS = {Budmerice, Slovakia}, }
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %E Chalmers, Alan %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Based Global Illumination, Rendering, and Animation Techniques : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3027-B %F EDOC: 202222 %F OTHER: Local-ID: C125675300671F7B-E4ADD3B275CD72ECC1256C3600371EFE-MyszkowskiSCCG2002 %D 2002 %B SCCG 2002 %Z date of event: 2002-04-24 - 2002-04-27 %C Budmerice, Slovakia %B Proceedings of the 18th Spring Conference on Computer Graphics (SCCG 2002) %P 13 - 24 %I ACM Siggraph %@ 1-58113-608-0
Tawara, T., Myszkowski, K., and Seidel, H.-P. 2002. Localizing the Final Gathering for Dynamic Scenes using the Photon Map. Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002), Akademische Verlagsgesellschaft Aka GmbH.
Abstract
Rendering of high quality animations with global illumination effects is very costly using traditional techniques designed for static scenes. In this paper we present an extension of the photon mapping algorithm to handle dynamic environments. First, for each animation segment the static irradiance cache is computed only once for the scene with all dynamic objects removed. Then, for each frame, the dynamic objects are inserted and the irradiance cache is updated locally in the scene regions whose lighting is strongly affected by the objects. In the remaining scene regions the photon map is used to correct the irradiance values in the static cache. As a result the overall animation rendering efficiency is significantly improved and the temporal aliasing is reduced.
Export
BibTeX
@inproceedings{Tawara-et-al_VMV02, TITLE = {Localizing the Final Gathering for Dynamic Scenes using the Photon Map}, AUTHOR = {Tawara, Takehiro and Myszkowski, Karol and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {1-58603-302-6}, LOCALID = {Local-ID: C125675300671F7B-26CF9AFACE9BDEF4C1256C80005DCB2A-Tawara2002}, PUBLISHER = {Akademische Verlagsgesellschaft Aka GmbH}, YEAR = {2002}, DATE = {2002}, ABSTRACT = {Rendering of high quality animations with global illumination effects is very costly using traditional techniques designed for static scenes. In this paper we present an extension of the photon mapping algorithm to handle dynamic environments. First, for each animation segment the static irradiance cache is computed only once for the scene with all dynamic objects removed. Then, for each frame, the dynamic objects are inserted and the irradiance cache is updated locally in the scene regions whose lighting is strongly affected by the objects. In the remaining scene regions the photon map is used to correct the irradiance values in the static cache. As a result the overall animation rendering efficiency is significantly improved and the temporal aliasing is reduced.}, BOOKTITLE = {Proceedings of Vision, Modeling, and Visualization 2002 (VMV 2002)}, PAGES = {69--76}, ADDRESS = {Erlangen, Germany}, }
Endnote
%0 Conference Proceedings %A Tawara, Takehiro %A Myszkowski, Karol %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Localizing the Final Gathering for Dynamic Scenes using the Photon Map : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-2FD2-D %F EDOC: 202197 %F OTHER: Local-ID: C125675300671F7B-26CF9AFACE9BDEF4C1256C80005DCB2A-Tawara2002 %D 2002 %B 7th International Fall Workshop on Vision, Modeling, and Visualization %Z date of event: 2002-11-20 - 2002-11-22 %C Erlangen, Germany %X Rendering of high quality animations with global illumination effects is very costly using traditional techniques designed for static scenes. In this paper we present an extension of the photon mapping algorithm to handle dynamic environments. First, for each animation segment the static irradiance cache is computed only once for the scene with all dynamic objects removed. Then, for each frame, the dynamic objects are inserted and the irradiance cache is updated locally in the scene regions whose lighting is strongly affected by the objects. In the remaining scene regions the photon map is used to correct the irradiance values in the static cache. As a result the overall animation rendering efficiency is significantly improved and the temporal aliasing is reduced. %B Proceedings of Vision, Modeling, and Visualization 2002 %P 69 - 76 %I Akademische Verlagsgesellschaft Aka GmbH %@ 1-58603-302-6
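The two-pass structure described in the abstract above lends itself to a compact illustration. The following is a toy, self-contained Python sketch of that control flow, not the authors' implementation: the 1-D strip of cache points, the occlusion offsets, and the correction formula are all invented stand-ins.

import numpy as np

# Toy illustration of localized final gathering for dynamic scenes:
# a static irradiance cache computed once per animation segment, a
# per-frame local update near the moving object, and a cheap
# photon-map-style correction elsewhere. All constants are invented.

n = 32
positions = np.linspace(0.0, 1.0, n)

def static_irradiance(x):
    # Stand-in for the expensive static lighting solution.
    return 1.0 + 0.2 * np.sin(2 * np.pi * x)

# Pass 1: static cache, computed once for the segment with all
# dynamic objects removed.
cache = static_irradiance(positions)

for frame, obj_x in enumerate(np.linspace(0.1, 0.9, 5)):
    irradiance = cache.copy()
    near = np.abs(positions - obj_x) < 0.15  # region strongly affected by the object
    # Pass 2a: full local recomputation where the dynamic object dominates.
    irradiance[near] = static_irradiance(positions[near]) - 0.5
    # Pass 2b: photon-map-style correction of the remaining static values.
    irradiance[~near] -= 0.05 / (1 + 10 * np.abs(positions[~near] - obj_x))
    print(f"frame {frame}: mean irradiance {irradiance.mean():.3f}")

The point of the split is that only the "near" cache entries pay the full recomputation cost each frame, which is the efficiency argument the abstract makes.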
2001
Daubert, K., Lensch, H.P.A., Heidrich, W., and Seidel, H.-P. 2001. Efficient Cloth Modeling and Rendering. Rendering Techniques 2001 (EGSR 2001), Springer.
Abstract
Realistic modeling and high-performance rendering of cloth and clothing is a challenging problem. Often these materials are seen at distances where individual stitches and knits can be made out and need to be accounted for. Modeling of the geometry at this level of detail fails due to sheer complexity, while simple texture mapping techniques do not produce the desired quality. In this paper, we describe an efficient and realistic approach that takes into account view-dependent effects such as small displacements causing occlusion and shadows, as well as illumination effects. The method is efficient in terms of memory consumption, and uses a combination of hardware and software rendering to achieve high performance. It is conceivable that future graphics hardware will be flexible enough for full hardware rendering of the proposed method.
Export
BibTeX
@inproceedings{Daubert-et-al_EGSR01, TITLE = {Efficient Cloth Modeling and Rendering}, AUTHOR = {Daubert, Katja and Lensch, Hendrik P. A. and Heidrich, Wolfgang and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0946-2767}, ISBN = {978-3-211-83709-2}, DOI = {10.1007/978-3-7091-6242-2_6}, LOCALID = {Local-ID: C125675300671F7B-FBC662E15414073CC1256A7D00509B96-Daubert2001}, PUBLISHER = {Springer}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {Realistic modeling and high-performance rendering of cloth and clothing is a challenging problem. Often these materials are seen at distances where individual stitches and knits can be made out and need to be accounted for. Modeling of the geometry at this level of detail fails due to sheer complexity, while simple texture mapping techniques do not produce the desired quality. In this paper, we describe an efficient and realistic approach that takes into account view-dependent effects such as small displacements causing occlusion and shadows, as well as illumination effects. The method is efficient in terms of memory consumption, and uses a combination of hardware and software rendering to achieve high performance. It is conceivable that future graphics hardware will be flexible enough for full hardware rendering of the proposed method.}, BOOKTITLE = {Rendering Techniques 2001 (EGSR 2001)}, EDITOR = {Gortler, Steven and Myszkowski, Karol}, PAGES = {63--70}, SERIES = {Eurographics}, ADDRESS = {London, UK}, }
Endnote
%0 Conference Proceedings %A Daubert, Katja %A Lensch, Hendrik P. A. %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Efficient Cloth Modeling and Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-327C-0 %F EDOC: 520206 %F OTHER: Local-ID: C125675300671F7B-FBC662E15414073CC1256A7D00509B96-Daubert2001 %R 10.1007/978-3-7091-6242-2_6 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %X Realistic modeling and high-performance rendering of cloth and clothing is a challenging problem. Often these materials are seen at distances where individual stitches and knits can be made out and need to be accounted for. Modeling of the geometry at this level of detail fails due to sheer complexity, while simple texture mapping techniques do not produce the desired quality. In this paper, we describe an efficient and realistic approach that takes into account view-dependent effects such as small displacements causing occlusion and shadows, as well as illumination effects. The method is efficient in terms of memory consumption, and uses a combination of hardware and software rendering to achieve high performance. It is conceivable that future graphics hardware will be flexible enough for full hardware rendering of the proposed method. %B Rendering Techniques 2001 %E Gortler, Steven; Myszkowski, Karol %P 63 - 70 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
Drago, F. and Myszkowski, K. 2001. Validation Proposal for Global Illumination and Rendering Techniques. Computers & Graphics25, 3.
Abstract
The goal of this study is to develop a complete set of data characterizing geometry, luminaires, and surfaces of a non-trivial existing environment for testing global illumination and rendering techniques. This paper briefly discusses the process of data acquisition. Also, the results of experiments on evaluating lighting simulation accuracy, and rendering fidelity for a Density Estimation Particle Tracing algorithm are presented. The importance of using the BRDF of surfaces in place of the more commonly used specular and diffuse reflectance coefficients is investigated for the test scene. The results obtained are contrasted with an ``artistic approach'' in which a skilled artist manually sets all reflectance characteristics to obtain a visually pleasant appearance that corresponds to the existing environment.
Export
BibTeX
@article{Myszkowski2001a, TITLE = {Validation Proposal for Global Illumination and Rendering Techniques}, AUTHOR = {Drago, Frederic and Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {0097-8493}, LOCALID = {Local-ID: C125675300671F7B-BD9D44D4C62EF5B8C1256A7D004D7794-Myszkowski2001a}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {The goal of this study is to develop a complete set of data characterizing geometry, luminaires, and surfaces of a non-trivial existing environment for testing global illumination and rendering techniques. This paper briefly discusses the process of data acquisition. Also, the results of experiments on evaluating lighting simulation accuracy, and rendering fidelity for a Density Estimation Particle Tracing algorithm are presented. The importance of using the BRDF of surfaces in place of the more commonly used specular and diffuse reflectance coefficients is investigated for the test scene. The results obtained are contrasted with an ``artistic approach'' in which a skilled artist manually sets all reflectance characteristics to obtain a visually pleasant appearance that corresponds to the existing environment.}, JOURNAL = {Computers \& Graphics}, VOLUME = {25}, NUMBER = {3}, PAGES = {511--518}, }
Endnote
%0 Journal Article %A Drago, Frederic %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Validation Proposal for Global Illumination and Rendering Techniques : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32D6-2 %F EDOC: 520202 %F OTHER: Local-ID: C125675300671F7B-BD9D44D4C62EF5B8C1256A7D004D7794-Myszkowski2001a %D 2001 %* Review method: peer-reviewed %X The goal of this study is to develop a complete set of data characterizing geometry, luminaires, and surfaces of a non-trivial existing environment for testing global illumination and rendering techniques. This paper briefly discusses the process of data acquisition. Also, the results of experiments on evaluating lighting simulation accuracy, and rendering fidelity for a Density Estimation Particle Tracing algorithm are presented. The importance of using the BRDF of surfaces in place of the more commonly used specular and diffuse reflectance coefficients is investigated for the test scene. The results obtained are contrasted with an ``artistic approach'' in which a skilled artist manually sets all reflectance characteristics to obtain a visually pleasant appearance that corresponds to the existing environment. %J Computers & Graphics %V 25 %N 3 %& 511 %P 511 - 518 %@ false
Ershov, S., Kolchin, K., and Myszkowski, K. 2001. Rendering Pearlescent Appearance Based on Paint-Composition Modeling. The European Association for Computer Graphics 22nd Annual Conference: EUROGRAPHICS 2001, Blackwell.
Export
BibTeX
@inproceedings{Myszkowski2000c, TITLE = {Rendering Pearlescent Appearance Based on Paint-Composition Modeling}, AUTHOR = {Ershov, Sergey and Kolchin, Konstantin and Myszkowski, Karol}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-9BFB2B24DB8E17BBC1256A7D004F1487-Myszkowski2000c}, PUBLISHER = {Blackwell}, YEAR = {2001}, DATE = {2001}, BOOKTITLE = {The European Association for Computer Graphics 22nd Annual Conference: EUROGRAPHICS 2001}, EDITOR = {Chalmers, Alan and Rhyne, Theresa-Marie}, PAGES = {227--238}, SERIES = {Computer Graphics Forum}, ADDRESS = {Manchester, UK}, }
Endnote
%0 Conference Proceedings %A Ershov, Sergey %A Kolchin, Konstantin %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Rendering Pearlescent Appearance Based on Paint-Composition Modeling : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32C8-2 %F EDOC: 520204 %F OTHER: Local-ID: C125675300671F7B-9BFB2B24DB8E17BBC1256A7D004F1487-Myszkowski2000c %I Blackwell %D 2001 %B EUROGRAPHICS 2001 %Z date of event: 2001 %C Manchester, UK %B The European Association for Computer Graphics 22nd Annual Conference: EUROGRAPHICS 2001 %E Chalmers, Alan; Rhyne, Theresa-Marie %P 227 - 238 %I Blackwell %B Computer Graphics Forum
Gortler, S. and Myszkowski, K., eds. 2001. Rendering Techniques 2001: Proceedings of the 12th Eurographics Workshop on Rendering. Springer.
Export
BibTeX
@proceedings{Myszkowski2000egwr, TITLE = {Rendering Techniques 2001: Proceedings of the 12th Eurographics Workshop on Rendering}, EDITOR = {Gortler, Steven and Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {3-211-83709-4}, LOCALID = {Local-ID: C125675300671F7B-E3F9C0E3D792F582C1256A7D004CBE1F-Myszkowski2000egwr}, PUBLISHER = {Springer}, YEAR = {2001}, DATE = {2001}, PAGES = {1--347}, }
Endnote
%0 Conference Proceedings %E Gortler, Steven %E Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Rendering Techniques 2001: Proceedings of the 12th Eurographics Workshop on Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32CA-D %F EDOC: 520201 %@ 3-211-83709-4 %F OTHER: Local-ID: C125675300671F7B-E3F9C0E3D792F582C1256A7D004CBE1F-Myszkowski2000egwr %I Springer %D 2001 %B 12th Eurographics Workshop on Rendering %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %P 1 - 347
Haber, J., Myszkowski, K., Yamauchi, H., and Seidel, H.-P. 2001. Perceptually Guided Corrective Splatting. Computer Graphics Forum (Proc. EUROGRAPHICS 2001)20, 3.
Abstract
One of the basic difficulties with interactive walkthroughs is the high quality rendering of object surfaces with non-diffuse light scattering characteristics. Since full ray tracing at interactive rates is usually impossible, we render a precomputed global illumination solution using graphics hardware and use remaining computational power to correct the appearance of non-diffuse objects on-the-fly. The question arises, how to obtain the best image quality as perceived by a human observer within a limited amount of time for each frame. We address this problem by enforcing corrective computation for those non-diffuse objects that are selected using a computational model of visual attention. We consider both the saliency- and task-driven selection of those objects and benefit from the fact that shading artifacts of ``unattended'' objects are likely to remain unnoticed. We use a hierarchical image-space sampling scheme to control ray tracing and splat the generated point samples. The resulting image converges progressively to a ray traced solution if the viewing parameters remain unchanged. Moreover, we use a sample cache to enhance visual appearance if the time budget for correction has been too low for some frame. We check the validity of the cached samples using a novel criterion suited for non-diffuse surfaces and reproject valid samples into the current view.
Export
BibTeX
@inproceedings{Haber-et-al_Eurograph.01, TITLE = {Perceptually Guided Corrective Splatting}, AUTHOR = {Haber, J{\"o}rg and Myszkowski, Karol and Yamauchi, Hitoshi and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0167-7055}, DOI = {10.1111/1467-8659.00507}, LOCALID = {Local-ID: C125675300671F7B-3992DB8541113439C1256A72003B9C5A-Haber:2001:PGCS}, PUBLISHER = {Blackwell}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {One of the basic difficulties with interactive walkthroughs is the high quality rendering of object surfaces with non-diffuse light scattering characteristics. Since full ray tracing at interactive rates is usually impossible, we render a precomputed global illumination solution using graphics hardware and use remaining computational power to correct the appearance of non-diffuse objects on-the-fly. The question arises, how to obtain the best image quality as perceived by a human observer within a limited amount of time for each frame. We address this problem by enforcing corrective computation for those non-diffuse objects that are selected using a computational model of visual attention. We consider both the saliency- and task-driven selection of those objects and benefit from the fact that shading artifacts of ``unattended'' objects are likely to remain unnoticed. We use a hierarchical image-space sampling scheme to control ray tracing and splat the generated point samples. The resulting image converges progressively to a ray traced solution if the viewing parameters remain unchanged. Moreover, we use a sample cache to enhance visual appearance if the time budget for correction has been too low for some frame. We check the validity of the cached samples using a novel criterion suited for non-diffuse surfaces and reproject valid samples into the current view.}, BOOKTITLE = {Proceedings of the Eurographics Conference 2001}, EDITOR = {Chalmers, Alan and Rhyne, Theresa-Marie}, PAGES = {142--153}, JOURNAL = {Computer Graphics Forum}, VOLUME = {20}, ISSUE = {3}, ADDRESS = {Manchester, UK}, }
Endnote
%0 Conference Proceedings %A Haber, Jörg %A Myszkowski, Karol %A Yamauchi, Hitoshi %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perceptually Guided Corrective Splatting : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32C0-1 %F EDOC: 520198 %F OTHER: Local-ID: C125675300671F7B-3992DB8541113439C1256A72003B9C5A-Haber:2001:PGCS %R 10.1111/1467-8659.00507 %D 2001 %B Eurographics Conference 2001 %Z date of event: 2001 - %C Manchester, UK %X One of the basic difficulties with interactive walkthroughs is the high quality rendering of object surfaces with non-diffuse light scattering characteristics. Since full ray tracing at interactive rates is usually impossible, we render a precomputed global illumination solution using graphics hardware and use remaining computational power to correct the appearance of non-diffuse objects on-the-fly. The question arises, how to obtain the best image quality as perceived by a human observer within a limited amount of time for each frame. We address this problem by enforcing corrective computation for those non-diffuse objects that are selected using a computational model of visual attention. We consider both the saliency- and task-driven selection of those objects and benefit from the fact that shading artifacts of ``unattended'' objects are likely to remain unnoticed. We use a hierarchical image-space sampling scheme to control ray tracing and splat the generated point samples. The resulting image converges progressively to a ray traced solution if the viewing parameters remain unchanged. Moreover, we use a sample cache to enhance visual appearance if the time budget for correction has been too low for some frame. We check the validity of the cached samples using a novel criterion suited for non-diffuse surfaces and reproject valid samples into the current view. %B Proceedings of the Eurographics Conference 2001 %E Chalmers, Alan; Rhyne, Theresa-Marie %P 142 - 153 %I Blackwell %J Computer Graphics Forum %V 20 %N 3 %I Blackwell-Wiley %@ false
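The per-frame budgeting idea in the abstract above, spending corrective ray samples on attended non-diffuse objects first, can be sketched in a few lines of Python. The object names, attention scores, and sample counts below are hypothetical, chosen only to show the allocation order; this is not the authors' code.

# Toy sketch of attention-driven allocation of corrective samples;
# objects left without budget keep their approximate hardware shading,
# whose artifacts are likely to remain unnoticed (per the abstract).
budget = 100  # corrective ray samples available this frame

objects = [
    # (name, attention score from a saliency/task model, samples for full correction)
    ("mirror_in_task_focus", 0.9, 60),
    ("glossy_table", 0.5, 50),
    ("chrome_knob_in_periphery", 0.1, 40),
]

for name, attention, needed in sorted(objects, key=lambda o: -o[1]):
    spent = min(needed, budget)
    budget -= spent
    print(f"{name}: {spent}/{needed} corrective samples")

Running this prints the peripheral object receiving no samples at all, which is exactly the saving the attention model is meant to justify.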
Lensch, H.P.A., Kautz, J., Goesele, M., Heidrich, W., and Seidel, H.-P. 2001. Image-Based Reconstruction of Spatially Varying Materials. Rendering Techniques 2001 (EGSR 2001), Springer.
Abstract
The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.
Export
BibTeX
@inproceedings{Lensch-et-al_EGSR01, TITLE = {Image-Based Reconstruction of Spatially Varying Materials}, AUTHOR = {Lensch, Hendrik P. A. and Kautz, Jan and Goesele, Michael and Heidrich, Wolfgang and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0946-2767}, ISBN = {978-3-211-83709-2}, DOI = {10.1007/978-3-7091-6242-2_10}, LOCALID = {Local-ID: C125675300671F7B-249EA7C6EDD9BBF4C1256A7D0052B695-Lensch:2001:IRS}, PUBLISHER = {Springer}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object.}, BOOKTITLE = {Rendering Techniques 2001 (EGSR 2001)}, EDITOR = {Gortler, Steven and Myszkowski, Karol}, PAGES = {103--114}, SERIES = {Eurographics}, ADDRESS = {London, UK}, }
Endnote
%0 Conference Proceedings %A Lensch, Hendrik P. A. %A Kautz, Jan %A Goesele, Michael %A Heidrich, Wolfgang %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Image-Based Reconstruction of Spatially Varying Materials : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32A1-7 %F EDOC: 520207 %F OTHER: Local-ID: C125675300671F7B-249EA7C6EDD9BBF4C1256A7D0052B695-Lensch:2001:IRS %R 10.1007/978-3-7091-6242-2_10 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %X The measurement of accurate material properties is an important step towards photorealistic rendering. Many real-world objects are composed of a number of materials that often show subtle changes even within a single material. Thus, for photorealistic rendering both the general surface properties as well as the spatially varying effects of the object are needed. We present an image-based measuring method that robustly detects the different materials of real objects and fits an average bidirectional reflectance distribution function (BRDF) to each of them. In order to model the local changes as well, we project the measured data for each surface point into a basis formed by the recovered BRDFs leading to a truly spatially varying BRDF representation. A high quality model of a real object can be generated with relatively few input data. The generated model allows for rendering under arbitrary viewing and lighting conditions and realistically reproduces the appearance of the original object. %B Rendering Techniques 2001 %E Gortler, Steven; Myszkowski, Karol %P 103 - 114 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
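The projection step described in the abstract above, expressing each surface point's measurements in a basis of recovered BRDFs, is at its core a per-texel least-squares fit. Below is a minimal numpy sketch of that structure; the sample counts, random data, and unconstrained solver are invented for illustration and are not the paper's pipeline.

import numpy as np

# Toy reconstruction of projecting per-texel reflectance samples into a
# basis of a few recovered BRDFs, yielding spatially varying weights.
rng = np.random.default_rng(0)
k = 50        # sampled view/light direction pairs
m = 3         # recovered basis BRDFs
n_texels = 4  # surface points with per-point measurements

basis = rng.random((k, m))            # columns: basis BRDFs sampled at k directions
true_w = rng.random((m, n_texels))    # hidden per-texel mixing weights
measured = basis @ true_w + 0.01 * rng.standard_normal((k, n_texels))

# Least-squares projection of each texel's samples into the BRDF basis.
weights, *_ = np.linalg.lstsq(basis, measured, rcond=None)
print(np.round(weights - true_w, 2))  # residual error of the recovered weights

In practice one would likely constrain or regularize such a fit; plain lstsq is used here only to show the shape of the projection.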
Myszkowski, K. 2001a. Applications of the Visual Differences Predictor in Global Illumination Computation. The Journal of Three Dimensional Images15, 4.
Abstract
We investigate applications of the Visible Difference Predictor (VDP) to steer global illumination computation. We use the VDP to monitor the progression of computation as a function of time for major global illumination algorithms. Based on the results obtained, we propose a novel global illumination algorithm which is a hybrid of stochastic (density estimation) and deterministic (adaptive mesh refinement) techniques used in an optimized sequence to reduce the differences between the intermediate and final images as predicted by the VDP. Also, the VDP is applied to decide upon stopping conditions for global illumination simulation, when further continuation of computation does not contribute to perceivable changes in the quality of the resulting images.
Export
BibTeX
@article{Myszkowski2001Aizu, TITLE = {Applications of the Visual Differences Predictor in Global Illumination Computation}, AUTHOR = {Myszkowski, Karol}, LANGUAGE = {eng}, ISSN = {1342-2189}, LOCALID = {Local-ID: C125675300671F7B-24AAEBE8788AB04EC1256B3B00604EF9-Myszkowski2001Aizu}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {We investigate applications of the Visible Difference Predictor (VDP) to steer global illumination computation. We use the VDP to monitor the progression of computation as a function of time for major global illumination algorithms. Based on the results obtained, we propose a novel global illumination algorithm which is a hybrid of stochastic (density estimation) and deterministic (adaptive mesh refinement) techniques used in an optimized sequence to reduce the differences between the intermediate and final images as predicted by the VDP. Also, the VDP is applied to decide upon stopping conditions for global illumination simulation, when further continuation of computation does not contribute to perceivable changes in the quality of the resulting images.}, JOURNAL = {The Journal of Three Dimensional Images}, VOLUME = {15}, NUMBER = {4}, PAGES = {57--64}, }
Endnote
%0 Journal Article %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Applications of the Visual Differences Predictor in Global Illumination Computation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3265-2 %F EDOC: 520242 %F OTHER: Local-ID: C125675300671F7B-24AAEBE8788AB04EC1256B3B00604EF9-Myszkowski2001Aizu %D 2001 %* Review method: peer-reviewed %X We investigate applications of the Visible Difference Predictor (VDP) to steer global illumination computation. We use the VDP to monitor the progression of computation as a function of time for major global illumination algorithms. Based on the results obtained, we propose a novel global illumination algorithm which is a hybrid of stochastic (density estimation) and deterministic (adaptive mesh refinement) techniques used in an optimized sequence to reduce the differences between the intermediate and final images as predicted by the VDP. Also, the VDP is applied to decide upon stopping conditions for global illumination simulation, when further continuation of computation does not contribute to perceivable changes in the quality of the resulting images. %J The Journal of Three Dimensional Images %V 15 %N 4 %& 57 %P 57 - 64 %@ false
Myszkowski, K. 2001b. Chapter 6: Applications in Rendering and Animation. In: ACM Siggraph 2001, Course Notes: Seeing is Believing: Reality Perception in Modeling, Rendering and Animation. ACM Siggraph, New York, USA.
Export
BibTeX
@incollection{Myszkowski2001e, TITLE = {Chapter 6: Applications in Rendering and Animation}, AUTHOR = {Myszkowski, Karol}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-0AB78270DB481004C1256A7D00502668-Myszkowski2001e}, PUBLISHER = {ACM Siggraph}, ADDRESS = {New York, USA}, YEAR = {2001}, DATE = {2001}, BOOKTITLE = {ACM Siggraph 2001, Course Notes: Seeing is Believing: Reality Perception in Modeling, Rendering and Animation}, EDITOR = {McNamara, Ann and Chalmers, Alan}, PAGES = {1--52}, SERIES = {ACM Siggraph 2001, Course Notes}, VOLUME = {21}, }
Endnote
%0 Book Section %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Chapter 6: Applications in Rendering and Animation : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3271-6 %F EDOC: 520205 %F OTHER: Local-ID: C125675300671F7B-0AB78270DB481004C1256A7D00502668-Myszkowski2001e %I ACM Siggraph %C New York, USA %D 2001 %B ACM Siggraph 2001, Course Notes: Seeing is Believing: Reality Perception in Modeling, Rendering and Animation %E McNamara, Ann; Chalmers, Alan %P 1 - 52 %I ACM Siggraph %C New York, USA %S ACM Siggraph 2001, Course Notes %N 21
Myszkowski, K. 2001c. Efficient and Predictive Realistic Image Synthesis. Habilitation thesis, Warsaw Institute of Technology, Warsaw, Poland.
Abstract
Synthesis of realistic images which predict the appearance of the real world has many applications including architecture and interior design, illumination engineering, environmental assessment, special effects and film production, along with many others. Due to costly global illumination computation, which is required for the prediction of appearance, physically-based rendering still remains the domain of research laboratories, and is rarely used in industrial practice. The main goal of this work is to analyze problems and provide solutions towards making predictive rendering an efficient and practical tool. First, existing global illumination techniques are discussed, then efficient solutions which handle complex geometry, multiple light sources, and arbitrary light scattering characteristics are proposed. Since real-time lighting computation is not affordable for complex environments, techniques of lighting storage and real-time reconstruction using pre-calculated results are developed. Special attention is paid to the solutions which use perception-guided algorithms to improve their performance. This makes it possible to focus the computation on readily visible scene details, and to stop it when further improvement of the image quality cannot be perceived by the human observer. Also, by better use of perception-motivated physically-based partial solutions, meaningful images can be presented to the user at the early stages of computation. Since many algorithms make simplifying assumptions about the underlying physical model in order to achieve gains in rendering performance, a validation procedure for testing lighting simulation accuracy and image quality is proposed. To check the requirement of appearance predictability imposed on the developed algorithms, the rendered images are compared against the corresponding real-world views.
Export
BibTeX
@phdthesis{Myszkowski2001hab, TITLE = {Efficient and Predictive Realistic Image Synthesis}, AUTHOR = {Myszkowski, Karol}, LANGUAGE = {eng}, LOCALID = {Local-ID: C125675300671F7B-D5B808201D3BF620C1256A7D004363DC-Myszkowski2001hab}, SCHOOL = {Warsaw Institute of Technology}, ADDRESS = {Warsaw, Poland}, YEAR = {2001}, DATE = {2001}, ABSTRACT = {Synthesis of realistic images which predict the appearance of the real world has many applications including architecture and interior design, illumination engineering, environmental assessment, special effects and film production, along with many others. Due to costly global illumination computation, which is required for the prediction of appearance, physically-based rendering still remains the domain of research laboratories, and is rarely used in industrial practice. The main goal of this work is to analyze problems and provide solutions towards making predictive rendering an efficient and practical tool. First, existing global illumination techniques are discussed, then efficient solutions which handle complex geometry, multiple light sources, and arbitrary light scattering characteristics are proposed. Since real-time lighting computation is not affordable for complex environments, techniques of lighting storage and real-time reconstruction using pre-calculated results are developed. Special attention is paid to the solutions which use perception-guided algorithms to improve their performance. This makes it possible to focus the computation on readily visible scene details, and to stop it when further improvement of the image quality cannot be perceived by the human observer. Also, by better use of perception-motivated physically-based partial solutions, meaningful images can be presented to the user at the early stages of computation. Since many algorithms make simplifying assumptions about the underlying physical model in order to achieve gains in rendering performance, a validation procedure for testing lighting simulation accuracy and image quality is proposed. To check the requirement of appearance predictability imposed on the developed algorithms, the rendered images are compared against the corresponding real-world views.}, TYPE = {Habilitation thesis}, }
Endnote
%0 Thesis %A Myszkowski, Karol %+ External Organizations %T Efficient and Predictive Realistic Image Synthesis : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-1482-7 %F EDOC: 520200 %F OTHER: Local-ID: C125675300671F7B-D5B808201D3BF620C1256A7D004363DC-Myszkowski2001hab %I Warsaw Institute of Technology %C Warsaw, Poland %D 2001 %V habilitation %9 habilitation %X Synthesis of realistic images which predict the appearance of the real world has many applications including architecture and interior design, illumination engineering, environmental assessment, special effects and film production, along with many others. Due to costly global illumination computation, which is required for the prediction of appearance, physically-based rendering still remains the domain of research laboratories, and is rarely used in industrial practice. The main goal of this work is to analyze problems and provide solutions towards making predictive rendering an efficient and practical tool. First, existing global illumination techniques are discussed, then efficient solutions which handle complex geometry, multiple light sources, and arbitrary light scattering characteristics are proposed. Since real-time lighting computation is not affordable for complex environments, techniques of lighting storage and real-time reconstruction using pre-calculated results are developed. Special attention is paid to the solutions which use perception-guided algorithms to improve their performance. This makes it possible to focus the computation on readily visible scene details, and to stop it when further improvement of the image quality cannot be perceived by the human observer. Also, by better use of perception-motivated physically-based partial solutions, meaningful images can be presented to the user at the early stages of computation. Since many algorithms make simplifying assumptions about the underlying physical model in order to achieve gains in rendering performance, a validation procedure for testing lighting simulation accuracy and image quality is proposed. To check the requirement of appearance predictability imposed on the developed algorithms, the rendered images are compared against the corresponding real-world views.
Myszkowski, K., Tawara, T., Akamine, H., and Seidel, H.-P. 2001. Perception-Guided Global Illumination Solution for Animation Rendering. Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, ACM.
Export
BibTeX
@inproceedings{Myszkowski-et-al_SIGGRAPH01, TITLE = {Perception-Guided Global Illumination Solution for Animation Rendering}, AUTHOR = {Myszkowski, Karol and Tawara, Takehiro and Akamine, Hiroyuki and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISBN = {978-1-58113-374-5}, URL = {http://www.mpi-sb.mpg.de/resources/aqm/dynenv/paper/sg2001-myszkowski.pdf}, DOI = {10.1145/383259.383284}, LOCALID = {Local-ID: C125675300671F7B-1D7B8F7EA05FC2F8C1256A7D004EABD7-Myszkowski2001b}, PUBLISHER = {ACM}, YEAR = {2001}, DATE = {2001}, BOOKTITLE = {Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques}, EDITOR = {Fiume, Eugene}, PAGES = {221--230}, ADDRESS = {Los Angeles, CA, USA}, }
Endnote
%0 Conference Proceedings %A Myszkowski, Karol %A Tawara, Takehiro %A Akamine, Hiroyuki %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Guided Global Illumination Solution for Animation Rendering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32BD-B %F EDOC: 520203 %U http://www.mpi-sb.mpg.de/resources/aqm/dynenv/paper/sg2001-myszkowski.pdf %F OTHER: Local-ID: C125675300671F7B-1D7B8F7EA05FC2F8C1256A7D004EABD7-Myszkowski2001b %R 10.1145/383259.383284 %D 2001 %B 28th Annual Conference on Computer Graphics and Interactive Techniques %Z date of event: 2001-08-12 - 2001-08-17 %C Los Angeles, CA, USA %B Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques %E Fiume, Eugene %P 221 - 230 %I ACM %@ 978-1-58113-374-5
Scheel, A., Stamminger, M., and Seidel, H.-P. 2001. Thrifty Final Gather for Radiosity. Rendering Techniques 2001 (EGSR 2001), Springer.
Export
BibTeX
@inproceedings{Scheel-et-al_EGSR01, TITLE = {Thrifty Final Gather for Radiosity}, AUTHOR = {Scheel, Annette and Stamminger, Marc and Seidel, Hans-Peter}, LANGUAGE = {eng}, ISSN = {0946-2767}, ISBN = {978-3-211-83709-2}, DOI = {10.1007/978-3-7091-6242-2_1}, LOCALID = {Local-ID: C125675300671F7B-B8DB9D578EB3CD31C1256A7D0059548F-Scheel2001}, PUBLISHER = {Springer}, YEAR = {2001}, DATE = {2001}, BOOKTITLE = {Rendering Techniques 2001 (EGSR 2001)}, EDITOR = {Gortler, Steven and Myszkowski, Karol}, PAGES = {1--12}, SERIES = {Eurographics}, ADDRESS = {London, UK}, }
Endnote
%0 Conference Proceedings %A Scheel, Annette %A Stamminger, Marc %A Seidel, Hans-Peter %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Thrifty Final Gather for Radiosity : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-32D3-8 %F EDOC: 520208 %F OTHER: Local-ID: C125675300671F7B-B8DB9D578EB3CD31C1256A7D0059548F-Scheel2001 %R 10.1007/978-3-7091-6242-2_1 %D 2001 %B 12th Eurographics Workshop on Rendering Techniques %Z date of event: 2001-06-25 - 2001-06-27 %C London, UK %B Rendering Techniques 2001 %E Gortler, Steven; Myszkowski, Karol %P 1 - 12 %I Springer %@ 978-3-211-83709-2 %B Eurographics %@ false
2000
Myszkowski, K. and Kunii, T.L. 2000. A Case Study Towards Validation of Global Illumination Algorithms: Progressive Hierarchical Radiosity with Clustering. The Visual Computer16, 5.
Abstract
The paper consists of two main parts: presentation of an efficient global illumination algorithm and description of its extensive experimental validation. In the first part, a hybrid of cluster-based hierarchical and progressive radiosity techniques is proposed, which does not require storing links between interacting surfaces and clusters. The clustering does not rely on input geometry, but is performed on the basis of local position in the scene for a pre-meshed scene model. The locality of the resulting clusters improves the accuracy of form factor calculations, and increases the number of possible high-level energy transfers between clusters within an imposed error bound. Limited refinement of the hierarchy of light interactions is supported without compromising the quality of shading when intermediate images are produced immediately upon user request. In the second part, a multi-stage validation procedure is proposed and results obtained using the presented algorithm are discussed. At first, experimental validation of the algorithm against analytically-derived and measured real-world data is performed to check how calculation speed is traded for lighting simulation accuracy for various clustering and meshing scenarios. Then the algorithm performance and rendering quality is tested by a direct comparison of the virtual and real-world images of a complex environment.
Export
BibTeX
@article{Myszkowski2000b, TITLE = {A Case Study Towards Validation of Global Illumination Algorithms: Progressive Hierarchical Radiosity with Clustering}, AUTHOR = {Myszkowski, Karol and Kunii, Tosiyasu L.}, LANGUAGE = {eng}, ISSN = {0178-2789}, URL = {http://link.springer.de/link/service/journals/00371/bibs/0016005/00160271.htm}, LOCALID = {Local-ID: C125675300671F7B-B8F930C0A468A8C2C1256A000045EE94-Myszkowski2000b}, YEAR = {2000}, DATE = {2000}, ABSTRACT = {The paper consists of two main parts: presentation of an efficient global illumination algorithm and description of its extensive experimental validation. In the first part, a hybrid of cluster-based hierarchical and progressive radiosity techniques is proposed, which does not require storing links between interacting surfaces and clusters. The clustering does not rely on input geometry, but is performed on the basis of local position in the scene for a pre-meshed scene model. The locality of the resulting clusters improves the accuracy of form factor calculations, and increases the number of possible high-level energy transfers between clusters within an imposed error bound. Limited refinement of the hierarchy of light interactions is supported without compromising the quality of shading when intermediate images are produced immediately upon user request. In the second part, a multi-stage validation procedure is proposed and results obtained using the presented algorithm are discussed. At first, experimental validation of the algorithm against analytically-derived and measured real-world data is performed to check how calculation speed is traded for lighting simulation accuracy for various clustering and meshing scenarios. Then the algorithm performance and rendering quality is tested by a direct comparison of the virtual and real-world images of a complex environment.}, JOURNAL = {The Visual Computer}, VOLUME = {16}, NUMBER = {5}, PAGES = {271--288}, }
Endnote
%0 Journal Article %A Myszkowski, Karol %A Kunii, Tosiyasu L. %+ Computer Graphics, MPI for Informatics, Max Planck Society %T A Case Study Towards Validation of Global Illumination Algorithms: Progressive Hierarchical Radiosity with Clustering : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3480-1 %F EDOC: 520172 %U http://link.springer.de/link/service/journals/00371/bibs/0016005/00160271.htm %F OTHER: Local-ID: C125675300671F7B-B8F930C0A468A8C2C1256A000045EE94-Myszkowski2000b %D 2000 %* Review method: peer-reviewed %X The paper consists of two main parts: presentation of an efficient global illumination algorithm and description of its extensive experimental validation. In the first part, a hybrid of cluster-based hierarchical and progressive radiosity techniques is proposed, which does not require storing links between interacting surfaces and clusters. The clustering does not rely on input geometry, but is performed on the basis of local position in the scene for a pre-meshed scene model. The locality of the resulting clusters improves the accuracy of form factor calculations, and increases the number of possible high-level energy transfers between clusters within an imposed error bound. Limited refinement of the hierarchy of light interactions is supported without compromising the quality of shading when intermediate images are produced immediately upon user request. In the second part, a multi-stage validation procedure is proposed and results obtained using the presented algorithm are discussed. At first, experimental validation of the algorithm against analytically-derived and measured real-world data is performed to check how calculation speed is traded for lighting simulation accuracy for various clustering and meshing scenarios. Then the algorithm performance and rendering quality is tested by a direct comparison of the virtual and real-world images of a complex environment. %J The Visual Computer %V 16 %N 5 %& 271 %P 271 - 288 %@ false
Myszkowski, K. 2000. Chapter 4: Perception-driven Global Illumination and Rendering Computation, and Chapter 6: Perception-driven rendering of high-quality walkthrough animations. In: Image quality metrics (Course 44). ACM SIGGRAPH, New York, USA.
Export
BibTeX
@incollection{Myszkowski2000d, TITLE = {Chapter 4: Perception-driven Global Illumination and Rendering Computation, and Chapter 6: Perception-driven rendering of high-quality walkthrough animations}, AUTHOR = {Myszkowski, Karol}, LANGUAGE = {eng}, ISBN = {1-58113-276-X}, LOCALID = {Local-ID: C125675300671F7B-C31DAAC1BAF847C2C1256A0000477A7E-Myszkowski2000d}, PUBLISHER = {ACM SIGGRAPH}, ADDRESS = {New York, USA}, YEAR = {2000}, DATE = {2000}, BOOKTITLE = {Image quality metrics (Course 44)}, EDITOR = {McNamara, Ann and Chalmers, Alan}, PAGES = {43--59 and 75--81}, SERIES = {ACM Siggraph Course Notes}, VOLUME = {44}, }
Endnote
%0 Book Section %A Myszkowski, Karol %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Chapter 4: Perception-driven Global Illumination and Rendering Computation, and Chapter 6: Perception-driven rendering of high-quality walkthrough animations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3490-E %F EDOC: 520173 %F OTHER: Local-ID: C125675300671F7B-C31DAAC1BAF847C2C1256A0000477A7E-Myszkowski2000d %I ACM SIGGRAPH %C New York, USA %D 2000 %B Image quality metrics (Course 44) %E McNamara, Ann; Chalmers, Alan %P 43 - 59 and 75 - 81 %I ACM SIGGRAPH %C New York, USA %@ 1-58113-276-X %S ACM Siggraph Course Notes %N 44
Myszkowski, K., Rokita, P., and Tawara, T. 2000. Perception-Based Fast Rendering and Antialiasing of Walkthrough Sequences. IEEE Transactions on Visualization and Computer Graphics6, 4.
Abstract
In this paper, we consider accelerated rendering of high quality walkthrough animation sequences along predefined paths. To improve rendering performance we use a combination of: a hybrid ray tracing and Image-Based Rendering (IBR) technique, and a novel perception-based antialiasing technique. In our rendering solution we derive as many pixels as possible using inexpensive IBR techniques without affecting the animation quality. A perception-based spatiotemporal Animation Quality Metric (AQM) is used to automatically guide such a hybrid rendering. The Image Flow (IF) obtained as a by-product of the IBR computation is an integral part of the AQM. The final animation quality is enhanced by an efficient spatiotemporal antialiasing, which utilizes the IF to perform a motion-compensated filtering. The filter parameters have been tuned using the AQM predictions of animation quality as perceived by the human observer. These parameters adapt locally to the visual pattern velocity.
Export
BibTeX
@article{Myszkowski2000a, TITLE = {Perception-Based Fast Rendering and Antialiasing of Walkthrough Sequences}, AUTHOR = {Myszkowski, Karol and Rokita, Przemyslaw and Tawara, Takehiro}, LANGUAGE = {eng}, ISSN = {1077-2626}, URL = {http://www.computer.org/tvcg/tg2000/v4toc.htm}, LOCALID = {Local-ID: C125675300671F7B-D26AC90942D3569BC1256A000039C125-Myszkowski2000a}, YEAR = {2000}, DATE = {2000}, ABSTRACT = {In this paper, we consider accelerated rendering of high quality walkthrough animation sequences along predefined paths. To improve rendering performance we use a combination of: a hybrid ray tracing and Image-Based Rendering (IBR) technique, and a novel perception-based antialiasing technique. In our rendering solution we derive as many pixels as possible using inexpensive IBR techniques without affecting the animation quality. A perception-based spatiotemporal Animation Quality Metric (AQM) is used to automatically guide such a hybrid rendering. The Image Flow (IF) obtained as a by-product of the IBR computation is an integral part of the AQM. The final animation quality is enhanced by an efficient spatiotemporal antialiasing, which utilizes the IF to perform a motion-compensated filtering. The filter parameters have been tuned using the AQM predictions of animation quality as perceived by the human observer. These parameters adapt locally to the visual pattern velocity.}, JOURNAL = {IEEE Transactions on Visualization and Computer Graphics}, VOLUME = {6}, NUMBER = {4}, PAGES = {360--379}, }
Endnote
%0 Journal Article %A Myszkowski, Karol %A Rokita, Przemyslaw %A Tawara, Takehiro %+ Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society Computer Graphics, MPI for Informatics, Max Planck Society %T Perception-Based Fast Rendering and Antialiasing of Walkthrough Sequences : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-34E7-E %F EDOC: 520170 %U http://www.computer.org/tvcg/tg2000/v4toc.htm %F OTHER: Local-ID: C125675300671F7B-D26AC90942D3569BC1256A000039C125-Myszkowski2000a %D 2000 %* Review method: peer-reviewed %X In this paper, we consider accelerated rendering of high quality walkthrough animation sequences along predefined paths. To improve rendering performance we use a combination of: a hybrid ray tracing and Image-Based Rendering (IBR) technique, and a novel perception-based antialiasing technique. In our rendering solution we derive as many pixels as possible using inexpensive IBR techniques without affecting the animation quality. A perception-based spatiotemporal Animation Quality Metric (AQM) is used to automatically guide such a hybrid rendering. The Image Flow (IF) obtained as a by-product of the IBR computation is an integral part of the AQM. The final animation quality is enhanced by an efficient spatiotemporal antialiasing, which utilizes the IF to perform a motion-compensated filtering. The filter parameters have been tuned using the AQM predictions of animation quality as perceived by the human observer. These parameters adapt locally to the visual pattern velocity. %J IEEE Transactions on Visualization and Computer Graphics %V 6 %N 4 %& 360 %P 360 - 379 %@ false
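As a rough illustration of the velocity-adaptive filtering described in the abstract above, the sketch below widens a Gaussian kernel with the local image-flow speed, so fast-moving detail is smoothed more aggressively. The kernel-width rule and all constants are invented for this toy and are not the paper's perceptually tuned parameters.

import numpy as np

# Schematic stand-in for motion-compensated, velocity-adaptive filtering:
# pixels with higher image-flow speed receive wider low-pass kernels,
# exploiting reduced visual sensitivity to fast-moving patterns.
def adaptive_blur_1d(signal, velocity, base_sigma=0.5, gain=0.75):
    out = np.empty_like(signal)
    x = np.arange(signal.size)
    for i in range(signal.size):
        sigma = base_sigma + gain * velocity[i]  # wider kernel at higher speed
        w = np.exp(-0.5 * ((x - i) / sigma) ** 2)
        out[i] = np.dot(w, signal) / w.sum()
    return out

signal = np.sin(np.linspace(0, 12 * np.pi, 200))  # detailed pattern
velocity = np.linspace(0.0, 4.0, 200)             # image-flow speed per pixel
print(adaptive_blur_1d(signal, velocity)[::40])   # detail fades as speed grows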
Volevich, V., Myszkowski, K., Khodulev, A., and Kopylov, E. 2000. Using the Visual Differences Predictor to Improve Performance of Progressive Global Illumination Computations. ACM Transactions on Graphics19, 2.
Abstract
A novel view-independent technique for progressive global illumination computations has been developed that uses prediction of visible differences to improve both efficiency and effectiveness of physically-sound lighting solutions. The technique is a mixture of stochastic (density estimation) and deterministic (adaptive mesh refinement) algorithms that are used in a sequence optimized to reduce the differences between the intermediate and final images as perceived by the human observer in the course of lighting computations. The quantitative measurements of visibility were obtained using the model of human vision captured in the Visible Differences Predictor (VDP) developed by Daly [1993]. The VDP responses were used to support selection of the best component algorithms from a pool of global illumination solutions, and to enhance the selected algorithms for even better progressive refinement of the image quality. Also, the VDP was used to determine the optimal sequential order of component-algorithm execution, and to choose the points at which switch-over between algorithms should take place. As the VDP is computationally expensive, it was applied exclusively at the stage of design and tuning of the composite technique, and so perceptual considerations are embedded into the resulting solution, though no VDP calculations are performed during the lighting simulation. The proposed global illumination technique is also novel, providing at unprecedented speeds intermediate image solutions of high quality even for complex scenes. One advantage of the technique is that local estimates of global illumination are readily available at early stages of computations. This makes possible the development of more robust adaptive mesh subdivision, which is guided by local contrast information. Also, based on stochastically-derived estimates of the local illumination error, an efficient object space filtering is applied to substantially reduce the visible noise inherent in stochastic solutions.
Export
BibTeX
@article{Volevich2000, TITLE = {Using the Visual Differences Predictor to Improve Performance of Progressive Global Illumination Computations}, AUTHOR = {Volevich, Vladimir and Myszkowski, Karol and Khodulev, Andrei and Kopylov, Edward}, LANGUAGE = {eng}, ISSN = {0730-0301}, URL = {http://www.acm.org/pubs/citations/journals/tog/2000-19-2/p122-volevich/}, LOCALID = {Local-ID: C125675300671F7B-7CA22EAB9B616843C1256A00003EB0E0-Volevich2000}, YEAR = {2000}, DATE = {2000}, ABSTRACT = {A novel view-independent technique for progressive global illumination computations has been developed that uses prediction of visible differences to improve both efficiency and effectiveness of physically-sound lighting solutions. The technique is a mixture of stochastic (density estimation) and deterministic (adaptive mesh refinement) algorithms that are used in a sequence optimized to reduce the differences between the intermediate and final images as perceived by the human observer in the course of lighting computations. The quantitative measurements of visibility were obtained using the model of human vision captured in the Visible Differences Predictor (VDP) developed by Daly \cite{Daly93}. The VDP responses were used to support selection of the best component algorithms from a pool of global illumination solutions, and to enhance the selected algorithms for even better progressive refinement of the image quality. Also, the VDP was used to determine the optimal sequential order of component-algorithm execution, and to choose the points at which switch-over between algorithms should take place. As the VDP is computationally expensive, it was applied exclusively at the stage of design and tuning of the composite technique, and so perceptual considerations are embedded into the resulting solution, though no VDP calculations are performed during the lighting simulation. The proposed global illumination technique is also novel, providing at unprecedented speeds intermediate image solutions of high quality even for complex scenes. One advantage of the technique is that local estimates of global illumination are readily available at early stages of computations. This makes possible the development of more robust adaptive mesh subdivision, which is guided by local contrast information. Also, based on stochastically-derived estimates of the local illumination error, an efficient object space filtering is applied to substantially reduce the visible noise inherent in stochastic solutions.}, JOURNAL = {ACM Transactions on Graphics}, VOLUME = {19}, NUMBER = {2}, PAGES = {122--161}, }
Endnote
%0 Journal Article %A Volevich, Vladimir %A Myszkowski, Karol %A Khodulev, Andrei %A Kopylov, Edward %+ Computer Graphics, MPI for Informatics, Max Planck Society %T Using the Visual Differences Predictor to Improve Performance of Progressive Global Illumination Computations : %G eng %U http://hdl.handle.net/11858/00-001M-0000-000F-3506-F %F EDOC: 520171 %U http://www.acm.org/pubs/citations/journals/tog/2000-19-2/p122-volevich/ %F OTHER: Local-ID: C125675300671F7B-7CA22EAB9B616843C1256A00003EB0E0-Volevich2000 %D 2000 %* Review method: peer-reviewed %X A novel view-independent technique for progressive global illumination computations has been developed that uses prediction of visible differences to improve both efficiency and effectiveness of physically-sound lighting solutions. The technique is a mixture of stochastic (density estimation) and deterministic (adaptive mesh refinement) algorithms that are used in a sequence optimized to reduce the differences between the intermediate and final images as perceived by the human observer in the course of lighting computations. The quantitative measurements of visibility were obtained using the model of human vision captured in the Visible Differences Predictor (VDP) developed by Daly [1993]. The VDP responses were used to support selection of the best component algorithms from a pool of global illumination solutions, and to enhance the selected algorithms for even better progressive refinement of the image quality. Also, the VDP was used to determine the optimal sequential order of component-algorithm execution, and to choose the points at which switch-over between algorithms should take place. As the VDP is computationally expensive, it was applied exclusively at the stage of design and tuning of the composite technique, and so perceptual considerations are embedded into the resulting solution, though no VDP calculations are performed during the lighting simulation. The proposed global illumination technique is also novel, providing at unprecedented speeds intermediate image solutions of high quality even for complex scenes. One advantage of the technique is that local estimates of global illumination are readily available at early stages of computations. This makes possible the development of more robust adaptive mesh subdivision, which is guided by local contrast information. Also, based on stochastically-derived estimates of the local illumination error, an efficient object space filtering is applied to substantially reduce the visible noise inherent in stochastic solutions. %J ACM Transactions on Graphics %V 19 %N 2 %& 122 %P 122 - 161 %@ false
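The switch-over logic described in the abstract above can be illustrated with a toy progressive solver: run each operator until a perceptual-difference estimate stops improving, then hand over to the next. In this sketch a plain RMS error against a known reference stands in for the far richer VDP (which the paper applies only at design and tuning time), and both "operators" are invented stand-ins, not the paper's algorithms.

import numpy as np

# Toy illustration of perceptually guided operator sequencing.
rng = np.random.default_rng(1)
reference = np.ones(1000)  # stand-in for the converged solution
image = np.zeros(1000)

def noisy_fast_step(img):     # e.g. a stochastic density-estimation pass
    return img + 0.25 * (reference - img) + 0.02 * rng.standard_normal(img.size)

def accurate_slow_step(img):  # e.g. a deterministic mesh-refinement pass
    return img + 0.05 * (reference - img)

def perceived_difference(img):
    # RMS error as a crude stand-in for a visible-differences metric.
    return float(np.sqrt(np.mean((img - reference) ** 2)))

for step_fn in (noisy_fast_step, accurate_slow_step):
    prev = perceived_difference(image)
    while True:
        image = step_fn(image)
        cur = perceived_difference(image)
        if prev - cur < 1e-3:  # no longer perceptibly improving: switch over
            break
        prev = cur
print(f"final difference estimate: {perceived_difference(image):.4f}")

The design point the sketch tries to capture is that the fast, noisy operator dominates early (large visible gains per unit work) and is abandoned exactly when its improvements fall below a perceptual threshold.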